diff --git a/.gitattributes b/.gitattributes
index c3b959bffd..ce1d6cd14e 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,2 +1,2 @@
 /py/bitbox02/bitbox02/generated/* linguist-generated=true
-/src/rust/vendor/** linguist-generated=true
+/external/vendor/** linguist-generated=true
diff --git a/external/vendor/async-channel/.cargo-checksum.json b/external/vendor/async-channel/.cargo-checksum.json
new file mode 100644
index 0000000000..e739b2faa6
--- /dev/null
+++ b/external/vendor/async-channel/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{".cargo_vcs_info.json":"a8e8bda4555101b91c85a44b7987f172e06605e05c27e658d4cc52951715478b","CHANGELOG.md":"cf0f1ed9633cb79eaae0eebba3d40430321a180522a3f9fdc2a1c2b89cf0c21d","Cargo.lock":"e343f900ca65af36577564255e41e6b6ae4bd1b872fdb0f55d4d99816495f458","Cargo.toml":"0d56b953fd80e91c2f60d92df14866dede9822a91fa2abccc618b76abbf1ea97","Cargo.toml.orig":"4b88d2c58ff7187b3d96b14b1d60fc7c01145aa946f2ce04c0e23d140e4a2463","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"a9ae50c2a6df06ff476c369d23553a7bbcb4c47f25a3353d168e05289acbb4ed","src/lib.rs":"96d786959c66fc32a0181a8015ff9fd06b7a9955d878bfd7760b81304ee35223","tests/bounded.rs":"8365bccd2c413f4261ccbaff0e62593b51b8a097944148d13ceae9b19d77e796","tests/unbounded.rs":"1bed2b00f11495247f4a5084f553016bf4cbf6136ad159a776dcbd9a8bc29f3c"},"package":"924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2"}
\ No newline at end of file
diff --git a/external/vendor/async-channel/.cargo_vcs_info.json b/external/vendor/async-channel/.cargo_vcs_info.json
new file mode 100644
index 0000000000..1d0157d97d
--- /dev/null
+++ b/external/vendor/async-channel/.cargo_vcs_info.json
@@ -0,0 +1,6 @@
+{
+  "git": {
+    "sha1": "35a63c456aaa1906015f5a825e7e35505a749afa"
+  },
+  "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/external/vendor/async-channel/CHANGELOG.md b/external/vendor/async-channel/CHANGELOG.md
new file mode 100644
index 0000000000..afe13b1b8a
--- /dev/null
+++ b/external/vendor/async-channel/CHANGELOG.md
@@ -0,0 +1,107 @@
+# Version 2.5.0
+
+- Add `Sender::closed()` (#102)
+
+# Version 2.4.0
+
+- Add `Sender::same_channel()` and `Receiver::same_channel()`. (#98)
+- Add `portable-atomic` feature to support platforms without atomics. (#106)
+
+# Version 2.3.1
+
+- Use the correct version of `async-channel` in our manifest. (#93)
+
+# Version 2.3.0
+
+- Add `force_send` for sending items over the channel that displace other items. (#89)
+
+# Version 2.2.1
+
+- Fix the CI badge in the `crates.io` page. (#84)
+
+# Version 2.2.0
+
+- Bump `event-listener` to v5.0.0. (#79)
+- Bump MSRV to 1.60. (#80)
+
+# Version 2.1.1
+
+- Bump `event-listener` to v4.0.0. (#73)
+
+# Version 2.1.0
+
+- Bump `futures-lite` to its latest version. (#70)
+
+# Version 2.0.0
+
+- **Breaking:** Make `Send`, `Recv` and `Receiver` `!Unpin`. This enables more efficient event notification strategies. (#59)
+- **Breaking:** Add an `std` enabled-by-default feature that enables parts of the API that require `std`. (#59)
+- Add support for the `wasm32` target. (#67)
+
+# Version 1.9.0
+
+- Fix a bug where `WeakSender/WeakReceiver` could incorrectly return `Some` even if the channel is already closed (#60)
+- Remove the unnecessary `T: Clone` bound from `WeakSender/WeakReceiver`'s `Clone` implementation (#62)
+
+# Version 1.8.0
+
+- Prevent deadlock if sender/receiver is forgotten (#49)
+- Add weak sender and receiver (#51)
+- Update `concurrent-queue` to v2 (#50)
+
+# Version 1.7.1
+
+- Work around MSRV increase due to a cargo bug.
+
+# Version 1.7.0
+
+- Add `send_blocking` and `recv_blocking` (#47)
+
+# Version 1.6.1
+
+- Make `send` return `Send` (#34)
+
+# Version 1.6.0
+
+- Added `Send` and `Recv` futures (#33)
+- impl `FusedStream` for `Receiver` (#30)
+
+# Version 1.5.1
+
+- Fix typos in the docs.
+
+# Version 1.5.0
+
+- Add `receiver_count()` and `sender_count()`.
+
+# Version 1.4.2
+
+- Fix a bug that would sometime cause 100% CPU usage.
+
+# Version 1.4.1
+
+- Update dependencies.
+
+# Version 1.4.0
+
+- Update dependencies.
+
+# Version 1.3.0
+
+- Add `Sender::is_closed()` and `Receiver::is_closed()`.
+
+# Version 1.2.0
+
+- Add `Sender::close()` and `Receiver::close()`.
+
+# Version 1.1.1
+
+- Replace `usize::MAX` with `std::usize::MAX`.
+
+# Version 1.1.0
+
+- Add methods to error types.
+
+# Version 1.0.0
+
+- Initial version
diff --git a/external/vendor/async-channel/Cargo.lock b/external/vendor/async-channel/Cargo.lock
new file mode 100644
index 0000000000..e2079f70ce
--- /dev/null
+++ b/external/vendor/async-channel/Cargo.lock
@@ -0,0 +1,420 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "async-channel"
+version = "2.5.0"
+dependencies = [
+ "concurrent-queue",
+ "easy-parallel",
+ "event-listener-strategy",
+ "futures-core",
+ "futures-lite",
+ "pin-project-lite",
+ "portable-atomic",
+ "portable-atomic-util",
+ "wasm-bindgen-test",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
+
+[[package]]
+name = "cc"
+version = "1.2.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c1599538de2394445747c8cf7935946e3cc27e9625f889d979bfb2aaf569362"
+dependencies = [
+ "shlex",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
+
+[[package]]
+name = "concurrent-queue"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"
+dependencies = [
+ "crossbeam-utils",
+ "portable-atomic",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
+
+[[package]]
+name = "easy-parallel"
+version = "3.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2afbb9b0aef60e4f0d2b18129b6c0dff035a6f7dbbd17c2f38c1432102ee223c"
+
+[[package]]
+name = "event-listener"
+version = "5.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae"
+dependencies = [
+ "concurrent-queue",
+ "parking",
+ "pin-project-lite",
+ "portable-atomic",
+ "portable-atomic-util",
+]
+
+[[package]]
+name = "event-listener-strategy"
+version = "0.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93"
+dependencies = [
+ "event-listener",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "fastrand"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
+
+[[package]]
+name = "futures-core"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
+
+[[package]]
+name = "futures-io"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
+
+[[package]]
+name = "futures-lite"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532"
+dependencies = [
+ "fastrand",
+ "futures-core",
+ "futures-io",
+ "parking",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "js-sys"
+version = "0.3.77"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
+dependencies = [
+ "once_cell",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "log"
+version = "0.4.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
+
+[[package]]
+name = "minicov"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b"
+dependencies = [
+ "cc",
+ "walkdir",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.21.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
+
+[[package]]
+name = "parking"
+version = "2.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba"
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
+
+[[package]]
+name = "portable-atomic"
+version = "1.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"
+
+[[package]]
+name = "portable-atomic-util"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"
+dependencies = [
+ "portable-atomic",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
+name = "syn"
+version = "2.0.104"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
+
+[[package]]
+name = "walkdir"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
+dependencies = [
+ "bumpalo",
+ "log",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-futures"
+version = "0.4.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "once_cell",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "wasm-bindgen-test"
+version = "0.3.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3"
+dependencies = [
+ "js-sys",
+ "minicov",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "wasm-bindgen-test-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-test-macro"
+version = "0.3.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "web-sys"
+version = "0.3.77"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+dependencies = [
+ "windows-sys",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/external/vendor/async-channel/Cargo.toml b/external/vendor/async-channel/Cargo.toml
new file mode 100644
index 0000000000..9a78053c01
--- /dev/null
+++ b/external/vendor/async-channel/Cargo.toml
@@ -0,0 +1,100 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.60"
+name = "async-channel"
+version = "2.5.0"
+authors = ["Stjepan Glavina <stjepang@gmail.com>"]
+build = false
+exclude = ["/.*"]
+autolib = false
+autobins = false
+autoexamples = false
+autotests = false
+autobenches = false
+description = "Async multi-producer multi-consumer channel"
+readme = "README.md"
+keywords = [
+    "mpmc",
+    "mpsc",
+    "spmc",
+    "chan",
+    "futures",
+]
+categories = [
+    "asynchronous",
+    "concurrency",
+]
+license = "Apache-2.0 OR MIT"
+repository = "https://github.com/smol-rs/async-channel"
+
+[features]
+default = ["std"]
+portable-atomic = [
+    "concurrent-queue/portable-atomic",
+    "event-listener-strategy/portable-atomic",
+    "dep:portable-atomic-util",
+    "dep:portable-atomic",
+]
+std = [
+    "concurrent-queue/std",
+    "event-listener-strategy/std",
+]
+
+[lib]
+name = "async_channel"
+path = "src/lib.rs"
+
+[[test]]
+name = "bounded"
+path = "tests/bounded.rs"
+
+[[test]]
+name = "unbounded"
+path = "tests/unbounded.rs"
+
+[dependencies.concurrent-queue]
+version = "2.5"
+default-features = false
+
+[dependencies.event-listener-strategy]
+version = "0.5.4"
+default-features = false
+
+[dependencies.futures-core]
+version = "0.3.5"
+default-features = false
+
+[dependencies.pin-project-lite]
+version = "0.2.11"
+
+[dependencies.portable-atomic]
+version = "1"
+features = ["require-cas"]
+optional = true
+default-features = false
+
+[dependencies.portable-atomic-util]
+version = "0.2"
+features = ["alloc"]
+optional = true
+default-features = false
+
+[dev-dependencies.easy-parallel]
+version = "3"
+
+[dev-dependencies.futures-lite]
+version = "2"
+
+[target.'cfg(target_family = "wasm")'.dev-dependencies.wasm-bindgen-test]
+version = "0.3.37"
diff --git a/external/vendor/async-channel/Cargo.toml.orig b/external/vendor/async-channel/Cargo.toml.orig
new file mode 100644
index 0000000000..8c23891336
--- /dev/null
+++ b/external/vendor/async-channel/Cargo.toml.orig
@@ -0,0 +1,36 @@
+[package]
+name = "async-channel"
+# When publishing a new version:
+# - Update CHANGELOG.md
+# - Create "v2.x.y" git tag
+version = "2.5.0"
+authors = ["Stjepan Glavina <stjepang@gmail.com>"]
+edition = "2021"
+rust-version = "1.60"
+description = "Async multi-producer multi-consumer channel"
+license = "Apache-2.0 OR MIT"
+repository = "https://github.com/smol-rs/async-channel"
+keywords = ["mpmc", "mpsc", "spmc", "chan", "futures"]
+categories = ["asynchronous", "concurrency"]
+exclude = ["/.*"]
+
+[dependencies]
+concurrent-queue = { version = "2.5", default-features = false }
+event-listener-strategy = { version = "0.5.4", default-features = false }
+futures-core = { version = "0.3.5", default-features = false }
+pin-project-lite = "0.2.11"
+
+portable-atomic = { version = "1", default-features = false, features = ["require-cas"], optional = true }
+portable-atomic-util = { version = "0.2", default-features = false, features = ["alloc"], optional = true }
+
+[dev-dependencies]
+easy-parallel = "3"
+futures-lite = "2"
+
+[target.'cfg(target_family = "wasm")'.dev-dependencies]
+wasm-bindgen-test = "0.3.37"
+
+[features]
+default = ["std"]
+std = ["concurrent-queue/std", "event-listener-strategy/std"]
+portable-atomic = ["concurrent-queue/portable-atomic", "event-listener-strategy/portable-atomic", "dep:portable-atomic-util", "dep:portable-atomic"]
diff --git a/external/vendor/async-channel/LICENSE-APACHE b/external/vendor/async-channel/LICENSE-APACHE
new file mode 100644
index 0000000000..16fe87b06e
--- /dev/null
+++ b/external/vendor/async-channel/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!) The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/external/vendor/async-channel/LICENSE-MIT b/external/vendor/async-channel/LICENSE-MIT
new file mode 100644
index 0000000000..31aa79387f
--- /dev/null
+++ b/external/vendor/async-channel/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/external/vendor/async-channel/README.md b/external/vendor/async-channel/README.md
new file mode 100644
index 0000000000..8809b27c46
--- /dev/null
+++ b/external/vendor/async-channel/README.md
@@ -0,0 +1,51 @@
+# async-channel
+
+[![Build](https://github.com/smol-rs/async-channel/actions/workflows/ci.yml/badge.svg)](
+https://github.com/smol-rs/async-channel/actions)
+[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)](
+https://github.com/smol-rs/async-channel)
+[![Cargo](https://img.shields.io/crates/v/async-channel.svg)](
+https://crates.io/crates/async-channel)
+[![Documentation](https://docs.rs/async-channel/badge.svg)](
+https://docs.rs/async-channel)
+
+An async multi-producer multi-consumer channel, where each message can be received by only
+one of all existing consumers.
+
+There are two kinds of channels:
+
+1. Bounded channel with limited capacity.
+2. Unbounded channel with unlimited capacity.
+
+A channel has the `Sender` and `Receiver` side. Both sides are cloneable and can be shared
+among multiple threads.
+
+When all `Sender`s or all `Receiver`s are dropped, the channel becomes closed. When a
+channel is closed, no more messages can be sent, but remaining messages can still be received.
+
+The channel can also be closed manually by calling `Sender::close()` or
+`Receiver::close()`.
+
+## Examples
+
+```rust
+let (s, r) = async_channel::unbounded();
+
+assert_eq!(s.send("Hello").await, Ok(()));
+assert_eq!(r.recv().await, Ok("Hello"));
+```
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+#### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/external/vendor/async-channel/src/lib.rs b/external/vendor/async-channel/src/lib.rs
new file mode 100644
index 0000000000..61e0b53daa
--- /dev/null
+++ b/external/vendor/async-channel/src/lib.rs
@@ -0,0 +1,1388 @@
+//! An async multi-producer multi-consumer channel, where each message can be received by only
+//! one of all existing consumers.
+//!
+//! There are two kinds of channels:
+//!
+//! 1. [Bounded][`bounded()`] channel with limited capacity.
+//! 2. [Unbounded][`unbounded()`] channel with unlimited capacity.
+//!
+//! A channel has the [`Sender`] and [`Receiver`] side. Both sides are cloneable and can be shared
+//! among multiple threads.
+//!
+//! When all [`Sender`]s or all [`Receiver`]s are dropped, the channel becomes closed. When a
+//! channel is closed, no more messages can be sent, but remaining messages can still be received.
+//!
+//! The channel can also be closed manually by calling [`Sender::close()`] or
+//! [`Receiver::close()`].
+//!
+//! # Examples
+//!
+//! ```
+//! # futures_lite::future::block_on(async {
+//! let (s, r) = async_channel::unbounded();
+//!
+//! assert_eq!(s.send("Hello").await, Ok(()));
+//! assert_eq!(r.recv().await, Ok("Hello"));
+//! # });
+//! ```
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![forbid(unsafe_code)]
+#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
+#![doc(
+    html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
+)]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
+)]
+
+#[cfg(not(feature = "portable-atomic"))]
+extern crate alloc;
+
+use core::fmt;
+use core::future::Future;
+use core::marker::PhantomPinned;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+#[cfg(not(feature = "portable-atomic"))]
+use alloc::sync::Arc;
+#[cfg(not(feature = "portable-atomic"))]
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+#[cfg(feature = "portable-atomic")]
+use portable_atomic::{AtomicUsize, Ordering};
+#[cfg(feature = "portable-atomic")]
+use portable_atomic_util::Arc;
+
+use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError};
+use event_listener_strategy::{
+    easy_wrapper,
+    event_listener::{Event, EventListener},
+    EventListenerFuture, Strategy,
+};
+use futures_core::ready;
+use futures_core::stream::Stream;
+use pin_project_lite::pin_project;
+
+struct Channel<T> {
+    /// Inner message queue.
+    queue: ConcurrentQueue<T>,
+
+    /// Send operations waiting while the channel is full.
+    send_ops: Event,
+
+    /// Receive operations waiting while the channel is empty and not closed.
+    recv_ops: Event,
+
+    /// Stream operations while the channel is empty and not closed.
+    stream_ops: Event,
+
+    /// Closed operations while the channel is not closed.
+    closed_ops: Event,
+
+    /// The number of currently active `Sender`s.
+    sender_count: AtomicUsize,
+
+    /// The number of currently active `Receivers`s.
+    receiver_count: AtomicUsize,
+}
+
+impl<T> Channel<T> {
+    /// Closes the channel and notifies all blocked operations.
+    ///
+    /// Returns `true` if this call has closed the channel and it was not closed already.
+    fn close(&self) -> bool {
+        if self.queue.close() {
+            // Notify all send operations.
+            self.send_ops.notify(usize::MAX);
+
+            // Notify all receive and stream operations.
+            self.recv_ops.notify(usize::MAX);
+            self.stream_ops.notify(usize::MAX);
+            self.closed_ops.notify(usize::MAX);
+
+            true
+        } else {
+            false
+        }
+    }
+}
+
+/// Creates a bounded channel.
+///
+/// The created channel has space to hold at most `cap` messages at a time.
+///
+/// # Panics
+///
+/// Capacity must be a positive number. If `cap` is zero, this function will panic.
+///
+/// # Examples
+///
+/// ```
+/// # futures_lite::future::block_on(async {
+/// use async_channel::{bounded, TryRecvError, TrySendError};
+///
+/// let (s, r) = bounded(1);
+///
+/// assert_eq!(s.send(10).await, Ok(()));
+/// assert_eq!(s.try_send(20), Err(TrySendError::Full(20)));
+///
+/// assert_eq!(r.recv().await, Ok(10));
+/// assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
+/// # });
+/// ```
+pub fn bounded<T>(cap: usize) -> (Sender<T>, Receiver<T>) {
+    assert!(cap > 0, "capacity cannot be zero");
+
+    let channel = Arc::new(Channel {
+        queue: ConcurrentQueue::bounded(cap),
+        send_ops: Event::new(),
+        recv_ops: Event::new(),
+        stream_ops: Event::new(),
+        closed_ops: Event::new(),
+        sender_count: AtomicUsize::new(1),
+        receiver_count: AtomicUsize::new(1),
+    });
+
+    let s = Sender {
+        channel: channel.clone(),
+    };
+    let r = Receiver {
+        listener: None,
+        channel,
+        _pin: PhantomPinned,
+    };
+    (s, r)
+}
+
+/// Creates an unbounded channel.
+///
+/// The created channel can hold an unlimited number of messages.
+///
+/// # Examples
+///
+/// ```
+/// # futures_lite::future::block_on(async {
+/// use async_channel::{unbounded, TryRecvError};
+///
+/// let (s, r) = unbounded();
+///
+/// assert_eq!(s.send(10).await, Ok(()));
+/// assert_eq!(s.send(20).await, Ok(()));
+///
+/// assert_eq!(r.recv().await, Ok(10));
+/// assert_eq!(r.recv().await, Ok(20));
+/// assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
+/// # });
+/// ```
+pub fn unbounded<T>() -> (Sender<T>, Receiver<T>) {
+    let channel = Arc::new(Channel {
+        queue: ConcurrentQueue::unbounded(),
+        send_ops: Event::new(),
+        recv_ops: Event::new(),
+        stream_ops: Event::new(),
+        closed_ops: Event::new(),
+        sender_count: AtomicUsize::new(1),
+        receiver_count: AtomicUsize::new(1),
+    });
+
+    let s = Sender {
+        channel: channel.clone(),
+    };
+    let r = Receiver {
+        listener: None,
+        channel,
+        _pin: PhantomPinned,
+    };
+    (s, r)
+}
+
+/// The sending side of a channel.
+///
+/// Senders can be cloned and shared among threads. When all senders associated with a channel are
+/// dropped, the channel becomes closed.
+///
+/// The channel can also be closed manually by calling [`Sender::close()`].
+pub struct Sender<T> {
+    /// Inner channel state.
+    channel: Arc<Channel<T>>,
+}
+
+impl<T> Sender<T> {
+    /// Attempts to send a message into the channel.
+    ///
+    /// If the channel is full or closed, this method returns an error.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_channel::{bounded, TrySendError};
+    ///
+    /// let (s, r) = bounded(1);
+    ///
+    /// assert_eq!(s.try_send(1), Ok(()));
+    /// assert_eq!(s.try_send(2), Err(TrySendError::Full(2)));
+    ///
+    /// drop(r);
+    /// assert_eq!(s.try_send(3), Err(TrySendError::Closed(3)));
+    /// ```
+    pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
+        match self.channel.queue.push(msg) {
+            Ok(()) => {
+                // Notify a blocked receive operation. If the notified operation gets canceled,
+                // it will notify another blocked receive operation.
+                self.channel.recv_ops.notify_additional(1);
+
+                // Notify all blocked streams.
+                self.channel.stream_ops.notify(usize::MAX);
+
+                Ok(())
+            }
+            Err(PushError::Full(msg)) => Err(TrySendError::Full(msg)),
+            Err(PushError::Closed(msg)) => Err(TrySendError::Closed(msg)),
+        }
+    }
+
+    /// Sends a message into the channel.
+    ///
+    /// If the channel is full, this method waits until there is space for a message.
+    ///
+    /// If the channel is closed, this method returns an error.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{unbounded, SendError};
+    ///
+    /// let (s, r) = unbounded();
+    ///
+    /// assert_eq!(s.send(1).await, Ok(()));
+    /// drop(r);
+    /// assert_eq!(s.send(2).await, Err(SendError(2)));
+    /// # });
+    /// ```
+    pub fn send(&self, msg: T) -> Send<'_, T> {
+        Send::_new(SendInner {
+            sender: self,
+            msg: Some(msg),
+            listener: None,
+            _pin: PhantomPinned,
+        })
+    }
+
+    /// Completes when all receivers have dropped.
+    ///
+    /// This allows the producers to get notified when interest in the produced values is canceled and immediately stop doing work.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{unbounded, SendError};
+    ///
+    /// let (s, r) = unbounded::<i32>();
+    /// drop(r);
+    /// s.closed().await;
+    /// # });
+    /// ```
+    pub fn closed(&self) -> Closed<'_, T> {
+        Closed::_new(ClosedInner {
+            sender: self,
+            listener: None,
+            _pin: PhantomPinned,
+        })
+    }
+
+    /// Sends a message into this channel using the blocking strategy.
+    ///
+    /// If the channel is full, this method will block until there is room.
+    /// If the channel is closed, this method returns an error.
+    ///
+    /// # Blocking
+    ///
+    /// Rather than using asynchronous waiting, like the [`send`](Self::send) method,
+    /// this method will block the current thread until the message is sent.
+    ///
+    /// This method should not be used in an asynchronous context. It is intended
+    /// to be used such that a channel can be used in both asynchronous and synchronous contexts.
+    /// Calling this method in an asynchronous context may result in deadlocks.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_channel::{unbounded, SendError};
+    ///
+    /// let (s, r) = unbounded();
+    ///
+    /// assert_eq!(s.send_blocking(1), Ok(()));
+    /// drop(r);
+    /// assert_eq!(s.send_blocking(2), Err(SendError(2)));
+    /// ```
+    #[cfg(all(feature = "std", not(target_family = "wasm")))]
+    pub fn send_blocking(&self, msg: T) -> Result<(), SendError<T>> {
+        self.send(msg).wait()
+    }
+
+    /// Forcefully push a message into this channel.
+    ///
+    /// If the channel is full, this method will replace an existing message in the
+    /// channel and return it as `Ok(Some(value))`. If the channel is closed, this
+    /// method will return an error.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{bounded, SendError};
+    ///
+    /// let (s, r) = bounded(3);
+    ///
+    /// assert_eq!(s.send(1).await, Ok(()));
+    /// assert_eq!(s.send(2).await, Ok(()));
+    /// assert_eq!(s.force_send(3), Ok(None));
+    /// assert_eq!(s.force_send(4), Ok(Some(1)));
+    ///
+    /// assert_eq!(r.recv().await, Ok(2));
+    /// assert_eq!(r.recv().await, Ok(3));
+    /// assert_eq!(r.recv().await, Ok(4));
+    /// # });
+    /// ```
+    pub fn force_send(&self, msg: T) -> Result<Option<T>, SendError<T>> {
+        match self.channel.queue.force_push(msg) {
+            Ok(backlog) => {
+                // Notify a blocked receive operation. If the notified operation gets canceled,
+                // it will notify another blocked receive operation.
+                self.channel.recv_ops.notify_additional(1);
+
+                // Notify all blocked streams.
+                self.channel.stream_ops.notify(usize::MAX);
+
+                Ok(backlog)
+            }
+
+            Err(ForcePushError(reject)) => Err(SendError(reject)),
+        }
+    }
+
+    /// Closes the channel.
+    ///
+    /// Returns `true` if this call has closed the channel and it was not closed already.
+    ///
+    /// The remaining messages can still be received.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{unbounded, RecvError};
+    ///
+    /// let (s, r) = unbounded();
+    /// assert_eq!(s.send(1).await, Ok(()));
+    /// assert!(s.close());
+    ///
+    /// assert_eq!(r.recv().await, Ok(1));
+    /// assert_eq!(r.recv().await, Err(RecvError));
+    /// # });
+    /// ```
+    pub fn close(&self) -> bool {
+        self.channel.close()
+    }
+
+    /// Returns `true` if the channel is closed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{unbounded, RecvError};
+    ///
+    /// let (s, r) = unbounded::<()>();
+    /// assert!(!s.is_closed());
+    ///
+    /// drop(r);
+    /// assert!(s.is_closed());
+    /// # });
+    /// ```
+    pub fn is_closed(&self) -> bool {
+        self.channel.queue.is_closed()
+    }
+
+    /// Returns `true` if the channel is empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded();
+    ///
+    /// assert!(s.is_empty());
+    /// s.send(1).await;
+    /// assert!(!s.is_empty());
+    /// # });
+    /// ```
+    pub fn is_empty(&self) -> bool {
+        self.channel.queue.is_empty()
+    }
+
+    /// Returns `true` if the channel is full.
+    ///
+    /// Unbounded channels are never full.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::bounded;
+    ///
+    /// let (s, r) = bounded(1);
+    ///
+    /// assert!(!s.is_full());
+    /// s.send(1).await;
+    /// assert!(s.is_full());
+    /// # });
+    /// ```
+    pub fn is_full(&self) -> bool {
+        self.channel.queue.is_full()
+    }
+
+    /// Returns the number of messages in the channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded();
+    /// assert_eq!(s.len(), 0);
+    ///
+    /// s.send(1).await;
+    /// s.send(2).await;
+    /// assert_eq!(s.len(), 2);
+    /// # });
+    /// ```
+    pub fn len(&self) -> usize {
+        self.channel.queue.len()
+    }
+
+    /// Returns the channel capacity if it's bounded.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_channel::{bounded, unbounded};
+    ///
+    /// let (s, r) = bounded::<i32>(5);
+    /// assert_eq!(s.capacity(), Some(5));
+    ///
+    /// let (s, r) = unbounded::<i32>();
+    /// assert_eq!(s.capacity(), None);
+    /// ```
+    pub fn capacity(&self) -> Option<usize> {
+        self.channel.queue.capacity()
+    }
+
+    /// Returns the number of receivers for the channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded::<()>();
+    /// assert_eq!(s.receiver_count(), 1);
+    ///
+    /// let r2 = r.clone();
+    /// assert_eq!(s.receiver_count(), 2);
+    /// # });
+    /// ```
+    pub fn receiver_count(&self) -> usize {
+        self.channel.receiver_count.load(Ordering::SeqCst)
+    }
+
+    /// Returns the number of senders for the channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded::<()>();
+    /// assert_eq!(s.sender_count(), 1);
+    ///
+    /// let s2 = s.clone();
+    /// assert_eq!(s.sender_count(), 2);
+    /// # });
+    /// ```
+    pub fn sender_count(&self) -> usize {
+        self.channel.sender_count.load(Ordering::SeqCst)
+    }
+
+    /// Downgrade the sender to a weak reference.
+    pub fn downgrade(&self) -> WeakSender<T> {
+        WeakSender {
+            channel: self.channel.clone(),
+        }
+    }
+
+    /// Returns whether the senders belong to the same channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded::<()>();
+    /// let s2 = s.clone();
+    ///
+    /// assert!(s.same_channel(&s2));
+    /// # });
+    /// ```
+    pub fn same_channel(&self, other: &Sender<T>) -> bool {
+        Arc::ptr_eq(&self.channel, &other.channel)
+    }
+}
+
+impl<T> Drop for Sender<T> {
+    fn drop(&mut self) {
+        // Decrement the sender count and close the channel if it drops down to zero.
+        if self.channel.sender_count.fetch_sub(1, Ordering::AcqRel) == 1 {
+            self.channel.close();
+        }
+    }
+}
+
+impl<T> fmt::Debug for Sender<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Sender {{ .. }}")
+    }
+}
+
+impl<T> Clone for Sender<T> {
+    fn clone(&self) -> Sender<T> {
+        let count = self.channel.sender_count.fetch_add(1, Ordering::Relaxed);
+
+        // Make sure the count never overflows, even if lots of sender clones are leaked.
+        if count > usize::MAX / 2 {
+            abort();
+        }
+
+        Sender {
+            channel: self.channel.clone(),
+        }
+    }
+}
+
+pin_project! {
+    /// The receiving side of a channel.
+    ///
+    /// Receivers can be cloned and shared among threads. When all receivers associated with a channel
+    /// are dropped, the channel becomes closed.
+    ///
+    /// The channel can also be closed manually by calling [`Receiver::close()`].
+    ///
+    /// Receivers implement the [`Stream`] trait.
+    pub struct Receiver<T> {
+        // Inner channel state.
+        channel: Arc<Channel<T>>,
+
+        // Listens for a send or close event to unblock this stream.
+        listener: Option<EventListener>,
+
+        // Keeping this type `!Unpin` enables future optimizations.
+        #[pin]
+        _pin: PhantomPinned
+    }
+
+    impl<T> PinnedDrop for Receiver<T> {
+        fn drop(this: Pin<&mut Self>) {
+            let this = this.project();
+
+            // Decrement the receiver count and close the channel if it drops down to zero.
+            if this.channel.receiver_count.fetch_sub(1, Ordering::AcqRel) == 1 {
+                this.channel.close();
+            }
+        }
+    }
+}
+
+impl<T> Receiver<T> {
+    /// Attempts to receive a message from the channel.
+    ///
+    /// If the channel is empty, or empty and closed, this method returns an error.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{unbounded, TryRecvError};
+    ///
+    /// let (s, r) = unbounded();
+    /// assert_eq!(s.send(1).await, Ok(()));
+    ///
+    /// assert_eq!(r.try_recv(), Ok(1));
+    /// assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
+    ///
+    /// drop(s);
+    /// assert_eq!(r.try_recv(), Err(TryRecvError::Closed));
+    /// # });
+    /// ```
+    pub fn try_recv(&self) -> Result<T, TryRecvError> {
+        match self.channel.queue.pop() {
+            Ok(msg) => {
+                // Notify a blocked send operation. If the notified operation gets canceled, it
+                // will notify another blocked send operation.
+                self.channel.send_ops.notify_additional(1);
+
+                Ok(msg)
+            }
+            Err(PopError::Empty) => Err(TryRecvError::Empty),
+            Err(PopError::Closed) => Err(TryRecvError::Closed),
+        }
+    }
+
+    /// Receives a message from the channel.
+    ///
+    /// If the channel is empty, this method waits until there is a message.
+    ///
+    /// If the channel is closed, this method receives a message or returns an error if there are
+    /// no more messages.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{unbounded, RecvError};
+    ///
+    /// let (s, r) = unbounded();
+    ///
+    /// assert_eq!(s.send(1).await, Ok(()));
+    /// drop(s);
+    ///
+    /// assert_eq!(r.recv().await, Ok(1));
+    /// assert_eq!(r.recv().await, Err(RecvError));
+    /// # });
+    /// ```
+    pub fn recv(&self) -> Recv<'_, T> {
+        Recv::_new(RecvInner {
+            receiver: self,
+            listener: None,
+            _pin: PhantomPinned,
+        })
+    }
+
+    /// Receives a message from the channel using the blocking strategy.
+    ///
+    /// If the channel is empty, this method waits until there is a message.
+    /// If the channel is closed, this method receives a message or returns an error if there are
+    /// no more messages.
+    ///
+    /// # Blocking
+    ///
+    /// Rather than using asynchronous waiting, like the [`recv`](Self::recv) method,
+    /// this method will block the current thread until the message is received.
+    ///
+    /// This method should not be used in an asynchronous context. It is intended
+    /// to be used such that a channel can be used in both asynchronous and synchronous contexts.
+    /// Calling this method in an asynchronous context may result in deadlocks.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_channel::{unbounded, RecvError};
+    ///
+    /// let (s, r) = unbounded();
+    ///
+    /// assert_eq!(s.send_blocking(1), Ok(()));
+    /// drop(s);
+    ///
+    /// assert_eq!(r.recv_blocking(), Ok(1));
+    /// assert_eq!(r.recv_blocking(), Err(RecvError));
+    /// ```
+    #[cfg(all(feature = "std", not(target_family = "wasm")))]
+    pub fn recv_blocking(&self) -> Result<T, RecvError> {
+        self.recv().wait()
+    }
+
+    /// Closes the channel.
+    ///
+    /// Returns `true` if this call has closed the channel and it was not closed already.
+    ///
+    /// The remaining messages can still be received.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{unbounded, RecvError};
+    ///
+    /// let (s, r) = unbounded();
+    /// assert_eq!(s.send(1).await, Ok(()));
+    ///
+    /// assert!(r.close());
+    /// assert_eq!(r.recv().await, Ok(1));
+    /// assert_eq!(r.recv().await, Err(RecvError));
+    /// # });
+    /// ```
+    pub fn close(&self) -> bool {
+        self.channel.close()
+    }
+
+    /// Returns `true` if the channel is closed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::{unbounded, RecvError};
+    ///
+    /// let (s, r) = unbounded::<()>();
+    /// assert!(!r.is_closed());
+    ///
+    /// drop(s);
+    /// assert!(r.is_closed());
+    /// # });
+    /// ```
+    pub fn is_closed(&self) -> bool {
+        self.channel.queue.is_closed()
+    }
+
+    /// Returns `true` if the channel is empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded();
+    ///
+    /// assert!(s.is_empty());
+    /// s.send(1).await;
+    /// assert!(!s.is_empty());
+    /// # });
+    /// ```
+    pub fn is_empty(&self) -> bool {
+        self.channel.queue.is_empty()
+    }
+
+    /// Returns `true` if the channel is full.
+    ///
+    /// Unbounded channels are never full.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::bounded;
+    ///
+    /// let (s, r) = bounded(1);
+    ///
+    /// assert!(!r.is_full());
+    /// s.send(1).await;
+    /// assert!(r.is_full());
+    /// # });
+    /// ```
+    pub fn is_full(&self) -> bool {
+        self.channel.queue.is_full()
+    }
+
+    /// Returns the number of messages in the channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded();
+    /// assert_eq!(r.len(), 0);
+    ///
+    /// s.send(1).await;
+    /// s.send(2).await;
+    /// assert_eq!(r.len(), 2);
+    /// # });
+    /// ```
+    pub fn len(&self) -> usize {
+        self.channel.queue.len()
+    }
+
+    /// Returns the channel capacity if it's bounded.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_channel::{bounded, unbounded};
+    ///
+    /// let (s, r) = bounded::<i32>(5);
+    /// assert_eq!(r.capacity(), Some(5));
+    ///
+    /// let (s, r) = unbounded::<i32>();
+    /// assert_eq!(r.capacity(), None);
+    /// ```
+    pub fn capacity(&self) -> Option<usize> {
+        self.channel.queue.capacity()
+    }
+
+    /// Returns the number of receivers for the channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded::<()>();
+    /// assert_eq!(r.receiver_count(), 1);
+    ///
+    /// let r2 = r.clone();
+    /// assert_eq!(r.receiver_count(), 2);
+    /// # });
+    /// ```
+    pub fn receiver_count(&self) -> usize {
+        self.channel.receiver_count.load(Ordering::SeqCst)
+    }
+
+    /// Returns the number of senders for the channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded::<()>();
+    /// assert_eq!(r.sender_count(), 1);
+    ///
+    /// let s2 = s.clone();
+    /// assert_eq!(r.sender_count(), 2);
+    /// # });
+    /// ```
+    pub fn sender_count(&self) -> usize {
+        self.channel.sender_count.load(Ordering::SeqCst)
+    }
+
+    /// Downgrade the receiver to a weak reference.
+    pub fn downgrade(&self) -> WeakReceiver<T> {
+        WeakReceiver {
+            channel: self.channel.clone(),
+        }
+    }
+
+    /// Returns whether the receivers belong to the same channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # futures_lite::future::block_on(async {
+    /// use async_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded::<()>();
+    /// let r2 = r.clone();
+    ///
+    /// assert!(r.same_channel(&r2));
+    /// # });
+    /// ```
+    pub fn same_channel(&self, other: &Receiver<T>) -> bool {
+        Arc::ptr_eq(&self.channel, &other.channel)
+    }
+}
+
+impl<T> fmt::Debug for Receiver<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Receiver {{ .. }}")
+    }
+}
+
+impl<T> Clone for Receiver<T> {
+    fn clone(&self) -> Receiver<T> {
+        let count = self.channel.receiver_count.fetch_add(1, Ordering::Relaxed);
+
+        // Make sure the count never overflows, even if lots of receiver clones are leaked.
+        if count > usize::MAX / 2 {
+            abort();
+        }
+
+        Receiver {
+            channel: self.channel.clone(),
+            listener: None,
+            _pin: PhantomPinned,
+        }
+    }
+}
+
+impl<T> Stream for Receiver<T> {
+    type Item = T;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        loop {
+            // If this stream is listening for events, first wait for a notification.
+            {
+                let this = self.as_mut().project();
+                if let Some(listener) = this.listener.as_mut() {
+                    ready!(Pin::new(listener).poll(cx));
+                    *this.listener = None;
+                }
+            }
+
+            loop {
+                // Attempt to receive a message.
+                match self.try_recv() {
+                    Ok(msg) => {
+                        // The stream is not blocked on an event - drop the listener.
+                        let this = self.as_mut().project();
+                        *this.listener = None;
+                        return Poll::Ready(Some(msg));
+                    }
+                    Err(TryRecvError::Closed) => {
+                        // The stream is not blocked on an event - drop the listener.
+                        let this = self.as_mut().project();
+                        *this.listener = None;
+                        return Poll::Ready(None);
+                    }
+                    Err(TryRecvError::Empty) => {}
+                }
+
+                // Receiving failed - now start listening for notifications or wait for one.
+                let this = self.as_mut().project();
+                if this.listener.is_some() {
+                    // Go back to the outer loop to wait for a notification.
+                    break;
+                } else {
+                    *this.listener = Some(this.channel.stream_ops.listen());
+                }
+            }
+        }
+    }
+}
+
+impl<T> futures_core::stream::FusedStream for Receiver<T> {
+    fn is_terminated(&self) -> bool {
+        self.channel.queue.is_closed() && self.channel.queue.is_empty()
+    }
+}
+
+/// A [`Sender`] that does not prevent the channel from being closed.
+///
+/// This is created through the [`Sender::downgrade`] method. In order to use it, it needs
+/// to be upgraded into a [`Sender`] through the `upgrade` method.
+pub struct WeakSender<T> {
+    channel: Arc<Channel<T>>,
+}
+
+impl<T> WeakSender<T> {
+    /// Upgrade the [`WeakSender`] into a [`Sender`].
+    pub fn upgrade(&self) -> Option<Sender<T>> {
+        if self.channel.queue.is_closed() {
+            None
+        } else {
+            match self.channel.sender_count.fetch_update(
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+                |count| if count == 0 { None } else { Some(count + 1) },
+            ) {
+                Err(_) => None,
+                Ok(new_value) if new_value > usize::MAX / 2 => {
+                    // Make sure the count never overflows, even if lots of sender clones are leaked.
+                    abort();
+                }
+                Ok(_) => Some(Sender {
+                    channel: self.channel.clone(),
+                }),
+            }
+        }
+    }
+}
+
+impl<T> Clone for WeakSender<T> {
+    fn clone(&self) -> Self {
+        WeakSender {
+            channel: self.channel.clone(),
+        }
+    }
+}
+
+impl<T> fmt::Debug for WeakSender<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "WeakSender {{ .. }}")
+    }
+}
+
+/// A [`Receiver`] that does not prevent the channel from being closed.
+///
+/// This is created through the [`Receiver::downgrade`] method. In order to use it, it needs
+/// to be upgraded into a [`Receiver`] through the `upgrade` method.
+pub struct WeakReceiver<T> {
+    channel: Arc<Channel<T>>,
+}
+
+impl<T> WeakReceiver<T> {
+    /// Upgrade the [`WeakReceiver`] into a [`Receiver`].
+    pub fn upgrade(&self) -> Option<Receiver<T>> {
+        if self.channel.queue.is_closed() {
+            None
+        } else {
+            match self.channel.receiver_count.fetch_update(
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+                |count| if count == 0 { None } else { Some(count + 1) },
+            ) {
+                Err(_) => None,
+                Ok(new_value) if new_value > usize::MAX / 2 => {
+                    // Make sure the count never overflows, even if lots of receiver clones are leaked.
+                    abort();
+                }
+                Ok(_) => Some(Receiver {
+                    channel: self.channel.clone(),
+                    listener: None,
+                    _pin: PhantomPinned,
+                }),
+            }
+        }
+    }
+}
+
+impl<T> Clone for WeakReceiver<T> {
+    fn clone(&self) -> Self {
+        WeakReceiver {
+            channel: self.channel.clone(),
+        }
+    }
+}
+
+impl<T> fmt::Debug for WeakReceiver<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "WeakReceiver {{ .. }}")
+    }
+}
+
+/// An error returned from [`Sender::send()`].
+///
+/// Received because the channel is closed.
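+///
+/// # Examples
+///
+/// ```
+/// # futures_lite::future::block_on(async {
+/// use async_channel::{unbounded, SendError};
+///
+/// let (s, r) = unbounded();
+/// drop(r);
+///
+/// // The unsent message is returned inside the error.
+/// assert_eq!(s.send(1).await, Err(SendError(1)));
+/// assert_eq!(SendError(1).into_inner(), 1);
+/// # });
+/// ```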
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub struct SendError<T>(pub T);
+
+impl<T> SendError<T> {
+    /// Unwraps the message that couldn't be sent.
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+#[cfg(feature = "std")]
+impl<T> std::error::Error for SendError<T> {}
+
+impl<T> fmt::Debug for SendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "SendError(..)")
+    }
+}
+
+impl<T> fmt::Display for SendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "sending into a closed channel")
+    }
+}
+
+/// An error returned from [`Sender::try_send()`].
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub enum TrySendError<T> {
+    /// The channel is full but not closed.
+    Full(T),
+
+    /// The channel is closed.
+    Closed(T),
+}
+
+impl<T> TrySendError<T> {
+    /// Unwraps the message that couldn't be sent.
+    pub fn into_inner(self) -> T {
+        match self {
+            TrySendError::Full(t) => t,
+            TrySendError::Closed(t) => t,
+        }
+    }
+
+    /// Returns `true` if the channel is full but not closed.
+    pub fn is_full(&self) -> bool {
+        match self {
+            TrySendError::Full(_) => true,
+            TrySendError::Closed(_) => false,
+        }
+    }
+
+    /// Returns `true` if the channel is closed.
+    pub fn is_closed(&self) -> bool {
+        match self {
+            TrySendError::Full(_) => false,
+            TrySendError::Closed(_) => true,
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl<T> std::error::Error for TrySendError<T> {}
+
+impl<T> fmt::Debug for TrySendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            TrySendError::Full(..) => write!(f, "Full(..)"),
+            TrySendError::Closed(..) => write!(f, "Closed(..)"),
+        }
+    }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            TrySendError::Full(..) => write!(f, "sending into a full channel"),
+            TrySendError::Closed(..) => write!(f, "sending into a closed channel"),
+        }
+    }
+}
+
+/// An error returned from [`Receiver::recv()`].
+///
+/// Received because the channel is empty and closed.
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct RecvError;
+
+#[cfg(feature = "std")]
+impl std::error::Error for RecvError {}
+
+impl fmt::Display for RecvError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "receiving from an empty and closed channel")
+    }
+}
+
+/// An error returned from [`Receiver::try_recv()`].
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub enum TryRecvError {
+    /// The channel is empty but not closed.
+    Empty,
+
+    /// The channel is empty and closed.
+    Closed,
+}
+
+impl TryRecvError {
+    /// Returns `true` if the channel is empty but not closed.
+    pub fn is_empty(&self) -> bool {
+        match self {
+            TryRecvError::Empty => true,
+            TryRecvError::Closed => false,
+        }
+    }
+
+    /// Returns `true` if the channel is empty and closed.
+    pub fn is_closed(&self) -> bool {
+        match self {
+            TryRecvError::Empty => false,
+            TryRecvError::Closed => true,
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for TryRecvError {}
+
+impl fmt::Display for TryRecvError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            TryRecvError::Empty => write!(f, "receiving from an empty channel"),
+            TryRecvError::Closed => write!(f, "receiving from an empty and closed channel"),
+        }
+    }
+}
+
+easy_wrapper! {
+    /// A future returned by [`Sender::send()`].
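+    ///
+    /// Awaiting this future blocks (asynchronously) while the channel is full, and
+    /// resolves to `Err(SendError(msg))` if the channel closes before the message
+    /// can be queued.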
+    #[derive(Debug)]
+    #[must_use = "futures do nothing unless you `.await` or poll them"]
+    pub struct Send<'a, T>(SendInner<'a, T> => Result<(), SendError<T>>);
+    #[cfg(all(feature = "std", not(target_family = "wasm")))]
+    pub(crate) wait();
+}
+
+pin_project! {
+    #[derive(Debug)]
+    #[project(!Unpin)]
+    struct SendInner<'a, T> {
+        // Reference to the original sender.
+        sender: &'a Sender<T>,
+
+        // The message to send.
+        msg: Option<T>,
+
+        // Listener waiting on the channel.
+        listener: Option<EventListener>,
+
+        // Keeping this type `!Unpin` enables future optimizations.
+        #[pin]
+        _pin: PhantomPinned
+    }
+}
+
+impl<T> EventListenerFuture for SendInner<'_, T> {
+    type Output = Result<(), SendError<T>>;
+
+    /// Run this future with the given `Strategy`.
+    fn poll_with_strategy<'x, S: Strategy<'x>>(
+        self: Pin<&mut Self>,
+        strategy: &mut S,
+        context: &mut S::Context,
+    ) -> Poll<Result<(), SendError<T>>> {
+        let this = self.project();
+
+        loop {
+            let msg = this.msg.take().unwrap();
+            // Attempt to send a message.
+            match this.sender.try_send(msg) {
+                Ok(()) => return Poll::Ready(Ok(())),
+                Err(TrySendError::Closed(msg)) => return Poll::Ready(Err(SendError(msg))),
+                Err(TrySendError::Full(m)) => *this.msg = Some(m),
+            }
+
+            // Sending failed - now start listening for notifications or wait for one.
+            if this.listener.is_some() {
+                // Poll using the given strategy
+                ready!(S::poll(strategy, &mut *this.listener, context));
+            } else {
+                *this.listener = Some(this.sender.channel.send_ops.listen());
+            }
+        }
+    }
+}
+
+easy_wrapper! {
+    /// A future returned by [`Receiver::recv()`].
+    #[derive(Debug)]
+    #[must_use = "futures do nothing unless you `.await` or poll them"]
+    pub struct Recv<'a, T>(RecvInner<'a, T> => Result<T, RecvError>);
+    #[cfg(all(feature = "std", not(target_family = "wasm")))]
+    pub(crate) wait();
+}
+
+pin_project! {
+    #[derive(Debug)]
+    #[project(!Unpin)]
+    struct RecvInner<'a, T> {
+        // Reference to the receiver.
+        receiver: &'a Receiver<T>,
+
+        // Listener waiting on the channel.
+        listener: Option<EventListener>,
+
+        // Keeping this type `!Unpin` enables future optimizations.
+        #[pin]
+        _pin: PhantomPinned
+    }
+}
+
+impl<T> EventListenerFuture for RecvInner<'_, T> {
+    type Output = Result<T, RecvError>;
+
+    /// Run this future with the given `Strategy`.
+    fn poll_with_strategy<'x, S: Strategy<'x>>(
+        self: Pin<&mut Self>,
+        strategy: &mut S,
+        cx: &mut S::Context,
+    ) -> Poll<Result<T, RecvError>> {
+        let this = self.project();
+
+        loop {
+            // Attempt to receive a message.
+            match this.receiver.try_recv() {
+                Ok(msg) => return Poll::Ready(Ok(msg)),
+                Err(TryRecvError::Closed) => return Poll::Ready(Err(RecvError)),
+                Err(TryRecvError::Empty) => {}
+            }
+
+            // Receiving failed - now start listening for notifications or wait for one.
+            if this.listener.is_some() {
+                // Poll using the given strategy
+                ready!(S::poll(strategy, &mut *this.listener, cx));
+            } else {
+                *this.listener = Some(this.receiver.channel.recv_ops.listen());
+            }
+        }
+    }
+}
+
+easy_wrapper! {
+    /// A future returned by [`Sender::closed()`].
+    #[derive(Debug)]
+    #[must_use = "futures do nothing unless you `.await` or poll them"]
+    pub struct Closed<'a, T>(ClosedInner<'a, T> => ());
+    #[cfg(all(feature = "std", not(target_family = "wasm")))]
+    pub(crate) wait();
+}
+
+pin_project! {
+    #[derive(Debug)]
+    #[project(!Unpin)]
+    struct ClosedInner<'a, T> {
+        // Reference to the sender.
+        sender: &'a Sender<T>,
+
+        // Listener waiting on the channel.
+        listener: Option<EventListener>,
+
+        // Keeping this type `!Unpin` enables future optimizations.
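+        // Pinning the `PhantomPinned` field below is what actually opts the
+        // struct out of `Unpin`.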
+ #[pin] + _pin: PhantomPinned + } +} + +impl<'a, T> EventListenerFuture for ClosedInner<'a, T> { + type Output = (); + + /// Run this future with the given `Strategy`. + fn poll_with_strategy<'x, S: Strategy<'x>>( + self: Pin<&mut Self>, + strategy: &mut S, + cx: &mut S::Context, + ) -> Poll<()> { + let this = self.project(); + + loop { + // Check if the channel is closed. + if this.sender.is_closed() { + return Poll::Ready(()); + } + + // Not closed - now start listening for notifications or wait for one. + if this.listener.is_some() { + // Poll using the given strategy + ready!(S::poll(strategy, &mut *this.listener, cx)); + } else { + *this.listener = Some(this.sender.channel.closed_ops.listen()); + } + } + } +} + +#[cfg(feature = "std")] +use std::process::abort; + +#[cfg(not(feature = "std"))] +fn abort() -> ! { + struct PanicOnDrop; + + impl Drop for PanicOnDrop { + fn drop(&mut self) { + panic!("Panic while panicking to abort"); + } + } + + let _bomb = PanicOnDrop; + panic!("Panic while panicking to abort") +} diff --git a/external/vendor/async-channel/tests/bounded.rs b/external/vendor/async-channel/tests/bounded.rs new file mode 100644 index 0000000000..25b01f0b31 --- /dev/null +++ b/external/vendor/async-channel/tests/bounded.rs @@ -0,0 +1,552 @@ +#![allow(clippy::bool_assert_comparison, unused_imports)] + +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; + +use async_channel::{bounded, RecvError, SendError, TryRecvError, TrySendError}; +use easy_parallel::Parallel; +use futures_lite::{future, prelude::*}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[cfg(not(target_family = "wasm"))] +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke() { + let (s, r) = bounded(1); + + future::block_on(s.send(7)).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + future::block_on(s.send(8)).unwrap(); + assert_eq!(future::block_on(r.recv()), Ok(8)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +#[test] +fn smoke_blocking() { + let (s, r) = bounded(1); + + s.send_blocking(7).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + s.send_blocking(8).unwrap(); + assert_eq!(future::block_on(r.recv()), Ok(8)); + + future::block_on(s.send(9)).unwrap(); + assert_eq!(r.recv_blocking(), Ok(9)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn capacity() { + for i in 1..10 { + let (s, r) = bounded::<()>(i); + assert_eq!(s.capacity(), Some(i)); + assert_eq!(r.capacity(), Some(i)); + } +} + +#[test] +fn len_empty_full() { + let (s, r) = bounded(2); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); + + future::block_on(s.send(())).unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), false); + + future::block_on(s.send(())).unwrap(); + + assert_eq!(s.len(), 2); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), true); + assert_eq!(r.len(), 2); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), true); + + future::block_on(r.recv()).unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + 
assert_eq!(r.is_full(), false); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn try_recv() { + let (s, r) = bounded(100); + + Parallel::new() + .add(move || { + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + sleep(ms(1500)); + assert_eq!(r.try_recv(), Ok(7)); + sleep(ms(500)); + assert_eq!(r.try_recv(), Err(TryRecvError::Closed)); + }) + .add(move || { + sleep(ms(1000)); + future::block_on(s.send(7)).unwrap(); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn recv() { + let (s, r) = bounded(100); + + Parallel::new() + .add(move || { + assert_eq!(future::block_on(r.recv()), Ok(7)); + sleep(ms(1000)); + assert_eq!(future::block_on(r.recv()), Ok(8)); + sleep(ms(1000)); + assert_eq!(future::block_on(r.recv()), Ok(9)); + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + sleep(ms(1500)); + future::block_on(s.send(7)).unwrap(); + future::block_on(s.send(8)).unwrap(); + future::block_on(s.send(9)).unwrap(); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn try_send() { + let (s, r) = bounded(1); + + Parallel::new() + .add(move || { + assert_eq!(s.try_send(1), Ok(())); + assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); + sleep(ms(1500)); + assert_eq!(s.try_send(3), Ok(())); + sleep(ms(500)); + assert_eq!(s.try_send(4), Err(TrySendError::Closed(4))); + }) + .add(move || { + sleep(ms(1000)); + assert_eq!(r.try_recv(), Ok(1)); + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + assert_eq!(future::block_on(r.recv()), Ok(3)); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn send() { + let (s, r) = bounded(1); + + Parallel::new() + .add(|| { + future::block_on(s.send(7)).unwrap(); + sleep(ms(1000)); + future::block_on(s.send(8)).unwrap(); + sleep(ms(1000)); + future::block_on(s.send(9)).unwrap(); + sleep(ms(1000)); + future::block_on(s.send(10)).unwrap(); + }) + .add(|| { + sleep(ms(1500)); + assert_eq!(future::block_on(r.recv()), Ok(7)); + assert_eq!(future::block_on(r.recv()), Ok(8)); + assert_eq!(future::block_on(r.recv()), Ok(9)); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn closed() { + let (s, r) = bounded(1); + + Parallel::new() + .add(|| { + future::block_on(s.send(7)).unwrap(); + let before = s.closed(); + let mut before = std::pin::pin!(before); + assert!(future::block_on(future::poll_once(&mut before)).is_none()); + sleep(ms(1000)); + assert_eq!(future::block_on(future::poll_once(before)), Some(())); + assert_eq!(future::block_on(future::poll_once(s.closed())), Some(())); + }) + .add(|| { + assert_eq!(future::block_on(r.recv()), Ok(7)); + sleep(ms(500)); + drop(r); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn force_send() { + let (s, r) = bounded(1); + + Parallel::new() + .add(|| { + s.force_send(7).unwrap(); + sleep(ms(1000)); + s.force_send(8).unwrap(); + sleep(ms(1000)); + s.force_send(9).unwrap(); + sleep(ms(1000)); + s.force_send(10).unwrap(); + }) + .add(|| { + sleep(ms(1500)); + assert_eq!(future::block_on(r.recv()), Ok(8)); + assert_eq!(future::block_on(r.recv()), Ok(9)); + assert_eq!(future::block_on(r.recv()), Ok(10)); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn send_after_close() { + let (s, r) = bounded(100); + + future::block_on(s.send(1)).unwrap(); + future::block_on(s.send(2)).unwrap(); + future::block_on(s.send(3)).unwrap(); + + drop(r); + + assert_eq!(future::block_on(s.send(4)), Err(SendError(4))); + assert_eq!(s.try_send(5), Err(TrySendError::Closed(5))); + 
assert_eq!(future::block_on(s.send(6)), Err(SendError(6))); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn recv_after_close() { + let (s, r) = bounded(100); + + future::block_on(s.send(1)).unwrap(); + future::block_on(s.send(2)).unwrap(); + future::block_on(s.send(3)).unwrap(); + + drop(s); + + assert_eq!(future::block_on(r.recv()), Ok(1)); + assert_eq!(future::block_on(r.recv()), Ok(2)); + assert_eq!(future::block_on(r.recv()), Ok(3)); + assert_eq!(future::block_on(r.recv()), Err(RecvError)); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn len() { + const COUNT: usize = 25_000; + const CAP: usize = 1000; + + let (s, r) = bounded(CAP); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + for _ in 0..CAP / 10 { + for i in 0..50 { + future::block_on(s.send(i)).unwrap(); + assert_eq!(s.len(), i + 1); + } + + for i in 0..50 { + future::block_on(r.recv()).unwrap(); + assert_eq!(r.len(), 50 - i - 1); + } + } + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + for i in 0..CAP { + future::block_on(s.send(i)).unwrap(); + assert_eq!(s.len(), i + 1); + } + + for _ in 0..CAP { + future::block_on(r.recv()).unwrap(); + } + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + assert_eq!(future::block_on(r.recv()), Ok(i)); + let len = r.len(); + assert!(len <= CAP); + } + }) + .add(|| { + for i in 0..COUNT { + future::block_on(s.send(i)).unwrap(); + let len = s.len(); + assert!(len <= CAP); + } + }) + .run(); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); +} + +#[test] +fn receiver_count() { + let (s, r) = bounded::<()>(5); + let receiver_clones: Vec<_> = (0..20).map(|_| r.clone()).collect(); + + assert_eq!(s.receiver_count(), 21); + assert_eq!(r.receiver_count(), 21); + + drop(receiver_clones); + + assert_eq!(s.receiver_count(), 1); + assert_eq!(r.receiver_count(), 1); +} + +#[test] +fn sender_count() { + let (s, r) = bounded::<()>(5); + let sender_clones: Vec<_> = (0..20).map(|_| s.clone()).collect(); + + assert_eq!(s.sender_count(), 21); + assert_eq!(r.sender_count(), 21); + + drop(sender_clones); + + assert_eq!(s.receiver_count(), 1); + assert_eq!(r.receiver_count(), 1); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn close_wakes_sender() { + let (s, r) = bounded(1); + + Parallel::new() + .add(move || { + assert_eq!(future::block_on(s.send(())), Ok(())); + assert_eq!(future::block_on(s.send(())), Err(SendError(()))); + }) + .add(move || { + sleep(ms(1000)); + drop(r); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn close_wakes_receiver() { + let (s, r) = bounded::<()>(1); + + Parallel::new() + .add(move || { + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + sleep(ms(1000)); + drop(s); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn forget_blocked_sender() { + let (s1, r) = bounded(2); + let s2 = s1.clone(); + + Parallel::new() + .add(move || { + assert!(future::block_on(s1.send(3)).is_ok()); + assert!(future::block_on(s1.send(7)).is_ok()); + let s1_fut = s1.send(13); + futures_lite::pin!(s1_fut); + // Poll but keep the future alive. 
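+            // `poll_once` yields `None` while the send is still pending: the
+            // channel (capacity 2) is already full, so this send cannot finish yet.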
+            assert_eq!(future::block_on(future::poll_once(s1_fut)), None);
+            sleep(ms(500));
+        })
+        .add(move || {
+            sleep(ms(100));
+            assert!(future::block_on(s2.send(42)).is_ok());
+        })
+        .add(move || {
+            sleep(ms(200));
+            assert_eq!(future::block_on(r.recv()), Ok(3));
+            assert_eq!(future::block_on(r.recv()), Ok(7));
+            sleep(ms(100));
+            assert_eq!(r.try_recv(), Ok(42));
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn forget_blocked_receiver() {
+    let (s, r1) = bounded(2);
+    let r2 = r1.clone();
+
+    Parallel::new()
+        .add(move || {
+            let r1_fut = r1.recv();
+            // Poll but keep the future alive.
+            futures_lite::pin!(r1_fut);
+            assert_eq!(future::block_on(future::poll_once(&mut r1_fut)), None);
+            sleep(ms(500));
+        })
+        .add(move || {
+            sleep(ms(100));
+            assert_eq!(future::block_on(r2.recv()), Ok(3));
+        })
+        .add(move || {
+            sleep(ms(200));
+            assert!(future::block_on(s.send(3)).is_ok());
+            assert!(future::block_on(s.send(7)).is_ok());
+            sleep(ms(100));
+            assert!(s.try_send(42).is_ok());
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn spsc() {
+    const COUNT: usize = 100_000;
+
+    let (s, r) = bounded(3);
+
+    Parallel::new()
+        .add(move || {
+            for i in 0..COUNT {
+                assert_eq!(future::block_on(r.recv()), Ok(i));
+            }
+            assert_eq!(future::block_on(r.recv()), Err(RecvError));
+        })
+        .add(move || {
+            for i in 0..COUNT {
+                future::block_on(s.send(i)).unwrap();
+            }
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc() {
+    const COUNT: usize = 25_000;
+    const THREADS: usize = 4;
+
+    let (s, r) = bounded::<usize>(3);
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .each(0..THREADS, |_| {
+            for _ in 0..COUNT {
+                let n = future::block_on(r.recv()).unwrap();
+                v[n].fetch_add(1, Ordering::SeqCst);
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                future::block_on(s.send(i)).unwrap();
+            }
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc_stream() {
+    const COUNT: usize = 25_000;
+    const THREADS: usize = 4;
+
+    let (s, r) = bounded::<usize>(3);
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+    let v = &v;
+
+    Parallel::new()
+        .each(0..THREADS, {
+            let r = r;
+            move |_| {
+                futures_lite::pin!(r);
+                for _ in 0..COUNT {
+                    let n = future::block_on(r.next()).unwrap();
+                    v[n].fetch_add(1, Ordering::SeqCst);
+                }
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                future::block_on(s.send(i)).unwrap();
+            }
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
+
+#[cfg(all(feature = "std", not(target_family = "wasm")))]
+#[test]
+fn weak() {
+    let (s, r) = bounded::<i32>(3);
+
+    // Create a weak sender/receiver pair.
+    let (weak_s, weak_r) = (s.downgrade(), r.downgrade());
+
+    // Upgrade and send.
+    {
+        let s = weak_s.upgrade().unwrap();
+        s.send_blocking(3).unwrap();
+        let r = weak_r.upgrade().unwrap();
+        assert_eq!(r.recv_blocking(), Ok(3));
+    }
+
+    // Drop the original sender/receiver pair.
+    drop((s, r));
+
+    // Try to upgrade again.
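+    // Every strong `Sender` and `Receiver` has been dropped, so the channel is
+    // closed and `upgrade` returns `None`.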
+ { + assert!(weak_s.upgrade().is_none()); + assert!(weak_r.upgrade().is_none()); + } +} diff --git a/external/vendor/async-channel/tests/unbounded.rs b/external/vendor/async-channel/tests/unbounded.rs new file mode 100644 index 0000000000..90cb375844 --- /dev/null +++ b/external/vendor/async-channel/tests/unbounded.rs @@ -0,0 +1,356 @@ +#![allow(clippy::bool_assert_comparison, unused_imports)] + +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; + +use async_channel::{unbounded, RecvError, SendError, TryRecvError, TrySendError}; +use easy_parallel::Parallel; +use futures_lite::{future, prelude::*}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[cfg(not(target_family = "wasm"))] +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke() { + let (s, r) = unbounded(); + + s.try_send(7).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + future::block_on(s.send(8)).unwrap(); + assert_eq!(future::block_on(r.recv()), Ok(8)); + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +#[test] +fn smoke_blocking() { + let (s, r) = unbounded(); + + s.send_blocking(7).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + s.send_blocking(8).unwrap(); + assert_eq!(future::block_on(r.recv()), Ok(8)); + + future::block_on(s.send(9)).unwrap(); + assert_eq!(r.recv_blocking(), Ok(9)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn capacity() { + let (s, r) = unbounded::<()>(); + assert_eq!(s.capacity(), None); + assert_eq!(r.capacity(), None); +} + +#[test] +fn len_empty_full() { + let (s, r) = unbounded(); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); + + future::block_on(s.send(())).unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), false); + + future::block_on(r.recv()).unwrap(); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn try_recv() { + let (s, r) = unbounded(); + + Parallel::new() + .add(move || { + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + sleep(ms(1500)); + assert_eq!(r.try_recv(), Ok(7)); + sleep(ms(500)); + assert_eq!(r.try_recv(), Err(TryRecvError::Closed)); + }) + .add(move || { + sleep(ms(1000)); + future::block_on(s.send(7)).unwrap(); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn recv() { + let (s, r) = unbounded(); + + Parallel::new() + .add(move || { + assert_eq!(future::block_on(r.recv()), Ok(7)); + sleep(ms(1000)); + assert_eq!(future::block_on(r.recv()), Ok(8)); + sleep(ms(1000)); + assert_eq!(future::block_on(r.recv()), Ok(9)); + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + sleep(ms(1500)); + future::block_on(s.send(7)).unwrap(); + future::block_on(s.send(8)).unwrap(); + future::block_on(s.send(9)).unwrap(); + }) + .run(); +} + +#[test] +fn try_send() { + let (s, r) = unbounded(); + for i in 0..1000 { + assert_eq!(s.try_send(i), Ok(())); + } + + drop(r); + assert_eq!(s.try_send(777), Err(TrySendError::Closed(777))); +} + +#[test] +fn send() { + 
    let (s, r) = unbounded();
+    for i in 0..1000 {
+        assert_eq!(future::block_on(s.send(i)), Ok(()));
+    }
+
+    drop(r);
+    assert_eq!(future::block_on(s.send(777)), Err(SendError(777)));
+}
+
+#[test]
+fn send_after_close() {
+    let (s, r) = unbounded();
+
+    future::block_on(s.send(1)).unwrap();
+    future::block_on(s.send(2)).unwrap();
+    future::block_on(s.send(3)).unwrap();
+
+    drop(r);
+
+    assert_eq!(future::block_on(s.send(4)), Err(SendError(4)));
+    assert_eq!(s.try_send(5), Err(TrySendError::Closed(5)));
+}
+
+#[test]
+fn recv_after_close() {
+    let (s, r) = unbounded();
+
+    future::block_on(s.send(1)).unwrap();
+    future::block_on(s.send(2)).unwrap();
+    future::block_on(s.send(3)).unwrap();
+
+    drop(s);
+
+    assert_eq!(future::block_on(r.recv()), Ok(1));
+    assert_eq!(future::block_on(r.recv()), Ok(2));
+    assert_eq!(future::block_on(r.recv()), Ok(3));
+    assert_eq!(future::block_on(r.recv()), Err(RecvError));
+}
+
+#[test]
+fn len() {
+    let (s, r) = unbounded();
+
+    assert_eq!(s.len(), 0);
+    assert_eq!(r.len(), 0);
+
+    for i in 0..50 {
+        future::block_on(s.send(i)).unwrap();
+        assert_eq!(s.len(), i + 1);
+    }
+
+    for i in 0..50 {
+        future::block_on(r.recv()).unwrap();
+        assert_eq!(r.len(), 50 - i - 1);
+    }
+
+    assert_eq!(s.len(), 0);
+    assert_eq!(r.len(), 0);
+}
+
+#[test]
+fn receiver_count() {
+    let (s, r) = unbounded::<()>();
+    let receiver_clones: Vec<_> = (0..20).map(|_| r.clone()).collect();
+
+    assert_eq!(s.receiver_count(), 21);
+    assert_eq!(r.receiver_count(), 21);
+
+    drop(receiver_clones);
+
+    assert_eq!(s.receiver_count(), 1);
+    assert_eq!(r.receiver_count(), 1);
+}
+
+#[test]
+fn sender_count() {
+    let (s, r) = unbounded::<()>();
+    let sender_clones: Vec<_> = (0..20).map(|_| s.clone()).collect();
+
+    assert_eq!(s.sender_count(), 21);
+    assert_eq!(r.sender_count(), 21);
+
+    drop(sender_clones);
+
+    assert_eq!(s.receiver_count(), 1);
+    assert_eq!(r.receiver_count(), 1);
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn close_wakes_receiver() {
+    let (s, r) = unbounded::<()>();
+
+    Parallel::new()
+        .add(move || {
+            assert_eq!(future::block_on(r.recv()), Err(RecvError));
+        })
+        .add(move || {
+            sleep(ms(1000));
+            drop(s);
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn spsc() {
+    const COUNT: usize = 100_000;
+
+    let (s, r) = unbounded();
+
+    Parallel::new()
+        .add(move || {
+            for i in 0..COUNT {
+                assert_eq!(future::block_on(r.recv()), Ok(i));
+            }
+            assert_eq!(future::block_on(r.recv()), Err(RecvError));
+        })
+        .add(move || {
+            for i in 0..COUNT {
+                future::block_on(s.send(i)).unwrap();
+            }
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc() {
+    const COUNT: usize = 25_000;
+    const THREADS: usize = 4;
+
+    let (s, r) = unbounded::<usize>();
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .each(0..THREADS, |_| {
+            for _ in 0..COUNT {
+                let n = future::block_on(r.recv()).unwrap();
+                v[n].fetch_add(1, Ordering::SeqCst);
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                future::block_on(s.send(i)).unwrap();
+            }
+        })
+        .run();
+
+    assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc_stream() {
+    const COUNT: usize = 25_000;
+    const THREADS: usize = 4;
+
+    let (s, r) = unbounded::<usize>();
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+    let v = &v;
+
+    Parallel::new()
+        .each(0..THREADS, {
+            let r = r.clone();
+            move |_| {
+                futures_lite::pin!(r);
+                for _ in 0..COUNT {
+                    let n = future::block_on(r.next()).unwrap();
+                    v[n].fetch_add(1, Ordering::SeqCst);
+                }
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                future::block_on(s.send(i)).unwrap();
+            }
+        })
+        .run();
+
+    assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
+
+#[cfg(all(feature = "std", not(target_family = "wasm")))]
+#[test]
+fn weak() {
+    let (s, r) = unbounded::<i32>();
+
+    // Create a weak sender/receiver pair.
+    let (weak_s, weak_r) = (s.downgrade(), r.downgrade());
+
+    // Upgrade and send.
+    {
+        let s = weak_s.upgrade().unwrap();
+        s.send_blocking(3).unwrap();
+        let r = weak_r.upgrade().unwrap();
+        assert_eq!(r.recv_blocking(), Ok(3));
+    }
+
+    // Drop the original sender/receiver pair.
+    drop((s, r));
+
+    // Try to upgrade again.
+    {
+        assert!(weak_s.upgrade().is_none());
+        assert!(weak_r.upgrade().is_none());
+    }
+}
diff --git a/external/vendor/async-task/.cargo-checksum.json b/external/vendor/async-task/.cargo-checksum.json
new file mode 100644
index 0000000000..09c77268b8
--- /dev/null
+++ b/external/vendor/async-task/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{".cargo_vcs_info.json":"06e5796b3db8767807a59ab26c05cc4cd16b3be327edea4c6407f172db41ce7b","CHANGELOG.md":"8433fdce250bdc1864320a7eaab940261a9d74c9fdd42d97591d4708a81831f6","Cargo.lock":"724f4923c3a914b938ee5769bbf9bc35cd7261d129406fae00777a5e66e79416","Cargo.toml":"1ee0e30526e13349dafcd286676747fbbe1b6e876ec28dafa735ae0f603600e7","Cargo.toml.orig":"d94de6be0f8ab5af18a17337a6d93c6adeb47f28cf917b9ba0c709cff9095390","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"91a65b44a1692a16fa2d2f5bb424e78177f0c0ca0051e93db20443568377d5e5","benches/spawn.rs":"9272992dbc058734be04e4b1c0c92fa16f62eebe062c2ce74bbedfae1ce36689","examples/spawn-local.rs":"729d6b8ef55fad9387e8912567160766fb00c3880a7acbdcab7316aefe7811c3","examples/spawn-on-thread.rs":"90c5ffcbe11b847a9201a83ac2f036bf1721dfe971708781a0769ef8354e87c9","examples/spawn.rs":"360c86d609eea98333ba6284ebf8eeb73acc651f07d30b0dd5879757b4bf6125","examples/with-metadata.rs":"c5cb1f101d7320df6d7b6e3a82d5483b94b2e47523387a910facf5c390e535e3","src/header.rs":"d7486c5528b7fb4b372e66e73923747efe052d1318edc6a0797f00fa20b0b2e3","src/lib.rs":"195df753931d1259f6325789f111e79b833432c2925b7f8a90d94ab82b6cd9fe","src/raw.rs":"5879172e761591fee44293537ed134f794c42c873afdd5ccb1107a8dc7ccce6c","src/runnable.rs":"e12bec98bc1aa2f10194c85c7b022807cd5e1abd95a37db93bd5b06ced8d1a28","src/state.rs":"73ec4b98e8519faad882c1ee19711066a9e2a9b2cf9441436f145c8597e2b93d","src/task.rs":"8899dc897b21220a19134ae3755eefefeda55f18a56e32145c2a97d69be60fb6","src/utils.rs":"bc6a0073b07f50f3495962b77afd64c38a0b4ec4e5f2d7e875f72e92254a7dd3","tests/basic.rs":"081729ff928214edcdc2550644708e2571f127e59afc24fdc8af94e2e2cc441c","tests/cancel.rs":"6ffd2e52e3846e20b5221b1509fe718e187652302e8723c9751c1c7921109201","tests/join.rs":"47ae65d842d658a0d8f821535e67160a906f8182373f1780f364314371f99bae","tests/metadata.rs":"33b1d406d4f834671524cbc0c27edb6d6fb95ef16440c24329dfb1aff0db5e76","tests/panic.rs":"d5bd73f8d697277ed0a8193769e3a88989285deff08a111f3b149fd1aa760e65","tests/ready.rs":"45c8562bbbe3837f22129b42ffef734be4572366ff3c3ce6aae1e593f558d429","tests/waker_panic.rs":"6f1ac597ab8df2a8452eface16dec48a150d87770fd6afc3965f36f6c84a7dbb","tests/waker_pending.rs":"8b65a64d00fb3f2e33b8ed9db296b9a2aa8110a44eba7a899bab8dfecb902f5a","t
ests/waker_ready.rs":"abbc78ecb291e894f6805b18ca3c2945b3f2bc9da6ec918ffa5ab9d27a759b5a"},"package":"8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de"} \ No newline at end of file diff --git a/external/vendor/async-task/.cargo_vcs_info.json b/external/vendor/async-task/.cargo_vcs_info.json new file mode 100644 index 0000000000..0f2f0f8925 --- /dev/null +++ b/external/vendor/async-task/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "3065c372e1ef1611230195ad7f3aae80ffde8261" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/external/vendor/async-task/CHANGELOG.md b/external/vendor/async-task/CHANGELOG.md new file mode 100644 index 0000000000..0324dca415 --- /dev/null +++ b/external/vendor/async-task/CHANGELOG.md @@ -0,0 +1,112 @@ +# Version 4.7.1 + +- Improve the panic message for when a task is polled after completion. (#73) + +# Version 4.7.0 + +- Add `from_raw` and `into_raw` functions for `Runnable` to ease passing it + across an FFI boundary. (#65) + +# Version 4.6.0 + +- Bump MSRV to 1.57. (#63) +- Task layout computation failures are now a compile-time error instead of a + runtime abort. (#63) + +# Version 4.5.0 + +- Add a `portable-atomic` feature that enables the usage of fallback primitives for CPUs without atomics. (#58) + +# Version 4.4.1 + +- Clarify safety documentation for `spawn_unchecked`. (#49) + +# Version 4.4.0 + +- Ensure that the allocation doesn't exceed `isize::MAX` (#32) +- Add `FallibleTask::is_finished()` (#34) +- Add a metadata generic parameter to tasks (#33) +- Add panic propagation to tasks (#37) +- Add a way to tell if the task was woken while running from the schedule function (#42) + +# Version 4.3.0 + +- Bump MSRV to Rust 1.47. (#30) +- Evaluate the layouts for the tasks at compile time. (#30) +- Add layout_info field to TaskVTable so that debuggers can decode raw tasks. (#29) + +# Version 4.2.0 + +- Add `Task::is_finished`. (#19) + +# Version 4.1.0 + +- Add `FallibleTask`. (#21) + +# Version 4.0.3 + +- Document the return value of `Runnable::run()` better. + +# Version 4.0.2 + +- Nits in the docs. + +# Version 4.0.1 + +- Nits in the docs. + +# Version 4.0.0 + +- Rename `Task` to `Runnable`. +- Rename `JoinHandle` to `Task`. +- Cancel `Task` on drop. +- Add `Task::detach()` and `Task::cancel()`. +- Add `spawn_unchecked()`. + +# Version 3.0.0 + +- Use `ThreadId` in `spawn_local` because OS-provided IDs can get recycled. +- Add `std` feature to `Cargo.toml`. + +# Version 2.1.1 + +- Allocate large futures on the heap. + +# Version 2.1.0 + +- `JoinHandle` now only evaluates after the task's future has been dropped. + +# Version 2.0.0 + +- Return `true` in `Task::run()`. + +# Version 1.3.1 + +- Make `spawn_local` available only on unix and windows. + +# Version 1.3.0 + +- Add `waker_fn`. + +# Version 1.2.1 + +- Add the `no-std` category to the package. + +# Version 1.2.0 + +- The crate is now marked with `#![no_std]`. +- Add `Task::waker` and `JoinHandle::waker`. +- Add `Task::into_raw` and `Task::from_raw`. + +# Version 1.1.1 + +- Fix a use-after-free bug where the schedule function is dropped while running. + +# Version 1.1.0 + +- If a task is dropped or canceled outside the `run` method, it gets re-scheduled. +- Add `spawn_local` constructor. 
+ +# Version 1.0.0 + +- Initial release diff --git a/external/vendor/async-task/Cargo.lock b/external/vendor/async-task/Cargo.lock new file mode 100644 index 0000000000..109a5808cb --- /dev/null +++ b/external/vendor/async-task/Cargo.lock @@ -0,0 +1,665 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "async-channel" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" +dependencies = [ + "concurrent-queue", + "event-listener 4.0.1", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.2.0", + "async-task 4.7.0", + "concurrent-queue", + "fastrand", + "futures-lite", + "slab", +] + +[[package]] +name = "async-fs" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc19683171f287921f2405677dd2ed2549c3b3bda697a563ebc3a121ace2aba1" +dependencies = [ + "async-lock 3.2.0", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-io" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" +dependencies = [ + "async-lock 3.2.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +dependencies = [ + "event-listener 4.0.1", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-process" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a53fc6301894e04a92cb2584fedde80cb25ba8e02d9dc39d4a87d036e22f397d" +dependencies = [ + "async-channel", + "async-io", + "async-lock 3.2.0", + "async-signal", + "async-task 4.7.0", + "blocking", + "cfg-if", + "event-listener 5.3.0", + "futures-lite", + "rustix", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-signal" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" +dependencies = [ + "async-io", + "async-lock 2.8.0", + "atomic-waker", + "cfg-if", + "futures-core", + "futures-io", + "rustix", + "signal-hook-registry", + "slab", + "windows-sys 0.48.0", +] + +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + 
+[[package]] +name = "async-task" +version = "4.7.1" +dependencies = [ + "atomic-waker", + "easy-parallel", + "flaky_test", + "flume", + "futures-lite", + "once_cell", + "pin-project-lite", + "portable-atomic", + "smol", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel", + "async-lock 3.2.0", + "async-task 4.7.0", + "fastrand", + "futures-io", + "futures-lite", + "piper", + "tracing", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "easy-parallel" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2afbb9b0aef60e4f0d2b18129b6c0dff035a6f7dbbd17c2f38c1432102ee223c" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f2cdcf274580f2d63697192d744727b3198894b1bf02923643bf59e2c26712" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.1", + "pin-project-lite", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "flaky_test" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046caa1f23d7f751fc4ead3d6669a77fa5fc6cf6074960ddeb6a0b0a5b83c8da" +dependencies = [ + "flaky_test_impl", + "futures-util", +] + +[[package]] +name = "flaky_test_impl" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e35909c2f0552fdae5b40f1e95a7da12afb58c1f2f455a12c216c58d869abe" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "spin", +] + +[[package]] +name = "futures-core" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" + +[[package]] +name = "futures-io" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" + +[[package]] +name = "futures-lite" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "libc" +version = "0.2.151" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" + +[[package]] +name = "linux-raw-sys" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + +[[package]] +name = "polling" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" +dependencies = [ + "cfg-if", + "concurrent-queue", + "pin-project-lite", + "rustix", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + +[[package]] +name = "proc-macro2" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustix" +version = "0.38.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smol" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e635339259e51ef85ac7aa29a1cd991b957047507288697a690e80ab97d07cad" +dependencies = [ + "async-channel", + "async-executor", + "async-fs", + "async-io", + "async-lock 3.2.0", + "async-net", + "async-process", + "blocking", + "futures-lite", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" + +[[package]] +name = "unicode-ident" +version = 
"1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" diff --git a/external/vendor/async-task/Cargo.toml b/external/vendor/async-task/Cargo.toml new file mode 100644 index 0000000000..7719f420ac --- /dev/null +++ b/external/vendor/async-task/Cargo.toml @@ -0,0 +1,67 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.57" +name = "async-task" +version = "4.7.1" +authors = ["Stjepan Glavina "] +exclude = ["/.*"] +description = "Task abstraction for building executors" +readme = "README.md" +keywords = [ + "futures", + "task", + "executor", + "spawn", +] +categories = [ + "asynchronous", + "concurrency", + "no-std", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/async-task" + +[dependencies.portable-atomic] +version = "1" +optional = true +default-features = false + +[dev-dependencies.atomic-waker] +version = "1" + +[dev-dependencies.easy-parallel] +version = "3" + +[dev-dependencies.flaky_test] +version = "0.2" + +[dev-dependencies.flume] +version = "0.11" +default-features = false + +[dev-dependencies.futures-lite] +version = "2.0.0" + +[dev-dependencies.once_cell] +version = "1" + +[dev-dependencies.pin-project-lite] +version = "0.2.10" + +[dev-dependencies.smol] +version = "2" + +[features] +default = ["std"] +std = [] diff --git a/external/vendor/async-task/Cargo.toml.orig b/external/vendor/async-task/Cargo.toml.orig new file mode 100644 index 0000000000..7779a6a34e --- /dev/null +++ b/external/vendor/async-task/Cargo.toml.orig @@ -0,0 +1,37 @@ +[package] +name = "async-task" +# When publishing a new version: +# - Update CHANGELOG.md +# - Create "v4.x.y" git tag +version = "4.7.1" +authors = ["Stjepan Glavina "] +edition = "2021" +rust-version = "1.57" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/async-task" +description = "Task abstraction for building executors" +keywords = ["futures", "task", "executor", "spawn"] +categories = ["asynchronous", "concurrency", "no-std"] +exclude = ["/.*"] + +[features] +default = ["std"] +std = [] + +[dependencies] +# Uses portable-atomic polyfill atomics on targets without them +portable-atomic = { version = "1", 
+
+[dev-dependencies]
+atomic-waker = "1"
+easy-parallel = "3"
+flaky_test = "0.2"
+flume = { version = "0.11", default-features = false }
+futures-lite = "2.0.0"
+once_cell = "1"
+pin-project-lite = "0.2.10"
+smol = "2"
+
+# rewrite dependencies to use this version of async-task when running tests
+[patch.crates-io]
+async-task = { path = "." }
diff --git a/external/vendor/async-task/LICENSE-APACHE b/external/vendor/async-task/LICENSE-APACHE
new file mode 100644
index 0000000000..16fe87b06e
--- /dev/null
+++ b/external/vendor/async-task/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!) The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/external/vendor/async-task/LICENSE-MIT b/external/vendor/async-task/LICENSE-MIT
new file mode 100644
index 0000000000..31aa79387f
--- /dev/null
+++ b/external/vendor/async-task/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/external/vendor/async-task/README.md b/external/vendor/async-task/README.md
new file mode 100644
index 0000000000..7044c9dfd9
--- /dev/null
+++ b/external/vendor/async-task/README.md
@@ -0,0 +1,69 @@
+# async-task
+
+[![Build](https://github.com/smol-rs/async-task/workflows/Build%20and%20test/badge.svg)](
+https://github.com/smol-rs/async-task/actions)
+[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)](
+https://github.com/smol-rs/async-task)
+[![Cargo](https://img.shields.io/crates/v/async-task.svg)](
+https://crates.io/crates/async-task)
+[![Documentation](https://docs.rs/async-task/badge.svg)](
+https://docs.rs/async-task)
+
+Task abstraction for building executors.
+
+To spawn a future onto an executor, we first need to allocate it on the heap and keep some
+state attached to it. The state indicates whether the future is ready for polling, waiting to
+be woken up, or completed. Such a stateful future is called a *task*.
+
+All executors have a queue that holds scheduled tasks:
+
+```rust
+let (sender, receiver) = flume::unbounded();
+```
+
+A task is created using either `spawn()`, `spawn_local()`, or `spawn_unchecked()` which
+return a `Runnable` and a `Task`:
+
+```rust
+// A future that will be spawned.
+let future = async { 1 + 2 };
+
+// A function that schedules the task when it gets woken up.
+let schedule = move |runnable| sender.send(runnable).unwrap();
+
+// Construct a task.
+let (runnable, task) = async_task::spawn(future, schedule);
+
+// Push the task into the queue by invoking its schedule function.
+runnable.schedule();
+```
+
+The `Runnable` is used to poll the task's future, and the `Task` is used to await its
+output.
+
+Finally, we need a loop that takes scheduled tasks from the queue and runs them:
+
+```rust
+for runnable in receiver {
+    runnable.run();
+}
+```
+
+Method `run()` polls the task's future once. Then, the `Runnable`
+vanishes and only reappears when its `Waker` wakes the task, thus
+scheduling it to be run again.
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+#### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/external/vendor/async-task/benches/spawn.rs b/external/vendor/async-task/benches/spawn.rs
new file mode 100644
index 0000000000..75d059ecec
--- /dev/null
+++ b/external/vendor/async-task/benches/spawn.rs
@@ -0,0 +1,22 @@
+#![feature(test)]
+
+extern crate test;
+
+use smol::future;
+use test::Bencher;
+
+#[bench]
+fn task_create(b: &mut Bencher) {
+    b.iter(|| {
+        let _ = async_task::spawn(async {}, drop);
+    });
+}
+
+#[bench]
+fn task_run(b: &mut Bencher) {
+    b.iter(|| {
+        let (runnable, task) = async_task::spawn(async {}, drop);
+        runnable.run();
+        future::block_on(task);
+    });
+}
diff --git a/external/vendor/async-task/examples/spawn-local.rs b/external/vendor/async-task/examples/spawn-local.rs
new file mode 100644
index 0000000000..a9da1b4de9
--- /dev/null
+++ b/external/vendor/async-task/examples/spawn-local.rs
@@ -0,0 +1,73 @@
+//! A simple single-threaded executor that can spawn non-`Send` futures.
+
+use std::cell::Cell;
+use std::future::Future;
+use std::rc::Rc;
+
+use async_task::{Runnable, Task};
+
+thread_local! {
+    // A queue that holds scheduled tasks.
+    static QUEUE: (flume::Sender<Runnable>, flume::Receiver<Runnable>) = flume::unbounded();
+}
+
+/// Spawns a future on the executor.
+fn spawn<F, T>(future: F) -> Task<T>
+where
+    F: Future<Output = T> + 'static,
+    T: 'static,
+{
+    // Create a task that is scheduled by pushing itself into the queue.
+    let schedule = |runnable| QUEUE.with(|(s, _)| s.send(runnable).unwrap());
+    let (runnable, task) = async_task::spawn_local(future, schedule);
+
+    // Schedule the task by pushing it into the queue.
+    runnable.schedule();
+
+    task
+}
+
+/// Runs a future to completion.
+fn run<F, T>(future: F) -> T
+where
+    F: Future<Output = T> + 'static,
+    T: 'static,
+{
+    // Spawn a task that sends its result through a channel.
+    let (s, r) = flume::unbounded();
+    spawn(async move { drop(s.send(future.await)) }).detach();
+
+    loop {
+        // If the original task has completed, return its result.
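+        // (`try_recv` is non-blocking: if the result is not ready yet, we fall
+        // through and drive the task queue below instead of waiting here.)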
+        if let Ok(val) = r.try_recv() {
+            return val;
+        }
+
+        // Otherwise, take a task from the queue and run it.
+        QUEUE.with(|(_, r)| r.recv().unwrap().run());
+    }
+}
+
+fn main() {
+    let val = Rc::new(Cell::new(0));
+
+    // Run a future that increments a non-`Send` value.
+    run({
+        let val = val.clone();
+        async move {
+            // Spawn a future that increments the value.
+            let task = spawn({
+                let val = val.clone();
+                async move {
+                    val.set(dbg!(val.get()) + 1);
+                }
+            });
+
+            val.set(dbg!(val.get()) + 1);
+            task.await;
+        }
+    });
+
+    // The value should be 2 at the end of the program.
+    dbg!(val.get());
+}
diff --git a/external/vendor/async-task/examples/spawn-on-thread.rs b/external/vendor/async-task/examples/spawn-on-thread.rs
new file mode 100644
index 0000000000..b0ec2f20a7
--- /dev/null
+++ b/external/vendor/async-task/examples/spawn-on-thread.rs
@@ -0,0 +1,53 @@
+//! A function that runs a future to completion on a dedicated thread.
+
+use std::future::Future;
+use std::sync::Arc;
+use std::thread;
+
+use async_task::Task;
+use smol::future;
+
+/// Spawns a future on a new dedicated thread.
+///
+/// The returned task can be used to await the output of the future.
+fn spawn_on_thread<F, T>(future: F) -> Task<T>
+where
+    F: Future<Output = T> + Send + 'static,
+    T: Send + 'static,
+{
+    // Create a channel that holds the task when it is scheduled for running.
+    let (sender, receiver) = flume::unbounded();
+    let sender = Arc::new(sender);
+    let s = Arc::downgrade(&sender);
+
+    // Wrap the future into one that disconnects the channel on completion.
+    let future = async move {
+        // When the inner future completes, the sender gets dropped and disconnects the channel.
+        let _sender = sender;
+        future.await
+    };
+
+    // Create a task that is scheduled by sending it into the channel.
+    let schedule = move |runnable| s.upgrade().unwrap().send(runnable).unwrap();
+    let (runnable, task) = async_task::spawn(future, schedule);
+
+    // Schedule the task by sending it into the channel.
+    runnable.schedule();
+
+    // Spawn a thread running the task to completion.
+    thread::spawn(move || {
+        // Keep taking the task from the channel and running it until completion.
+        for runnable in receiver {
+            runnable.run();
+        }
+    });
+
+    task
+}
+
+fn main() {
+    // Spawn a future on a dedicated thread.
+    future::block_on(spawn_on_thread(async {
+        println!("Hello, world!");
+    }));
+}
diff --git a/external/vendor/async-task/examples/spawn.rs b/external/vendor/async-task/examples/spawn.rs
new file mode 100644
index 0000000000..3a648114c9
--- /dev/null
+++ b/external/vendor/async-task/examples/spawn.rs
@@ -0,0 +1,48 @@
+//! A simple single-threaded executor.
+
+use std::future::Future;
+use std::panic::catch_unwind;
+use std::thread;
+
+use async_task::{Runnable, Task};
+use once_cell::sync::Lazy;
+use smol::future;
+
+/// Spawns a future on the executor.
+fn spawn<F, T>(future: F) -> Task<T>
+where
+    F: Future<Output = T> + Send + 'static,
+    T: Send + 'static,
+{
+    // A queue that holds scheduled tasks.
+    static QUEUE: Lazy<flume::Sender<Runnable>> = Lazy::new(|| {
+        let (sender, receiver) = flume::unbounded::<Runnable>();
+
+        // Start the executor thread.
+        thread::spawn(|| {
+            for runnable in receiver {
+                // Ignore panics inside futures.
+                let _ignore_panic = catch_unwind(|| runnable.run());
+            }
+        });
+
+        sender
+    });
+
+    // Create a task that is scheduled by pushing it into the queue.
+    let schedule = |runnable| QUEUE.send(runnable).unwrap();
+    let (runnable, task) = async_task::spawn(future, schedule);
+
+    // Schedule the task by pushing it into the queue.
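+    // (`schedule()` consumes the `Runnable` and hands it to the schedule
+    // closure above, which sends it to the executor thread; that thread then
+    // polls the future once per wakeup via `run()`.)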
+    runnable.schedule();
+
+    task
+}
+
+fn main() {
+    // Spawn a future and await its result.
+    let task = spawn(async {
+        println!("Hello, world!");
+    });
+    future::block_on(task);
+}
diff --git a/external/vendor/async-task/examples/with-metadata.rs b/external/vendor/async-task/examples/with-metadata.rs
new file mode 100644
index 0000000000..ed84e31f25
--- /dev/null
+++ b/external/vendor/async-task/examples/with-metadata.rs
@@ -0,0 +1,145 @@
+//! A single threaded executor that uses shortest-job-first scheduling.
+
+use std::cell::RefCell;
+use std::collections::BinaryHeap;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::thread;
+use std::time::{Duration, Instant};
+use std::{cell::Cell, future::Future};
+
+use async_task::{Builder, Runnable, Task};
+use pin_project_lite::pin_project;
+use smol::{channel, future};
+
+struct ByDuration(Runnable<DurationMetadata>);
+
+impl ByDuration {
+    fn duration(&self) -> Duration {
+        self.0.metadata().inner.get()
+    }
+}
+
+impl PartialEq for ByDuration {
+    fn eq(&self, other: &Self) -> bool {
+        self.duration() == other.duration()
+    }
+}
+
+impl Eq for ByDuration {}
+
+impl PartialOrd for ByDuration {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for ByDuration {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.duration().cmp(&other.duration()).reverse()
+    }
+}
+
+pin_project! {
+    #[must_use = "futures do nothing unless you `.await` or poll them"]
+    struct MeasureRuntime<'a, F> {
+        #[pin]
+        f: F,
+        duration: &'a Cell<Duration>
+    }
+}
+
+impl<'a, F: Future> Future for MeasureRuntime<'a, F> {
+    type Output = F::Output;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.project();
+        let duration_cell: &Cell<Duration> = this.duration;
+        let start = Instant::now();
+        let res = F::poll(this.f, cx);
+        let new_duration = Instant::now() - start;
+        duration_cell.set(duration_cell.get() / 2 + new_duration / 2);
+        res
+    }
+}
+
+pub struct DurationMetadata {
+    inner: Cell<Duration>,
+}
+
+thread_local! {
+    // A queue that holds scheduled tasks.
+    static QUEUE: RefCell<BinaryHeap<ByDuration>> = RefCell::new(BinaryHeap::new());
+}
+
+fn make_future_fn<'a, F>(
+    future: F,
+) -> impl (FnOnce(&'a DurationMetadata) -> MeasureRuntime<'a, F>) {
+    move |duration_meta| MeasureRuntime {
+        f: future,
+        duration: &duration_meta.inner,
+    }
+}
+
+fn ensure_safe_schedule<F: Fn(Runnable<DurationMetadata>)>(f: F) -> F {
+    f
+}
+
+/// Spawns a future on the executor.
+pub fn spawn<F, T>(future: F) -> Task<T, DurationMetadata>
+where
+    F: Future<Output = T> + 'static,
+    T: 'static,
+{
+    let spawn_thread_id = thread::current().id();
+    // Create a task that is scheduled by pushing it into the queue.
+    let schedule = ensure_safe_schedule(move |runnable| {
+        if thread::current().id() != spawn_thread_id {
+            panic!("Task would be run on a different thread than spawned on.");
+        }
+        QUEUE.with(move |queue| queue.borrow_mut().push(ByDuration(runnable)));
+    });
+    let future_fn = make_future_fn(future);
+    let (runnable, task) = unsafe {
+        Builder::new()
+            .metadata(DurationMetadata {
+                inner: Cell::new(Duration::default()),
+            })
+            .spawn_unchecked(future_fn, schedule)
+    };
+
+    // Schedule the task by pushing it into the queue.
+    runnable.schedule();
+
+    task
+}
+
+pub fn block_on<F>(future: F)
+where
+    F: Future<Output = ()> + 'static,
+{
+    let task = spawn(future);
+    while !task.is_finished() {
+        let Some(runnable) = QUEUE.with(|queue| queue.borrow_mut().pop()) else {
+            thread::yield_now();
+            continue;
+        };
+        runnable.0.run();
+    }
+}
+
+fn main() {
+    // Spawn a future and await its result.
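+    // (`ByDuration::cmp` reverses the comparison, so the max-heap `QUEUE`
+    // pops the runnable with the *shortest* measured average poll time first,
+    // giving shortest-job-first behavior.)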
+    block_on(async {
+        let (sender, receiver) = channel::bounded(1);
+        let world = spawn(async move {
+            receiver.recv().await.unwrap();
+            println!("world.")
+        });
+        let hello = spawn(async move {
+            sender.send(()).await.unwrap();
+            print!("Hello, ")
+        });
+        future::zip(hello, world).await;
+    });
+}
diff --git a/external/vendor/async-task/src/header.rs b/external/vendor/async-task/src/header.rs
new file mode 100644
index 0000000000..ee84035b83
--- /dev/null
+++ b/external/vendor/async-task/src/header.rs
@@ -0,0 +1,177 @@
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::task::Waker;
+
+#[cfg(not(feature = "portable-atomic"))]
+use core::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering;
+#[cfg(feature = "portable-atomic")]
+use portable_atomic::AtomicUsize;
+
+use crate::raw::TaskVTable;
+use crate::state::*;
+use crate::utils::abort_on_panic;
+
+/// The header of a task.
+///
+/// This header is stored in memory at the beginning of the heap-allocated task.
+pub(crate) struct Header<M> {
+    /// Current state of the task.
+    ///
+    /// Contains flags representing the current state and the reference count.
+    pub(crate) state: AtomicUsize,
+
+    /// The task that is blocked on the `Task` handle.
+    ///
+    /// This waker needs to be woken up once the task completes or is closed.
+    pub(crate) awaiter: UnsafeCell<Option<Waker>>,
+
+    /// The virtual table.
+    ///
+    /// In addition to the actual waker virtual table, it also contains pointers to several other
+    /// methods necessary for bookkeeping the heap-allocated task.
+    pub(crate) vtable: &'static TaskVTable,
+
+    /// Metadata associated with the task.
+    ///
+    /// This metadata may be provided to the user.
+    pub(crate) metadata: M,
+
+    /// Whether or not a panic that occurs in the task should be propagated.
+    #[cfg(feature = "std")]
+    pub(crate) propagate_panic: bool,
+}
+
+impl<M> Header<M> {
+    /// Notifies the awaiter blocked on this task.
+    ///
+    /// If the awaiter is the same as the current waker, it will not be notified.
+    #[inline]
+    pub(crate) fn notify(&self, current: Option<&Waker>) {
+        if let Some(w) = self.take(current) {
+            abort_on_panic(|| w.wake());
+        }
+    }
+
+    /// Takes the awaiter blocked on this task.
+    ///
+    /// If there is no awaiter or if it is the same as the current waker, returns `None`.
+    #[inline]
+    pub(crate) fn take(&self, current: Option<&Waker>) -> Option<Waker> {
+        // Set the bit indicating that the task is notifying its awaiter.
+        let state = self.state.fetch_or(NOTIFYING, Ordering::AcqRel);
+
+        // If the task was not notifying or registering an awaiter...
+        if state & (NOTIFYING | REGISTERING) == 0 {
+            // Take the waker out.
+            let waker = unsafe { (*self.awaiter.get()).take() };
+
+            // Unset the bit indicating that the task is notifying its awaiter.
+            self.state
+                .fetch_and(!NOTIFYING & !AWAITER, Ordering::Release);
+
+            // Finally, notify the waker if it's different from the current waker.
+            if let Some(w) = waker {
+                match current {
+                    None => return Some(w),
+                    Some(c) if !w.will_wake(c) => return Some(w),
+                    Some(_) => abort_on_panic(|| drop(w)),
+                }
+            }
+        }
+
+        None
+    }
+
+    /// Registers a new awaiter blocked on this task.
+    ///
+    /// This method is called when `Task` is polled and it has not yet completed.
+    #[inline]
+    pub(crate) fn register(&self, waker: &Waker) {
+        // Load the state and synchronize with it.
+        let mut state = self.state.fetch_or(0, Ordering::Acquire);
+
+        loop {
+            // There can't be two concurrent registrations because `Task` can only be polled
+            // by a unique pinned reference.
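+            // (If that invariant were ever violated, two threads could race on
+            // the `awaiter` slot; the REGISTERING/NOTIFYING bits below only
+            // serialize registration against a concurrent notification.)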
+            debug_assert!(state & REGISTERING == 0);
+
+            // If we're in the notifying state at this moment, just wake and return without
+            // registering.
+            if state & NOTIFYING != 0 {
+                abort_on_panic(|| waker.wake_by_ref());
+                return;
+            }
+
+            // Mark the state to let other threads know we're registering a new awaiter.
+            match self.state.compare_exchange_weak(
+                state,
+                state | REGISTERING,
+                Ordering::AcqRel,
+                Ordering::Acquire,
+            ) {
+                Ok(_) => {
+                    state |= REGISTERING;
+                    break;
+                }
+                Err(s) => state = s,
+            }
+        }
+
+        // Put the waker into the awaiter field.
+        unsafe {
+            abort_on_panic(|| (*self.awaiter.get()) = Some(waker.clone()));
+        }
+
+        // This variable will contain the newly registered waker if a notification comes in before
+        // we complete registration.
+        let mut waker = None;
+
+        loop {
+            // If there was a notification, take the waker out of the awaiter field.
+            if state & NOTIFYING != 0 {
+                if let Some(w) = unsafe { (*self.awaiter.get()).take() } {
+                    abort_on_panic(|| waker = Some(w));
+                }
+            }
+
+            // The new state is not being notified nor registered, but there might or might not be
+            // an awaiter depending on whether there was a concurrent notification.
+            let new = if waker.is_none() {
+                (state & !NOTIFYING & !REGISTERING) | AWAITER
+            } else {
+                state & !NOTIFYING & !REGISTERING & !AWAITER
+            };
+
+            match self
+                .state
+                .compare_exchange_weak(state, new, Ordering::AcqRel, Ordering::Acquire)
+            {
+                Ok(_) => break,
+                Err(s) => state = s,
+            }
+        }
+
+        // If there was a notification during registration, wake the awaiter now.
+        if let Some(w) = waker {
+            abort_on_panic(|| w.wake());
+        }
+    }
+}
+
+impl<M: fmt::Debug> fmt::Debug for Header<M> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let state = self.state.load(Ordering::SeqCst);
+
+        f.debug_struct("Header")
+            .field("scheduled", &(state & SCHEDULED != 0))
+            .field("running", &(state & RUNNING != 0))
+            .field("completed", &(state & COMPLETED != 0))
+            .field("closed", &(state & CLOSED != 0))
+            .field("awaiter", &(state & AWAITER != 0))
+            .field("task", &(state & TASK != 0))
+            .field("ref_count", &(state / REFERENCE))
+            .field("metadata", &self.metadata)
+            .finish()
+    }
+}
diff --git a/external/vendor/async-task/src/lib.rs b/external/vendor/async-task/src/lib.rs
new file mode 100644
index 0000000000..c8f67028e8
--- /dev/null
+++ b/external/vendor/async-task/src/lib.rs
@@ -0,0 +1,118 @@
+//! Task abstraction for building executors.
+//!
+//! To spawn a future onto an executor, we first need to allocate it on the heap and keep some
+//! state attached to it. The state indicates whether the future is ready for polling, waiting to
+//! be woken up, or completed. Such a stateful future is called a *task*.
+//!
+//! All executors have a queue that holds scheduled tasks:
+//!
+//! ```
+//! let (sender, receiver) = flume::unbounded();
+//! #
+//! # // A future that will get spawned.
+//! # let future = async { 1 + 2 };
+//! #
+//! # // A function that schedules the task when it gets woken up.
+//! # let schedule = move |runnable| sender.send(runnable).unwrap();
+//! #
+//! # // Create a task.
+//! # let (runnable, task) = async_task::spawn(future, schedule);
+//! ```
+//!
+//! A task is created using either [`spawn()`], [`spawn_local()`], or [`spawn_unchecked()`] which
+//! return a [`Runnable`] and a [`Task`]:
+//!
+//! ```
+//! # let (sender, receiver) = flume::unbounded();
+//! #
+//! // A future that will be spawned.
+//! let future = async { 1 + 2 };
+//!
+//! // A function that schedules the task when it gets woken up.
+//! let schedule = move |runnable| sender.send(runnable).unwrap();
+//!
+//! // Construct a task.
+//! let (runnable, task) = async_task::spawn(future, schedule);
+//!
+//! // Push the task into the queue by invoking its schedule function.
+//! runnable.schedule();
+//! ```
+//!
+//! The [`Runnable`] is used to poll the task's future, and the [`Task`] is used to await its
+//! output.
+//!
+//! Finally, we need a loop that takes scheduled tasks from the queue and runs them:
+//!
+//! ```no_run
+//! # let (sender, receiver) = flume::unbounded();
+//! #
+//! # // A future that will get spawned.
+//! # let future = async { 1 + 2 };
+//! #
+//! # // A function that schedules the task when it gets woken up.
+//! # let schedule = move |runnable| sender.send(runnable).unwrap();
+//! #
+//! # // Create a task.
+//! # let (runnable, task) = async_task::spawn(future, schedule);
+//! #
+//! # // Push the task into the queue by invoking its schedule function.
+//! # runnable.schedule();
+//! #
+//! for runnable in receiver {
+//!     runnable.run();
+//! }
+//! ```
+//!
+//! Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`]
+//! vanishes and only reappears when its [`Waker`][`core::task::Waker`] wakes the task, thus
+//! scheduling it to be run again.

+#![no_std]
+#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
+#![doc(test(attr(deny(rust_2018_idioms, warnings))))]
+#![doc(test(attr(allow(unused_extern_crates, unused_variables))))]
+#![doc(
+    html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
+)]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
+)]
+
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate std;
+
+/// We can't use `?` in const contexts yet, so this macro acts
+/// as a workaround.
+macro_rules! leap {
+    ($x: expr) => {{
+        match ($x) {
+            Some(val) => val,
+            None => return None,
+        }
+    }};
+}
+
+macro_rules! leap_unwrap {
+    ($x: expr) => {{
+        match ($x) {
+            Some(val) => val,
+            None => panic!("called `Option::unwrap()` on a `None` value"),
+        }
+    }};
+}
+
+mod header;
+mod raw;
+mod runnable;
+mod state;
+mod task;
+mod utils;
+
+pub use crate::runnable::{
+    spawn, spawn_unchecked, Builder, Runnable, Schedule, ScheduleInfo, WithInfo,
+};
+pub use crate::task::{FallibleTask, Task};
+
+#[cfg(feature = "std")]
+pub use crate::runnable::spawn_local;
diff --git a/external/vendor/async-task/src/raw.rs b/external/vendor/async-task/src/raw.rs
new file mode 100644
index 0000000000..7a45dadb6e
--- /dev/null
+++ b/external/vendor/async-task/src/raw.rs
@@ -0,0 +1,756 @@
+use alloc::alloc::Layout as StdLayout;
+use core::cell::UnsafeCell;
+use core::future::Future;
+use core::mem::{self, ManuallyDrop};
+use core::pin::Pin;
+use core::ptr::NonNull;
+use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
+
+#[cfg(not(feature = "portable-atomic"))]
+use core::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering;
+#[cfg(feature = "portable-atomic")]
+use portable_atomic::AtomicUsize;
+
+use crate::header::Header;
+use crate::runnable::{Schedule, ScheduleInfo};
+use crate::state::*;
+use crate::utils::{abort, abort_on_panic, max, Layout};
+use crate::Runnable;
+
+#[cfg(feature = "std")]
+pub(crate) type Panic = alloc::boxed::Box<dyn core::any::Any + Send + 'static>;
+
+#[cfg(not(feature = "std"))]
+pub(crate) type Panic = core::convert::Infallible;
+
+/// The vtable for a task.
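+///
+/// (A hand-rolled vtable, rather than a trait object, lets `Runnable`, `Task`,
+/// and every `Waker` clone share one type-erased `*const ()` pointer into a
+/// single allocation.)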
+pub(crate) struct TaskVTable {
+    /// Schedules the task.
+    pub(crate) schedule: unsafe fn(*const (), ScheduleInfo),
+
+    /// Drops the future inside the task.
+    pub(crate) drop_future: unsafe fn(*const ()),
+
+    /// Returns a pointer to the output stored after completion.
+    pub(crate) get_output: unsafe fn(*const ()) -> *const (),
+
+    /// Drops the task reference (`Runnable` or `Waker`).
+    pub(crate) drop_ref: unsafe fn(ptr: *const ()),
+
+    /// Destroys the task.
+    pub(crate) destroy: unsafe fn(*const ()),
+
+    /// Runs the task.
+    pub(crate) run: unsafe fn(*const ()) -> bool,
+
+    /// Creates a new waker associated with the task.
+    pub(crate) clone_waker: unsafe fn(ptr: *const ()) -> RawWaker,
+
+    /// The memory layout of the task. This information enables
+    /// debuggers to decode raw task memory blobs. Do not remove
+    /// the field, even if it appears to be unused.
+    #[allow(unused)]
+    pub(crate) layout_info: &'static TaskLayout,
+}
+
+/// Memory layout of a task.
+///
+/// This struct contains the following information:
+///
+/// 1. How to allocate and deallocate the task.
+/// 2. How to access the fields inside the task.
+#[derive(Clone, Copy)]
+pub(crate) struct TaskLayout {
+    /// Memory layout of the whole task.
+    pub(crate) layout: StdLayout,
+
+    /// Offset into the task at which the schedule function is stored.
+    pub(crate) offset_s: usize,
+
+    /// Offset into the task at which the future is stored.
+    pub(crate) offset_f: usize,
+
+    /// Offset into the task at which the output is stored.
+    pub(crate) offset_r: usize,
+}
+
+/// Raw pointers to the fields inside a task.
+pub(crate) struct RawTask<F, T, S, M> {
+    /// The task header.
+    pub(crate) header: *const Header<M>,
+
+    /// The schedule function.
+    pub(crate) schedule: *const S,
+
+    /// The future.
+    pub(crate) future: *mut F,
+
+    /// The output of the future.
+    pub(crate) output: *mut Result<T, Panic>,
+}
+
+impl<F, T, S, M> Copy for RawTask<F, T, S, M> {}
+
+impl<F, T, S, M> Clone for RawTask<F, T, S, M> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<F, T, S, M> RawTask<F, T, S, M> {
+    const TASK_LAYOUT: TaskLayout = Self::eval_task_layout();
+
+    /// Computes the memory layout for a task.
+    #[inline]
+    const fn eval_task_layout() -> TaskLayout {
+        // Compute the layouts for `Header`, `S`, `F`, and `T`.
+        let layout_header = Layout::new::<Header<M>>();
+        let layout_s = Layout::new::<S>();
+        let layout_f = Layout::new::<F>();
+        let layout_r = Layout::new::<Result<T, Panic>>();
+
+        // Compute the layout for `union { F, T }`.
+        let size_union = max(layout_f.size(), layout_r.size());
+        let align_union = max(layout_f.align(), layout_r.align());
+        let layout_union = Layout::from_size_align(size_union, align_union);
+
+        // Compute the layout for `Header` followed by `S` and `union { F, T }`.
+        let layout = layout_header;
+        let (layout, offset_s) = leap_unwrap!(layout.extend(layout_s));
+        let (layout, offset_union) = leap_unwrap!(layout.extend(layout_union));
+        let offset_f = offset_union;
+        let offset_r = offset_union;
+
+        TaskLayout {
+            layout: unsafe { layout.into_std() },
+            offset_s,
+            offset_f,
+            offset_r,
+        }
+    }
+}
+
+impl<F, T, S, M> RawTask<F, T, S, M>
+where
+    F: Future<Output = T>,
+    S: Schedule<M>,
+{
+    const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
+        Self::clone_waker,
+        Self::wake,
+        Self::wake_by_ref,
+        Self::drop_waker,
+    );
+
+    /// Allocates a task with the given `future` and `schedule` function.
+    ///
+    /// It is assumed that initially only the `Runnable` and the `Task` exist.
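+    ///
+    /// (The header, the schedule function, and a `union { future, output }`
+    /// are packed into one heap block according to `TASK_LAYOUT`, so a single
+    /// allocation and a single pointer serve the whole task.)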
+    pub(crate) fn allocate<'a, Gen: FnOnce(&'a M) -> F>(
+        future: Gen,
+        schedule: S,
+        builder: crate::Builder<M>,
+    ) -> NonNull<()>
+    where
+        F: 'a,
+        M: 'a,
+    {
+        // Compute the layout of the task for allocation. Abort if the computation fails.
+        //
+        // n.b. notgull: task_layout now automatically aborts instead of panicking
+        let task_layout = Self::task_layout();
+
+        unsafe {
+            // Allocate enough space for the entire task.
+            let ptr = match NonNull::new(alloc::alloc::alloc(task_layout.layout) as *mut ()) {
+                None => abort(),
+                Some(p) => p,
+            };
+
+            let raw = Self::from_ptr(ptr.as_ptr());
+
+            let crate::Builder {
+                metadata,
+                #[cfg(feature = "std")]
+                propagate_panic,
+            } = builder;
+
+            // Write the header as the first field of the task.
+            (raw.header as *mut Header<M>).write(Header {
+                state: AtomicUsize::new(SCHEDULED | TASK | REFERENCE),
+                awaiter: UnsafeCell::new(None),
+                vtable: &TaskVTable {
+                    schedule: Self::schedule,
+                    drop_future: Self::drop_future,
+                    get_output: Self::get_output,
+                    drop_ref: Self::drop_ref,
+                    destroy: Self::destroy,
+                    run: Self::run,
+                    clone_waker: Self::clone_waker,
+                    layout_info: &Self::TASK_LAYOUT,
+                },
+                metadata,
+                #[cfg(feature = "std")]
+                propagate_panic,
+            });
+
+            // Write the schedule function as the third field of the task.
+            (raw.schedule as *mut S).write(schedule);
+
+            // Generate the future, now that the metadata has been pinned in place.
+            let future = abort_on_panic(|| future(&(*raw.header).metadata));
+
+            // Write the future as the fourth field of the task.
+            raw.future.write(future);
+
+            ptr
+        }
+    }
+
+    /// Creates a `RawTask` from a raw task pointer.
+    #[inline]
+    pub(crate) fn from_ptr(ptr: *const ()) -> Self {
+        let task_layout = Self::task_layout();
+        let p = ptr as *const u8;
+
+        unsafe {
+            Self {
+                header: p as *const Header<M>,
+                schedule: p.add(task_layout.offset_s) as *const S,
+                future: p.add(task_layout.offset_f) as *mut F,
+                output: p.add(task_layout.offset_r) as *mut Result<T, Panic>,
+            }
+        }
+    }
+
+    /// Returns the layout of the task.
+    #[inline]
+    fn task_layout() -> TaskLayout {
+        Self::TASK_LAYOUT
+    }
+    /// Wakes a waker.
+    unsafe fn wake(ptr: *const ()) {
+        // This is just an optimization. If the schedule function has captured variables, then
+        // we'll do less reference counting if we wake the waker by reference and then drop it.
+        if mem::size_of::<S>() > 0 {
+            Self::wake_by_ref(ptr);
+            Self::drop_waker(ptr);
+            return;
+        }
+
+        let raw = Self::from_ptr(ptr);
+
+        let mut state = (*raw.header).state.load(Ordering::Acquire);
+
+        loop {
+            // If the task is completed or closed, it can't be woken up.
+            if state & (COMPLETED | CLOSED) != 0 {
+                // Drop the waker.
+                Self::drop_waker(ptr);
+                break;
+            }
+
+            // If the task is already scheduled, we just need to synchronize with the thread that
+            // will run the task by "publishing" our current view of the memory.
+            if state & SCHEDULED != 0 {
+                // Update the state without actually modifying it.
+                match (*raw.header).state.compare_exchange_weak(
+                    state,
+                    state,
+                    Ordering::AcqRel,
+                    Ordering::Acquire,
+                ) {
+                    Ok(_) => {
+                        // Drop the waker.
+                        Self::drop_waker(ptr);
+                        break;
+                    }
+                    Err(s) => state = s,
+                }
+            } else {
+                // Mark the task as scheduled.
+                match (*raw.header).state.compare_exchange_weak(
+                    state,
+                    state | SCHEDULED,
+                    Ordering::AcqRel,
+                    Ordering::Acquire,
+                ) {
+                    Ok(_) => {
+                        // If the task is not yet scheduled and isn't currently running, now is the
+                        // time to schedule it.
+                        if state & RUNNING == 0 {
+                            // Schedule the task.
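+                            // (`ScheduleInfo::new(false)` records that this wake
+                            // did not happen while the task was running; a
+                            // `WithInfo` scheduler can use that flag to choose a
+                            // queue.)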
+                            Self::schedule(ptr, ScheduleInfo::new(false));
+                        } else {
+                            // Drop the waker.
+                            Self::drop_waker(ptr);
+                        }
+
+                        break;
+                    }
+                    Err(s) => state = s,
+                }
+            }
+        }
+    }
+
+    /// Wakes a waker by reference.
+    unsafe fn wake_by_ref(ptr: *const ()) {
+        let raw = Self::from_ptr(ptr);
+
+        let mut state = (*raw.header).state.load(Ordering::Acquire);
+
+        loop {
+            // If the task is completed or closed, it can't be woken up.
+            if state & (COMPLETED | CLOSED) != 0 {
+                break;
+            }
+
+            // If the task is already scheduled, we just need to synchronize with the thread that
+            // will run the task by "publishing" our current view of the memory.
+            if state & SCHEDULED != 0 {
+                // Update the state without actually modifying it.
+                match (*raw.header).state.compare_exchange_weak(
+                    state,
+                    state,
+                    Ordering::AcqRel,
+                    Ordering::Acquire,
+                ) {
+                    Ok(_) => break,
+                    Err(s) => state = s,
+                }
+            } else {
+                // If the task is not running, we can schedule right away.
+                let new = if state & RUNNING == 0 {
+                    (state | SCHEDULED) + REFERENCE
+                } else {
+                    state | SCHEDULED
+                };
+
+                // Mark the task as scheduled.
+                match (*raw.header).state.compare_exchange_weak(
+                    state,
+                    new,
+                    Ordering::AcqRel,
+                    Ordering::Acquire,
+                ) {
+                    Ok(_) => {
+                        // If the task is not running, now is the time to schedule.
+                        if state & RUNNING == 0 {
+                            // If the reference count overflowed, abort.
+                            if state > isize::MAX as usize {
+                                abort();
+                            }
+
+                            // Schedule the task. There is no need to call `Self::schedule(ptr)`
+                            // because the schedule function cannot be destroyed while the waker is
+                            // still alive.
+                            let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ()));
+                            (*raw.schedule).schedule(task, ScheduleInfo::new(false));
+                        }
+
+                        break;
+                    }
+                    Err(s) => state = s,
+                }
+            }
+        }
+    }
+
+    /// Clones a waker.
+    unsafe fn clone_waker(ptr: *const ()) -> RawWaker {
+        let raw = Self::from_ptr(ptr);
+
+        // Increment the reference count. With any kind of reference-counted data structure,
+        // relaxed ordering is appropriate when incrementing the counter.
+        let state = (*raw.header).state.fetch_add(REFERENCE, Ordering::Relaxed);
+
+        // If the reference count overflowed, abort.
+        if state > isize::MAX as usize {
+            abort();
+        }
+
+        RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)
+    }
+
+    /// Drops a waker.
+    ///
+    /// This function will decrement the reference count. If it drops down to zero, the associated
+    /// `Task` has been dropped too, and the task has not been completed, then it will get
+    /// scheduled one more time so that its future gets dropped by the executor.
+    #[inline]
+    unsafe fn drop_waker(ptr: *const ()) {
+        let raw = Self::from_ptr(ptr);
+
+        // Decrement the reference count.
+        let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE;
+
+        // If this was the last reference to the task and the `Task` has been dropped too,
+        // then we need to decide how to destroy the task.
+        if new & !(REFERENCE - 1) == 0 && new & TASK == 0 {
+            if new & (COMPLETED | CLOSED) == 0 {
+                // If the task was not completed nor closed, close it and schedule one more time so
+                // that its future gets dropped by the executor.
+                (*raw.header)
+                    .state
+                    .store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release);
+                Self::schedule(ptr, ScheduleInfo::new(false));
+            } else {
+                // Otherwise, destroy the task right away.
+                Self::destroy(ptr);
+            }
+        }
+    }
+
+    /// Drops a task reference (`Runnable` or `Waker`).
+    ///
+    /// This function will decrement the reference count. If it drops down to zero and the
+    /// associated `Task` handle has been dropped too, then the task gets destroyed.
+    #[inline]
+    unsafe fn drop_ref(ptr: *const ()) {
+        let raw = Self::from_ptr(ptr);
+
+        // Decrement the reference count.
+        let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE;
+
+        // If this was the last reference to the task and the `Task` has been dropped too,
+        // then destroy the task.
+        if new & !(REFERENCE - 1) == 0 && new & TASK == 0 {
+            Self::destroy(ptr);
+        }
+    }
+
+    /// Schedules a task for running.
+    ///
+    /// This function doesn't modify the state of the task. It only passes the task reference to
+    /// its schedule function.
+    unsafe fn schedule(ptr: *const (), info: ScheduleInfo) {
+        let raw = Self::from_ptr(ptr);
+
+        // If the schedule function has captured variables, create a temporary waker that prevents
+        // the task from getting deallocated while the function is being invoked.
+        let _waker;
+        if mem::size_of::<S>() > 0 {
+            _waker = Waker::from_raw(Self::clone_waker(ptr));
+        }
+
+        let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ()));
+        (*raw.schedule).schedule(task, info);
+    }
+
+    /// Drops the future inside a task.
+    #[inline]
+    unsafe fn drop_future(ptr: *const ()) {
+        let raw = Self::from_ptr(ptr);
+
+        // We need a safeguard against panics because the destructor can panic.
+        abort_on_panic(|| {
+            raw.future.drop_in_place();
+        })
+    }
+
+    /// Returns a pointer to the output inside a task.
+    unsafe fn get_output(ptr: *const ()) -> *const () {
+        let raw = Self::from_ptr(ptr);
+        raw.output as *const ()
+    }
+
+    /// Cleans up task's resources and deallocates it.
+    ///
+    /// The schedule function will be dropped, and the task will then get deallocated.
+    /// The task must be closed before this function is called.
+    #[inline]
+    unsafe fn destroy(ptr: *const ()) {
+        let raw = Self::from_ptr(ptr);
+        let task_layout = Self::task_layout();
+
+        // We need a safeguard against panics because destructors can panic.
+        abort_on_panic(|| {
+            // Drop the header along with the metadata.
+            (raw.header as *mut Header<M>).drop_in_place();
+
+            // Drop the schedule function.
+            (raw.schedule as *mut S).drop_in_place();
+        });
+
+        // Finally, deallocate the memory reserved by the task.
+        alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout);
+    }
+
+    /// Runs a task.
+    ///
+    /// If polling its future panics, the task will be closed and the panic will be propagated into
+    /// the caller.
+    unsafe fn run(ptr: *const ()) -> bool {
+        let raw = Self::from_ptr(ptr);
+
+        // Create a context from the raw task pointer and the vtable inside its header.
+        let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)));
+        let cx = &mut Context::from_waker(&waker);
+
+        let mut state = (*raw.header).state.load(Ordering::Acquire);
+
+        // Update the task's state before polling its future.
+        loop {
+            // If the task has already been closed, drop the task reference and return.
+            if state & CLOSED != 0 {
+                // Drop the future.
+                Self::drop_future(ptr);
+
+                // Mark the task as unscheduled.
+                let state = (*raw.header).state.fetch_and(!SCHEDULED, Ordering::AcqRel);
+
+                // Take the awaiter out.
+                let mut awaiter = None;
+                if state & AWAITER != 0 {
+                    awaiter = (*raw.header).take(None);
+                }
+
+                // Drop the task reference.
+                Self::drop_ref(ptr);
+
+                // Notify the awaiter that the future has been dropped.
+                if let Some(w) = awaiter {
+                    abort_on_panic(|| w.wake());
+                }
+                return false;
+            }
+
+            // Mark the task as unscheduled and running.
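+            // (A weak CAS in a retry loop is sufficient here: on a spurious
+            // failure the loop simply reloads the state and tries again.)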
+            match (*raw.header).state.compare_exchange_weak(
+                state,
+                (state & !SCHEDULED) | RUNNING,
+                Ordering::AcqRel,
+                Ordering::Acquire,
+            ) {
+                Ok(_) => {
+                    // Update the state because we're continuing with polling the future.
+                    state = (state & !SCHEDULED) | RUNNING;
+                    break;
+                }
+                Err(s) => state = s,
+            }
+        }
+
+        // Poll the inner future, but surround it with a guard that closes the task in case polling
+        // panics.
+        // If available, we should also try to catch the panic so that it is propagated correctly.
+        let guard = Guard(raw);
+
+        // Panic propagation is not available for no_std.
+        #[cfg(not(feature = "std"))]
+        let poll = <F as Future>::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok);
+
+        #[cfg(feature = "std")]
+        let poll = {
+            // Check if we should propagate panics.
+            if (*raw.header).propagate_panic {
+                // Use catch_unwind to catch the panic.
+                match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+                    <F as Future>::poll(Pin::new_unchecked(&mut *raw.future), cx)
+                })) {
+                    Ok(Poll::Ready(v)) => Poll::Ready(Ok(v)),
+                    Ok(Poll::Pending) => Poll::Pending,
+                    Err(e) => Poll::Ready(Err(e)),
+                }
+            } else {
+                <F as Future>::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok)
+            }
+        };
+
+        mem::forget(guard);
+
+        match poll {
+            Poll::Ready(out) => {
+                // Replace the future with its output.
+                Self::drop_future(ptr);
+                raw.output.write(out);
+
+                // The task is now completed.
+                loop {
+                    // If the `Task` is dropped, we'll need to close it and drop the output.
+                    let new = if state & TASK == 0 {
+                        (state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED
+                    } else {
+                        (state & !RUNNING & !SCHEDULED) | COMPLETED
+                    };
+
+                    // Mark the task as not running and completed.
+                    match (*raw.header).state.compare_exchange_weak(
+                        state,
+                        new,
+                        Ordering::AcqRel,
+                        Ordering::Acquire,
+                    ) {
+                        Ok(_) => {
+                            // If the `Task` is dropped or if the task was closed while running,
+                            // now it's time to drop the output.
+                            if state & TASK == 0 || state & CLOSED != 0 {
+                                // Drop the output.
+                                abort_on_panic(|| raw.output.drop_in_place());
+                            }
+
+                            // Take the awaiter out.
+                            let mut awaiter = None;
+                            if state & AWAITER != 0 {
+                                awaiter = (*raw.header).take(None);
+                            }
+
+                            // Drop the task reference.
+                            Self::drop_ref(ptr);
+
+                            // Notify the awaiter that the future has been dropped.
+                            if let Some(w) = awaiter {
+                                abort_on_panic(|| w.wake());
+                            }
+                            break;
+                        }
+                        Err(s) => state = s,
+                    }
+                }
+            }
+            Poll::Pending => {
+                let mut future_dropped = false;
+
+                // The task is still not completed.
+                loop {
+                    // If the task was closed while running, we'll need to unschedule in case it
+                    // was woken up and then destroy it.
+                    let new = if state & CLOSED != 0 {
+                        state & !RUNNING & !SCHEDULED
+                    } else {
+                        state & !RUNNING
+                    };
+
+                    if state & CLOSED != 0 && !future_dropped {
+                        // The thread that closed the task didn't drop the future because it was
+                        // running so now it's our responsibility to do so.
+                        Self::drop_future(ptr);
+                        future_dropped = true;
+                    }
+
+                    // Mark the task as not running.
+                    match (*raw.header).state.compare_exchange_weak(
+                        state,
+                        new,
+                        Ordering::AcqRel,
+                        Ordering::Acquire,
+                    ) {
+                        Ok(state) => {
+                            // If the task was closed while running, we need to notify the awaiter.
+                            // If the task was woken up while running, we need to schedule it.
+                            // Otherwise, we just drop the task reference.
+                            if state & CLOSED != 0 {
+                                // Take the awaiter out.
+                                let mut awaiter = None;
+                                if state & AWAITER != 0 {
+                                    awaiter = (*raw.header).take(None);
+                                }
+
+                                // Drop the task reference.
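+                                // (Dropping this reference may free the task, so
+                                // the awaiter taken above is woken afterwards from
+                                // the local variable, not through task memory.)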
+                                Self::drop_ref(ptr);
+
+                                // Notify the awaiter that the future has been dropped.
+                                if let Some(w) = awaiter {
+                                    abort_on_panic(|| w.wake());
+                                }
+                            } else if state & SCHEDULED != 0 {
+                                // The thread that woke the task up didn't reschedule it because
+                                // it was running so now it's our responsibility to do so.
+                                Self::schedule(ptr, ScheduleInfo::new(true));
+                                return true;
+                            } else {
+                                // Drop the task reference.
+                                Self::drop_ref(ptr);
+                            }
+                            break;
+                        }
+                        Err(s) => state = s,
+                    }
+                }
+            }
+        }
+
+        return false;
+
+        /// A guard that closes the task if polling its future panics.
+        struct Guard<F, T, S, M>(RawTask<F, T, S, M>)
+        where
+            F: Future<Output = T>,
+            S: Schedule<M>;
+
+        impl<F, T, S, M> Drop for Guard<F, T, S, M>
+        where
+            F: Future<Output = T>,
+            S: Schedule<M>,
+        {
+            fn drop(&mut self) {
+                let raw = self.0;
+                let ptr = raw.header as *const ();
+
+                unsafe {
+                    let mut state = (*raw.header).state.load(Ordering::Acquire);
+
+                    loop {
+                        // If the task was closed while running, then unschedule it, drop its
+                        // future, and drop the task reference.
+                        if state & CLOSED != 0 {
+                            // The thread that closed the task didn't drop the future because it
+                            // was running so now it's our responsibility to do so.
+                            RawTask::<F, T, S, M>::drop_future(ptr);
+
+                            // Mark the task as not running and not scheduled.
+                            (*raw.header)
+                                .state
+                                .fetch_and(!RUNNING & !SCHEDULED, Ordering::AcqRel);
+
+                            // Take the awaiter out.
+                            let mut awaiter = None;
+                            if state & AWAITER != 0 {
+                                awaiter = (*raw.header).take(None);
+                            }
+
+                            // Drop the task reference.
+                            RawTask::<F, T, S, M>::drop_ref(ptr);
+
+                            // Notify the awaiter that the future has been dropped.
+                            if let Some(w) = awaiter {
+                                abort_on_panic(|| w.wake());
+                            }
+                            break;
+                        }
+
+                        // Mark the task as not running, not scheduled, and closed.
+                        match (*raw.header).state.compare_exchange_weak(
+                            state,
+                            (state & !RUNNING & !SCHEDULED) | CLOSED,
+                            Ordering::AcqRel,
+                            Ordering::Acquire,
+                        ) {
+                            Ok(state) => {
+                                // Drop the future because the task is now closed.
+                                RawTask::<F, T, S, M>::drop_future(ptr);
+
+                                // Take the awaiter out.
+                                let mut awaiter = None;
+                                if state & AWAITER != 0 {
+                                    awaiter = (*raw.header).take(None);
+                                }
+
+                                // Drop the task reference.
+                                RawTask::<F, T, S, M>::drop_ref(ptr);
+
+                                // Notify the awaiter that the future has been dropped.
+                                if let Some(w) = awaiter {
+                                    abort_on_panic(|| w.wake());
+                                }
+                                break;
+                            }
+                            Err(s) => state = s,
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/external/vendor/async-task/src/runnable.rs b/external/vendor/async-task/src/runnable.rs
new file mode 100644
index 0000000000..25d44dced7
--- /dev/null
+++ b/external/vendor/async-task/src/runnable.rs
@@ -0,0 +1,945 @@
+use core::fmt;
+use core::future::Future;
+use core::marker::PhantomData;
+use core::mem;
+use core::ptr::NonNull;
+use core::sync::atomic::Ordering;
+use core::task::Waker;
+
+use alloc::boxed::Box;
+
+use crate::header::Header;
+use crate::raw::RawTask;
+use crate::state::*;
+use crate::Task;
+
+mod sealed {
+    use super::*;
+    pub trait Sealed<M> {}
+
+    impl<M, F> Sealed<M> for F where F: Fn(Runnable<M>) {}
+
+    impl<M, F> Sealed<M> for WithInfo<F> where F: Fn(Runnable<M>, ScheduleInfo) {}
+}
+
+/// A builder that creates a new task.
+#[derive(Debug)]
+pub struct Builder<M> {
+    /// The metadata associated with the task.
+    pub(crate) metadata: M,
+
+    /// Whether or not a panic that occurs in the task should be propagated.
+    #[cfg(feature = "std")]
+    pub(crate) propagate_panic: bool,
+}
+
+impl<M: Default> Default for Builder<M> {
+    fn default() -> Self {
+        Builder::new().metadata(M::default())
+    }
+}
+
+/// Extra scheduling information that can be passed to the scheduling function.
+///
+/// The data source of this struct is directly from the actual
+/// implementation of the crate itself, different from [`Runnable`]'s metadata, which is
+/// managed by the caller.
+///
+/// # Examples
+///
+/// ```
+/// use async_task::{Runnable, ScheduleInfo, WithInfo};
+/// use std::sync::{Arc, Mutex};
+///
+/// // The future inside the task.
+/// let future = async {
+///     println!("Hello, world!");
+/// };
+///
+/// // If the task gets woken up while running, it will be sent into this channel.
+/// let (s, r) = flume::unbounded();
+/// // Otherwise, it will be placed into this slot.
+/// let lifo_slot = Arc::new(Mutex::new(None));
+/// let schedule = move |runnable: Runnable, info: ScheduleInfo| {
+///     if info.woken_while_running {
+///         s.send(runnable).unwrap()
+///     } else {
+///         let last = lifo_slot.lock().unwrap().replace(runnable);
+///         if let Some(last) = last {
+///             s.send(last).unwrap()
+///         }
+///     }
+/// };
+///
+/// // Create the actual scheduler to be spawned with some future.
+/// let scheduler = WithInfo(schedule);
+/// // Create a task with the future and the scheduler.
+/// let (runnable, task) = async_task::spawn(future, scheduler);
+/// ```
+#[derive(Debug, Copy, Clone)]
+#[non_exhaustive]
+pub struct ScheduleInfo {
+    /// Indicates whether the task gets woken up while running.
+    ///
+    /// It is set to true usually because the task has yielded itself to the
+    /// scheduler.
+    pub woken_while_running: bool,
+}
+
+impl ScheduleInfo {
+    pub(crate) fn new(woken_while_running: bool) -> Self {
+        ScheduleInfo {
+            woken_while_running,
+        }
+    }
+}
+
+/// The trait for scheduling functions.
+pub trait Schedule<M = ()>: sealed::Sealed<M> {
+    /// The actual scheduling procedure.
+    fn schedule(&self, runnable: Runnable<M>, info: ScheduleInfo);
+}
+
+impl<M, F> Schedule<M> for F
+where
+    F: Fn(Runnable<M>),
+{
+    fn schedule(&self, runnable: Runnable<M>, _: ScheduleInfo) {
+        self(runnable)
+    }
+}
+
+/// Pass a scheduling function with more scheduling information - a.k.a.
+/// [`ScheduleInfo`].
+///
+/// Sometimes, it's useful to pass the runnable's state directly to the
+/// scheduling function, such as whether it's woken up while running. The
+/// scheduler can thus use the information to determine its scheduling
+/// strategy.
+///
+/// The data source of [`ScheduleInfo`] is directly from the actual
+/// implementation of the crate itself, different from [`Runnable`]'s metadata,
+/// which is managed by the caller.
+///
+/// # Examples
+///
+/// ```
+/// use async_task::{ScheduleInfo, WithInfo};
+/// use std::sync::{Arc, Mutex};
+///
+/// // The future inside the task.
+/// let future = async {
+///     println!("Hello, world!");
+/// };
+///
+/// // If the task gets woken up while running, it will be sent into this channel.
+/// let (s, r) = flume::unbounded();
+/// // Otherwise, it will be placed into this slot.
+/// let lifo_slot = Arc::new(Mutex::new(None));
+/// let schedule = move |runnable, info: ScheduleInfo| {
+///     if info.woken_while_running {
+///         s.send(runnable).unwrap()
+///     } else {
+///         let last = lifo_slot.lock().unwrap().replace(runnable);
+///         if let Some(last) = last {
+///             s.send(last).unwrap()
+///         }
+///     }
+/// };
+///
+/// // Create a task with the future and the schedule function.
+/// let (runnable, task) = async_task::spawn(future, WithInfo(schedule)); +/// ``` +#[derive(Debug)] +pub struct WithInfo(pub F); + +impl From for WithInfo { + fn from(value: F) -> Self { + WithInfo(value) + } +} + +impl Schedule for WithInfo +where + F: Fn(Runnable, ScheduleInfo), +{ + fn schedule(&self, runnable: Runnable, info: ScheduleInfo) { + (self.0)(runnable, info) + } +} + +impl Builder<()> { + /// Creates a new task builder. + /// + /// By default, this task builder has no metadata. Use the [`metadata`] method to + /// set the metadata. + /// + /// # Examples + /// + /// ``` + /// use async_task::Builder; + /// + /// let (runnable, task) = Builder::new().spawn(|()| async {}, |_| {}); + /// ``` + pub fn new() -> Builder<()> { + Builder { + metadata: (), + #[cfg(feature = "std")] + propagate_panic: false, + } + } + + /// Adds metadata to the task. + /// + /// In certain cases, it may be useful to associate some metadata with a task. For instance, + /// you may want to associate a name with a task, or a priority for a priority queue. This + /// method allows the user to attach arbitrary metadata to a task that is available through + /// the [`Runnable`] or the [`Task`]. + /// + /// # Examples + /// + /// This example creates an executor that associates a "priority" number with each task, and + /// then runs the tasks in order of priority. + /// + /// ``` + /// use async_task::{Builder, Runnable}; + /// use once_cell::sync::Lazy; + /// use std::cmp; + /// use std::collections::BinaryHeap; + /// use std::sync::Mutex; + /// + /// # smol::future::block_on(async { + /// /// A wrapper around a `Runnable` that implements `Ord` so that it can be used in a + /// /// priority queue. + /// struct TaskWrapper(Runnable); + /// + /// impl PartialEq for TaskWrapper { + /// fn eq(&self, other: &Self) -> bool { + /// self.0.metadata() == other.0.metadata() + /// } + /// } + /// + /// impl Eq for TaskWrapper {} + /// + /// impl PartialOrd for TaskWrapper { + /// fn partial_cmp(&self, other: &Self) -> Option { + /// Some(self.cmp(other)) + /// } + /// } + /// + /// impl Ord for TaskWrapper { + /// fn cmp(&self, other: &Self) -> cmp::Ordering { + /// self.0.metadata().cmp(other.0.metadata()) + /// } + /// } + /// + /// static EXECUTOR: Lazy>> = Lazy::new(|| { + /// Mutex::new(BinaryHeap::new()) + /// }); + /// + /// let schedule = |runnable| { + /// EXECUTOR.lock().unwrap().push(TaskWrapper(runnable)); + /// }; + /// + /// // Spawn a few tasks with different priorities. + /// let spawn_task = move |priority| { + /// let (runnable, task) = Builder::new().metadata(priority).spawn( + /// move |_| async move { priority }, + /// schedule, + /// ); + /// runnable.schedule(); + /// task + /// }; + /// + /// let t1 = spawn_task(1); + /// let t2 = spawn_task(2); + /// let t3 = spawn_task(3); + /// + /// // Run the tasks in order of priority. + /// let mut metadata_seen = vec![]; + /// while let Some(TaskWrapper(runnable)) = EXECUTOR.lock().unwrap().pop() { + /// metadata_seen.push(*runnable.metadata()); + /// runnable.run(); + /// } + /// + /// assert_eq!(metadata_seen, vec![3, 2, 1]); + /// assert_eq!(t1.await, 1); + /// assert_eq!(t2.await, 2); + /// assert_eq!(t3.await, 3); + /// # }); + /// ``` + pub fn metadata(self, metadata: M) -> Builder { + Builder { + metadata, + #[cfg(feature = "std")] + propagate_panic: self.propagate_panic, + } + } +} + +impl Builder { + /// Propagates panics that occur in the task. 
+    ///
+    /// When this is `true`, panics that occur in the task will be propagated to the caller of
+    /// the [`Task`]. When this is `false`, no special action is taken when a panic occurs in the
+    /// task, meaning that the caller of [`Runnable::run`] will observe a panic.
+    ///
+    /// This is only available when the `std` feature is enabled. By default, this is `false`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_task::Builder;
+    /// use futures_lite::future::poll_fn;
+    /// use std::future::Future;
+    /// use std::panic;
+    /// use std::pin::Pin;
+    /// use std::task::{Context, Poll};
+    ///
+    /// fn did_panic<F: FnOnce()>(f: F) -> bool {
+    ///     panic::catch_unwind(panic::AssertUnwindSafe(f)).is_err()
+    /// }
+    ///
+    /// # smol::future::block_on(async {
+    /// let (runnable1, mut task1) = Builder::new()
+    ///     .propagate_panic(true)
+    ///     .spawn(|()| async move { panic!() }, |_| {});
+    ///
+    /// let (runnable2, mut task2) = Builder::new()
+    ///     .propagate_panic(false)
+    ///     .spawn(|()| async move { panic!() }, |_| {});
+    ///
+    /// assert!(!did_panic(|| { runnable1.run(); }));
+    /// assert!(did_panic(|| { runnable2.run(); }));
+    ///
+    /// let waker = poll_fn(|cx| Poll::Ready(cx.waker().clone())).await;
+    /// let mut cx = Context::from_waker(&waker);
+    /// assert!(did_panic(|| { let _ = Pin::new(&mut task1).poll(&mut cx); }));
+    /// assert!(did_panic(|| { let _ = Pin::new(&mut task2).poll(&mut cx); }));
+    /// # });
+    /// ```
+    #[cfg(feature = "std")]
+    pub fn propagate_panic(self, propagate_panic: bool) -> Builder<M> {
+        Builder {
+            metadata: self.metadata,
+            propagate_panic,
+        }
+    }
+
+    /// Creates a new task.
+    ///
+    /// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its
+    /// output.
+    ///
+    /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`]
+    /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run
+    /// again.
+    ///
+    /// When the task is woken, its [`Runnable`] is passed to the `schedule` function.
+    /// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it
+    /// should push it into a task queue so that it can be processed later.
+    ///
+    /// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider
+    /// using [`spawn_local()`] or [`spawn_unchecked()`] instead.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_task::Builder;
+    ///
+    /// // The future inside the task.
+    /// let future = async {
+    ///     println!("Hello, world!");
+    /// };
+    ///
+    /// // A function that schedules the task when it gets woken up.
+    /// let (s, r) = flume::unbounded();
+    /// let schedule = move |runnable| s.send(runnable).unwrap();
+    ///
+    /// // Create a task with the future and the schedule function.
+    /// let (runnable, task) = Builder::new().spawn(|()| future, schedule);
+    /// ```
+    pub fn spawn<F, Fut, S>(self, future: F, schedule: S) -> (Runnable<M>, Task<Fut::Output, M>)
+    where
+        F: FnOnce(&M) -> Fut,
+        Fut: Future + Send + 'static,
+        Fut::Output: Send + 'static,
+        S: Schedule<M> + Send + Sync + 'static,
+    {
+        unsafe { self.spawn_unchecked(future, schedule) }
+    }
+
+    /// Creates a new thread-local task.
+    ///
+    /// This function is the same as [`spawn()`], except it does not require [`Send`] on `future`. If
+    /// the [`Runnable`] is used or dropped on another thread, a panic will occur.
+    ///
+    /// This function is only available when the `std` feature for this crate is enabled.
+ /// + /// # Examples + /// + /// ``` + /// use async_task::{Builder, Runnable}; + /// use flume::{Receiver, Sender}; + /// use std::rc::Rc; + /// + /// thread_local! { + /// // A queue that holds scheduled tasks. + /// static QUEUE: (Sender, Receiver) = flume::unbounded(); + /// } + /// + /// // Make a non-Send future. + /// let msg: Rc = "Hello, world!".into(); + /// let future = async move { + /// println!("{}", msg); + /// }; + /// + /// // A function that schedules the task when it gets woken up. + /// let s = QUEUE.with(|(s, _)| s.clone()); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with the future and the schedule function. + /// let (runnable, task) = Builder::new().spawn_local(move |()| future, schedule); + /// ``` + #[cfg(feature = "std")] + pub fn spawn_local( + self, + future: F, + schedule: S, + ) -> (Runnable, Task) + where + F: FnOnce(&M) -> Fut, + Fut: Future + 'static, + Fut::Output: 'static, + S: Schedule + Send + Sync + 'static, + { + use std::mem::ManuallyDrop; + use std::pin::Pin; + use std::task::{Context, Poll}; + use std::thread::{self, ThreadId}; + + #[inline] + fn thread_id() -> ThreadId { + std::thread_local! { + static ID: ThreadId = thread::current().id(); + } + ID.try_with(|id| *id) + .unwrap_or_else(|_| thread::current().id()) + } + + struct Checked { + id: ThreadId, + inner: ManuallyDrop, + } + + impl Drop for Checked { + fn drop(&mut self) { + assert!( + self.id == thread_id(), + "local task dropped by a thread that didn't spawn it" + ); + unsafe { + ManuallyDrop::drop(&mut self.inner); + } + } + } + + impl Future for Checked { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + assert!( + self.id == thread_id(), + "local task polled by a thread that didn't spawn it" + ); + unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) } + } + } + + // Wrap the future into one that checks which thread it's on. + let future = move |meta| { + let future = future(meta); + + Checked { + id: thread_id(), + inner: ManuallyDrop::new(future), + } + }; + + unsafe { self.spawn_unchecked(future, schedule) } + } + + /// Creates a new task without [`Send`], [`Sync`], and `'static` bounds. + /// + /// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and + /// `'static` on `future` and `schedule`. + /// + /// # Safety + /// + /// - If `Fut` is not [`Send`], its [`Runnable`] must be used and dropped on the original + /// thread. + /// - If `Fut` is not `'static`, borrowed non-metadata variables must outlive its [`Runnable`]. + /// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`] + /// must be used and dropped on the original thread. + /// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the + /// [`Runnable`]'s [`Waker`]. + /// + /// # Examples + /// + /// ``` + /// use async_task::Builder; + /// + /// // The future inside the task. + /// let future = async { + /// println!("Hello, world!"); + /// }; + /// + /// // If the task gets woken up, it will be sent into this channel. + /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with the future and the schedule function. 
+ /// let (runnable, task) = unsafe { Builder::new().spawn_unchecked(move |()| future, schedule) }; + /// ``` + pub unsafe fn spawn_unchecked<'a, F, Fut, S>( + self, + future: F, + schedule: S, + ) -> (Runnable, Task) + where + F: FnOnce(&'a M) -> Fut, + Fut: Future + 'a, + S: Schedule, + M: 'a, + { + // Allocate large futures on the heap. + let ptr = if mem::size_of::() >= 2048 { + let future = |meta| { + let future = future(meta); + Box::pin(future) + }; + + RawTask::<_, Fut::Output, S, M>::allocate(future, schedule, self) + } else { + RawTask::::allocate(future, schedule, self) + }; + + let runnable = Runnable::from_raw(ptr); + let task = Task { + ptr, + _marker: PhantomData, + }; + (runnable, task) + } +} + +/// Creates a new task. +/// +/// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its +/// output. +/// +/// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] +/// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run +/// again. +/// +/// When the task is woken, its [`Runnable`] is passed to the `schedule` function. +/// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it +/// should push it into a task queue so that it can be processed later. +/// +/// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider +/// using [`spawn_local()`] or [`spawn_unchecked()`] instead. +/// +/// # Examples +/// +/// ``` +/// // The future inside the task. +/// let future = async { +/// println!("Hello, world!"); +/// }; +/// +/// // A function that schedules the task when it gets woken up. +/// let (s, r) = flume::unbounded(); +/// let schedule = move |runnable| s.send(runnable).unwrap(); +/// +/// // Create a task with the future and the schedule function. +/// let (runnable, task) = async_task::spawn(future, schedule); +/// ``` +pub fn spawn(future: F, schedule: S) -> (Runnable, Task) +where + F: Future + Send + 'static, + F::Output: Send + 'static, + S: Schedule + Send + Sync + 'static, +{ + unsafe { spawn_unchecked(future, schedule) } +} + +/// Creates a new thread-local task. +/// +/// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the +/// [`Runnable`] is used or dropped on another thread, a panic will occur. +/// +/// This function is only available when the `std` feature for this crate is enabled. +/// +/// # Examples +/// +/// ``` +/// use async_task::Runnable; +/// use flume::{Receiver, Sender}; +/// use std::rc::Rc; +/// +/// thread_local! { +/// // A queue that holds scheduled tasks. +/// static QUEUE: (Sender, Receiver) = flume::unbounded(); +/// } +/// +/// // Make a non-Send future. +/// let msg: Rc = "Hello, world!".into(); +/// let future = async move { +/// println!("{}", msg); +/// }; +/// +/// // A function that schedules the task when it gets woken up. +/// let s = QUEUE.with(|(s, _)| s.clone()); +/// let schedule = move |runnable| s.send(runnable).unwrap(); +/// +/// // Create a task with the future and the schedule function. +/// let (runnable, task) = async_task::spawn_local(future, schedule); +/// ``` +#[cfg(feature = "std")] +pub fn spawn_local(future: F, schedule: S) -> (Runnable, Task) +where + F: Future + 'static, + F::Output: 'static, + S: Schedule + Send + Sync + 'static, +{ + Builder::new().spawn_local(move |()| future, schedule) +} + +/// Creates a new task without [`Send`], [`Sync`], and `'static` bounds. 
+/// +/// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and +/// `'static` on `future` and `schedule`. +/// +/// # Safety +/// +/// - If `future` is not [`Send`], its [`Runnable`] must be used and dropped on the original +/// thread. +/// - If `future` is not `'static`, borrowed variables must outlive its [`Runnable`]. +/// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`] +/// must be used and dropped on the original thread. +/// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the +/// [`Runnable`]'s [`Waker`]. +/// +/// # Examples +/// +/// ``` +/// // The future inside the task. +/// let future = async { +/// println!("Hello, world!"); +/// }; +/// +/// // If the task gets woken up, it will be sent into this channel. +/// let (s, r) = flume::unbounded(); +/// let schedule = move |runnable| s.send(runnable).unwrap(); +/// +/// // Create a task with the future and the schedule function. +/// let (runnable, task) = unsafe { async_task::spawn_unchecked(future, schedule) }; +/// ``` +pub unsafe fn spawn_unchecked(future: F, schedule: S) -> (Runnable, Task) +where + F: Future, + S: Schedule, +{ + Builder::new().spawn_unchecked(move |()| future, schedule) +} + +/// A handle to a runnable task. +/// +/// Every spawned task has a single [`Runnable`] handle, which only exists when the task is +/// scheduled for running. +/// +/// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] +/// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run +/// again. +/// +/// Dropping a [`Runnable`] cancels the task, which means its future won't be polled again, and +/// awaiting the [`Task`] after that will result in a panic. +/// +/// # Examples +/// +/// ``` +/// use async_task::Runnable; +/// use once_cell::sync::Lazy; +/// use std::{panic, thread}; +/// +/// // A simple executor. +/// static QUEUE: Lazy> = Lazy::new(|| { +/// let (sender, receiver) = flume::unbounded::(); +/// thread::spawn(|| { +/// for runnable in receiver { +/// let _ignore_panic = panic::catch_unwind(|| runnable.run()); +/// } +/// }); +/// sender +/// }); +/// +/// // Create a task with a simple future. +/// let schedule = |runnable| QUEUE.send(runnable).unwrap(); +/// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule); +/// +/// // Schedule the task and await its output. +/// runnable.schedule(); +/// assert_eq!(smol::future::block_on(task), 3); +/// ``` +pub struct Runnable { + /// A pointer to the heap-allocated task. + pub(crate) ptr: NonNull<()>, + + /// A marker capturing generic type `M`. + pub(crate) _marker: PhantomData, +} + +unsafe impl Send for Runnable {} +unsafe impl Sync for Runnable {} + +#[cfg(feature = "std")] +impl std::panic::UnwindSafe for Runnable {} +#[cfg(feature = "std")] +impl std::panic::RefUnwindSafe for Runnable {} + +impl Runnable { + /// Get the metadata associated with this task. + /// + /// Tasks can be created with a metadata object associated with them; by default, this + /// is a `()` value. See the [`Builder::metadata()`] method for more information. + pub fn metadata(&self) -> &M { + &self.header().metadata + } + + /// Schedules the task. + /// + /// This is a convenience method that passes the [`Runnable`] to the schedule function. + /// + /// # Examples + /// + /// ``` + /// // A function that schedules the task when it gets woken up. 
+ /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with a simple future and the schedule function. + /// let (runnable, task) = async_task::spawn(async {}, schedule); + /// + /// // Schedule the task. + /// assert_eq!(r.len(), 0); + /// runnable.schedule(); + /// assert_eq!(r.len(), 1); + /// ``` + pub fn schedule(self) { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + mem::forget(self); + + unsafe { + ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); + } + } + + /// Runs the task by polling its future. + /// + /// Returns `true` if the task was woken while running, in which case the [`Runnable`] gets + /// rescheduled at the end of this method invocation. Otherwise, returns `false` and the + /// [`Runnable`] vanishes until the task is woken. + /// The return value is just a hint: `true` usually indicates that the task has yielded, i.e. + /// it woke itself and then gave the control back to the executor. + /// + /// If the [`Task`] handle was dropped or if [`cancel()`][`Task::cancel()`] was called, then + /// this method simply destroys the task. + /// + /// If the polled future panics, this method propagates the panic, and awaiting the [`Task`] + /// after that will also result in a panic. + /// + /// # Examples + /// + /// ``` + /// // A function that schedules the task when it gets woken up. + /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with a simple future and the schedule function. + /// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule); + /// + /// // Run the task and check its output. + /// runnable.run(); + /// assert_eq!(smol::future::block_on(task), 3); + /// ``` + pub fn run(self) -> bool { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + mem::forget(self); + + unsafe { ((*header).vtable.run)(ptr) } + } + + /// Returns a waker associated with this task. + /// + /// # Examples + /// + /// ``` + /// use smol::future; + /// + /// // A function that schedules the task when it gets woken up. + /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with a simple future and the schedule function. + /// let (runnable, task) = async_task::spawn(future::pending::<()>(), schedule); + /// + /// // Take a waker and run the task. + /// let waker = runnable.waker(); + /// runnable.run(); + /// + /// // Reschedule the task by waking it. + /// assert_eq!(r.len(), 0); + /// waker.wake(); + /// assert_eq!(r.len(), 1); + /// ``` + pub fn waker(&self) -> Waker { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + + unsafe { + let raw_waker = ((*header).vtable.clone_waker)(ptr); + Waker::from_raw(raw_waker) + } + } + + fn header(&self) -> &Header { + unsafe { &*(self.ptr.as_ptr() as *const Header) } + } + + /// Converts this task into a raw pointer. + /// + /// To avoid a memory leak the pointer must be converted back to a Runnable using [`Runnable::from_raw`][from_raw]. + /// + /// `into_raw` does not change the state of the [`Task`], but there is no guarantee that it will be in the same state after calling [`Runnable::from_raw`][from_raw], + /// as the corresponding [`Task`] might have been dropped or cancelled. 
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use async_task::{Runnable, spawn};
+    ///
+    /// let (runnable, task) = spawn(async {}, |_| {});
+    /// let runnable_pointer = runnable.into_raw();
+    ///
+    /// unsafe {
+    ///     // Convert back to a `Runnable` to prevent a leak.
+    ///     let runnable = Runnable::<()>::from_raw(runnable_pointer);
+    ///     runnable.run();
+    ///     // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe.
+    /// }
+    /// // The memory was freed when `runnable` went out of scope above, so `runnable_pointer` is now dangling!
+    /// ```
+    /// [from_raw]: #method.from_raw
+    pub fn into_raw(self) -> NonNull<()> {
+        let ptr = self.ptr;
+        mem::forget(self);
+        ptr
+    }
+
+    /// Converts a raw pointer into a [`Runnable`].
+    ///
+    /// # Safety
+    ///
+    /// This method should only be used with raw pointers returned from [`Runnable::into_raw`][into_raw].
+    /// It is not safe to use the provided pointer once it is passed to `from_raw`.
+    /// Crucially, it is unsafe to call `from_raw` multiple times with the same pointer - even if the resulting [`Runnable`] is not used -
+    /// as internally `async-task` uses reference counting.
+    ///
+    /// It is however safe to call [`Runnable::into_raw`][into_raw] on a [`Runnable`] created with `from_raw` or
+    /// after the [`Task`] associated with a given [`Runnable`] has been dropped or cancelled.
+    ///
+    /// The state of the [`Runnable`] created with `from_raw` is not specified.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use async_task::{Runnable, spawn};
+    ///
+    /// let (runnable, task) = spawn(async {}, |_| {});
+    /// let runnable_pointer = runnable.into_raw();
+    ///
+    /// drop(task);
+    /// unsafe {
+    ///     // Convert back to a `Runnable` to prevent a leak.
+    ///     let runnable = Runnable::<()>::from_raw(runnable_pointer);
+    ///     let did_poll = runnable.run();
+    ///     assert!(!did_poll);
+    ///     // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe.
+    /// }
+    /// // The memory was freed when `runnable` went out of scope above, so `runnable_pointer` is now dangling!
+    /// ```
+    ///
+    /// [into_raw]: #method.into_raw
+    pub unsafe fn from_raw(ptr: NonNull<()>) -> Self {
+        Self {
+            ptr,
+            _marker: Default::default(),
+        }
+    }
+}
+
+impl<M> Drop for Runnable<M> {
+    fn drop(&mut self) {
+        let ptr = self.ptr.as_ptr();
+        let header = self.header();
+
+        unsafe {
+            let mut state = header.state.load(Ordering::Acquire);
+
+            loop {
+                // If the task has been completed or closed, it can't be canceled.
+                if state & (COMPLETED | CLOSED) != 0 {
+                    break;
+                }
+
+                // Mark the task as closed.
+                match header.state.compare_exchange_weak(
+                    state,
+                    state | CLOSED,
+                    Ordering::AcqRel,
+                    Ordering::Acquire,
+                ) {
+                    Ok(_) => break,
+                    Err(s) => state = s,
+                }
+            }
+
+            // Drop the future.
+            (header.vtable.drop_future)(ptr);
+
+            // Mark the task as unscheduled.
+            let state = header.state.fetch_and(!SCHEDULED, Ordering::AcqRel);
+
+            // Notify the awaiter that the future has been dropped.
+            if state & AWAITER != 0 {
+                (*header).notify(None);
+            }
+
+            // Drop the task reference.
+            (header.vtable.drop_ref)(ptr);
+        }
+    }
+}
+
+impl<M: fmt::Debug> fmt::Debug for Runnable<M> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let ptr = self.ptr.as_ptr();
+        let header = ptr as *const Header<M>;
+
+        f.debug_struct("Runnable")
+            .field("header", unsafe { &(*header) })
+            .finish()
+    }
+}
diff --git a/external/vendor/async-task/src/state.rs b/external/vendor/async-task/src/state.rs
new file mode 100644
index 0000000000..2fc6cf3711
--- /dev/null
+++ b/external/vendor/async-task/src/state.rs
@@ -0,0 +1,69 @@
+/// Set if the task is scheduled for running.
+///
+/// A task is considered to be scheduled whenever its `Runnable` exists.
+///
+/// This flag can't be set when the task is completed. However, it can be set while the task is
+/// running, in which case it will be rescheduled as soon as polling finishes.
+pub(crate) const SCHEDULED: usize = 1 << 0;
+
+/// Set if the task is running.
+///
+/// A task is in running state while its future is being polled.
+///
+/// This flag can't be set when the task is completed. However, it can be in scheduled state while
+/// it is running, in which case it will be rescheduled as soon as polling finishes.
+pub(crate) const RUNNING: usize = 1 << 1;
+
+/// Set if the task has been completed.
+///
+/// This flag is set when polling returns `Poll::Ready`. The output of the future is then stored
+/// inside the task until it becomes closed. In fact, `Task` picks up the output by marking
+/// the task as closed.
+///
+/// This flag can't be set when the task is scheduled or running.
+pub(crate) const COMPLETED: usize = 1 << 2;
+
+/// Set if the task is closed.
+///
+/// If a task is closed, that means it's either canceled or its output has been consumed by the
+/// `Task`. A task becomes closed in the following cases:
+///
+/// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`.
+/// 2. Its output gets awaited by the `Task`.
+/// 3. It panics while polling the future.
+/// 4. It is completed and the `Task` gets dropped.
+pub(crate) const CLOSED: usize = 1 << 3;
+
+/// Set if the `Task` still exists.
+///
+/// The `Task` is a special case in that it is only tracked by this flag, while all other
+/// task references (`Runnable` and `Waker`s) are tracked by the reference count.
+pub(crate) const TASK: usize = 1 << 4;
+
+/// Set if the `Task` is awaiting the output.
+///
+/// This flag is set while there is a registered awaiter of type `Waker` inside the task. When the
+/// task gets closed or completed, we need to wake the awaiter. This flag can be used as a fast
+/// check that tells us if we need to wake anyone.
+pub(crate) const AWAITER: usize = 1 << 5;
+
+/// Set if an awaiter is being registered.
+///
+/// This flag is set when `Task` is polled and we are registering a new awaiter.
+pub(crate) const REGISTERING: usize = 1 << 6;
+
+/// Set if the awaiter is being notified.
+///
+/// This flag is set when notifying the awaiter. If an awaiter is concurrently registered and
+/// notified, whichever side came first will take over the responsibility of resolving the race.
+pub(crate) const NOTIFYING: usize = 1 << 7;
+
+/// A single reference.
+///
+/// The lower bits in the state contain various flags representing the task state, while the upper
+/// bits contain the reference count. The value of `REFERENCE` represents a single reference in the
+/// total reference count.
+///
+/// Note that the reference counter only tracks the `Runnable` and `Waker`s. The `Task` is
+/// tracked separately by the `TASK` flag.
+pub(crate) const REFERENCE: usize = 1 << 8;
diff --git a/external/vendor/async-task/src/task.rs b/external/vendor/async-task/src/task.rs
new file mode 100644
index 0000000000..da45cd8d06
--- /dev/null
+++ b/external/vendor/async-task/src/task.rs
@@ -0,0 +1,565 @@
+use core::fmt;
+use core::future::Future;
+use core::marker::PhantomData;
+use core::mem;
+use core::pin::Pin;
+use core::ptr::NonNull;
+use core::sync::atomic::Ordering;
+use core::task::{Context, Poll};
+
+use crate::header::Header;
+use crate::raw::Panic;
+use crate::runnable::ScheduleInfo;
+use crate::state::*;
+
+/// A spawned task.
+///
+/// A [`Task`] can be awaited to retrieve the output of its future.
+///
+/// Dropping a [`Task`] cancels it, which means its future won't be polled again. To drop the
+/// [`Task`] handle without canceling it, use [`detach()`][`Task::detach()`] instead. To cancel a
+/// task gracefully and wait until it is fully destroyed, use the [`cancel()`][Task::cancel()]
+/// method.
+///
+/// Note that canceling a task actually wakes it and reschedules it one last time. Then, the
+/// executor can destroy the task by simply dropping its [`Runnable`][`super::Runnable`] or by
+/// invoking [`run()`][`super::Runnable::run()`].
+///
+/// # Examples
+///
+/// ```
+/// use smol::{future, Executor};
+/// use std::thread;
+///
+/// let ex = Executor::new();
+///
+/// // Spawn a future onto the executor.
+/// let task = ex.spawn(async {
+///     println!("Hello from a task!");
+///     1 + 2
+/// });
+///
+/// // Run an executor thread.
+/// thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
+///
+/// // Wait for the task's output.
+/// assert_eq!(future::block_on(task), 3);
+/// ```
+#[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"]
+pub struct Task<T, M = ()> {
+    /// A raw task pointer.
+    pub(crate) ptr: NonNull<()>,
+
+    /// A marker capturing generic types `T` and `M`.
+    pub(crate) _marker: PhantomData<(T, M)>,
+}
+
+unsafe impl<T: Send, M: Send + Sync> Send for Task<T, M> {}
+unsafe impl<T, M: Send + Sync> Sync for Task<T, M> {}
+
+impl<T, M> Unpin for Task<T, M> {}
+
+#[cfg(feature = "std")]
+impl<T, M> std::panic::UnwindSafe for Task<T, M> {}
+#[cfg(feature = "std")]
+impl<T, M> std::panic::RefUnwindSafe for Task<T, M> {}
+
+impl<T, M> Task<T, M> {
+    /// Detaches the task to let it keep running in the background.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use smol::{Executor, Timer};
+    /// use std::time::Duration;
+    ///
+    /// let ex = Executor::new();
+    ///
+    /// // Spawn a daemon future.
+    /// ex.spawn(async {
+    ///     loop {
+    ///         println!("I'm a daemon task looping forever.");
+    ///         Timer::after(Duration::from_secs(1)).await;
+    ///     }
+    /// })
+    /// .detach();
+    /// ```
+    pub fn detach(self) {
+        let mut this = self;
+        let _out = this.set_detached();
+        mem::forget(this);
+    }
+
+    /// Cancels the task and waits for it to stop running.
+    ///
+    /// Returns the task's output if it was completed just before it got canceled, or [`None`] if
+    /// it didn't complete.
+    ///
+    /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of
+    /// canceling because it also waits for the task to stop running.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # if cfg!(miri) { return; } // Miri does not support epoll
+    /// use smol::{future, Executor, Timer};
+    /// use std::thread;
+    /// use std::time::Duration;
+    ///
+    /// let ex = Executor::new();
+    ///
+    /// // Spawn a daemon future.
+    /// let task = ex.spawn(async {
+    ///     loop {
+    ///         println!("Even though I'm in an infinite loop, you can still cancel me!");
+    ///         Timer::after(Duration::from_secs(1)).await;
+    ///     }
+    /// });
+    ///
+    /// // Run an executor thread.
+    /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
+    ///
+    /// future::block_on(async {
+    ///     Timer::after(Duration::from_secs(3)).await;
+    ///     task.cancel().await;
+    /// });
+    /// ```
+    pub async fn cancel(self) -> Option<T> {
+        let mut this = self;
+        this.set_canceled();
+        this.fallible().await
+    }
+
+    /// Converts this task into a [`FallibleTask`].
+    ///
+    /// Like [`Task`], a fallible task will poll the task's output until it is
+    /// completed or cancelled due to its [`Runnable`][`super::Runnable`] being
+    /// dropped without being run. Resolves to the task's output when completed,
+    /// or [`None`] if it didn't complete.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use smol::{future, Executor};
+    /// use std::thread;
+    ///
+    /// let ex = Executor::new();
+    ///
+    /// // Spawn a future onto the executor.
+    /// let task = ex.spawn(async {
+    ///     println!("Hello from a task!");
+    ///     1 + 2
+    /// })
+    /// .fallible();
+    ///
+    /// // Run an executor thread.
+    /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
+    ///
+    /// // Wait for the task's output.
+    /// assert_eq!(future::block_on(task), Some(3));
+    /// ```
+    ///
+    /// ```
+    /// use smol::future;
+    ///
+    /// // Schedule function which drops the runnable without running it.
+    /// let schedule = move |runnable| drop(runnable);
+    ///
+    /// // Create a task with the future and the schedule function.
+    /// let (runnable, task) = async_task::spawn(async {
+    ///     println!("Hello from a task!");
+    ///     1 + 2
+    /// }, schedule);
+    /// runnable.schedule();
+    ///
+    /// // Wait for the task's output.
+    /// assert_eq!(future::block_on(task.fallible()), None);
+    /// ```
+    pub fn fallible(self) -> FallibleTask<T, M> {
+        FallibleTask { task: self }
+    }
+
+    /// Puts the task in canceled state.
+    fn set_canceled(&mut self) {
+        let ptr = self.ptr.as_ptr();
+        let header = ptr as *const Header<M>;
+
+        unsafe {
+            let mut state = (*header).state.load(Ordering::Acquire);
+
+            loop {
+                // If the task has been completed or closed, it can't be canceled.
+                if state & (COMPLETED | CLOSED) != 0 {
+                    break;
+                }
+
+                // If the task is not scheduled nor running, we'll need to schedule it.
+                let new = if state & (SCHEDULED | RUNNING) == 0 {
+                    (state | SCHEDULED | CLOSED) + REFERENCE
+                } else {
+                    state | CLOSED
+                };
+
+                // Mark the task as closed.
+                match (*header).state.compare_exchange_weak(
+                    state,
+                    new,
+                    Ordering::AcqRel,
+                    Ordering::Acquire,
+                ) {
+                    Ok(_) => {
+                        // If the task is not scheduled nor running, schedule it one more time so
+                        // that its future gets dropped by the executor.
+                        if state & (SCHEDULED | RUNNING) == 0 {
+                            ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false));
+                        }
+
+                        // Notify the awaiter that the task has been closed.
+                        if state & AWAITER != 0 {
+                            (*header).notify(None);
+                        }
+
+                        break;
+                    }
+                    Err(s) => state = s,
+                }
+            }
+        }
+    }
+
+    /// Puts the task in detached state.
+    fn set_detached(&mut self) -> Option<Result<T, Panic>> {
+        let ptr = self.ptr.as_ptr();
+        let header = ptr as *const Header<M>;
+
+        unsafe {
+            // A place where the output will be stored in case it needs to be dropped.
+            let mut output = None;
+
+            // Optimistically assume the `Task` is being detached just after creating the task.
+            // This is a common case, so if the `Task` is detached, the overhead is only one
+            // compare-exchange operation.
+            if let Err(mut state) = (*header).state.compare_exchange_weak(
+                SCHEDULED | TASK | REFERENCE,
+                SCHEDULED | REFERENCE,
+                Ordering::AcqRel,
+                Ordering::Acquire,
+            ) {
+                loop {
+                    // If the task has been completed but not yet closed, that means its output
+                    // must be dropped.
+                    if state & COMPLETED != 0 && state & CLOSED == 0 {
+                        // Mark the task as closed in order to grab its output.
+                        match (*header).state.compare_exchange_weak(
+                            state,
+                            state | CLOSED,
+                            Ordering::AcqRel,
+                            Ordering::Acquire,
+                        ) {
+                            Ok(_) => {
+                                // Read the output.
+                                output = Some(
+                                    (((*header).vtable.get_output)(ptr) as *mut Result<T, Panic>)
+                                        .read(),
+                                );
+
+                                // Update the state variable because we're continuing the loop.
+                                state |= CLOSED;
+                            }
+                            Err(s) => state = s,
+                        }
+                    } else {
+                        // If this is the last reference to the task and it's not closed, then
+                        // close it and schedule one more time so that its future gets dropped by
+                        // the executor.
+                        let new = if state & (!(REFERENCE - 1) | CLOSED) == 0 {
+                            SCHEDULED | CLOSED | REFERENCE
+                        } else {
+                            state & !TASK
+                        };
+
+                        // Unset the `TASK` flag.
+                        match (*header).state.compare_exchange_weak(
+                            state,
+                            new,
+                            Ordering::AcqRel,
+                            Ordering::Acquire,
+                        ) {
+                            Ok(_) => {
+                                // If this is the last reference to the task, we need to either
+                                // schedule dropping its future or destroy it.
+                                if state & !(REFERENCE - 1) == 0 {
+                                    if state & CLOSED == 0 {
+                                        ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false));
+                                    } else {
+                                        ((*header).vtable.destroy)(ptr);
+                                    }
+                                }
+
+                                break;
+                            }
+                            Err(s) => state = s,
+                        }
+                    }
+                }
+            }
+
+            output
+        }
+    }
+
+    /// Polls the task to retrieve its output.
+    ///
+    /// Returns `Some` if the task has completed or `None` if it was closed.
+    ///
+    /// A task becomes closed in the following cases:
+    ///
+    /// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`.
+    /// 2. Its output gets awaited by the `Task`.
+    /// 3. It panics while polling the future.
+    /// 4. It is completed and the `Task` gets dropped.
+    fn poll_task(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
+        let ptr = self.ptr.as_ptr();
+        let header = ptr as *const Header<M>;
+
+        unsafe {
+            let mut state = (*header).state.load(Ordering::Acquire);
+
+            loop {
+                // If the task has been closed, notify the awaiter and return `None`.
+                if state & CLOSED != 0 {
+                    // If the task is scheduled or running, we need to wait until its future is
+                    // dropped.
+                    if state & (SCHEDULED | RUNNING) != 0 {
+                        // Replace the waker with one associated with the current task.
+                        (*header).register(cx.waker());
+
+                        // Reload the state after registering. It is possible changes occurred just
+                        // before registration so we need to check for that.
+                        state = (*header).state.load(Ordering::Acquire);
+
+                        // If the task is still scheduled or running, we need to wait because its
+                        // future is not dropped yet.
+                        if state & (SCHEDULED | RUNNING) != 0 {
+                            return Poll::Pending;
+                        }
+                    }
+
+                    // Even though the awaiter is most likely the current task, it could also be
+                    // another task.
+                    (*header).notify(Some(cx.waker()));
+                    return Poll::Ready(None);
+                }
+
+                // If the task is not completed, register the current task.
+                if state & COMPLETED == 0 {
+                    // Replace the waker with one associated with the current task.
+                    (*header).register(cx.waker());
+
+                    // Reload the state after registering. It is possible that the task became
+                    // completed or closed just before registration so we need to check for that.
+                    state = (*header).state.load(Ordering::Acquire);
+
+                    // If the task has been closed, restart.
+                    if state & CLOSED != 0 {
+                        continue;
+                    }
+
+                    // If the task is still not completed, we're blocked on it.
+                    if state & COMPLETED == 0 {
+                        return Poll::Pending;
+                    }
+                }
+
+                // Since the task is now completed, mark it as closed in order to grab its output.
+                match (*header).state.compare_exchange(
+                    state,
+                    state | CLOSED,
+                    Ordering::AcqRel,
+                    Ordering::Acquire,
+                ) {
+                    Ok(_) => {
+                        // Notify the awaiter. Even though the awaiter is most likely the current
+                        // task, it could also be another task.
+                        if state & AWAITER != 0 {
+                            (*header).notify(Some(cx.waker()));
+                        }
+
+                        // Take the output from the task.
+                        let output = ((*header).vtable.get_output)(ptr) as *mut Result<T, Panic>;
+                        let output = output.read();
+
+                        // Propagate the panic if the task panicked.
+                        let output = match output {
+                            Ok(output) => output,
+                            Err(panic) => {
+                                #[cfg(feature = "std")]
+                                std::panic::resume_unwind(panic);
+
+                                #[cfg(not(feature = "std"))]
+                                match panic {}
+                            }
+                        };
+
+                        return Poll::Ready(Some(output));
+                    }
+                    Err(s) => state = s,
+                }
+            }
+        }
+    }
+
+    fn header(&self) -> &Header<M> {
+        let ptr = self.ptr.as_ptr();
+        let header = ptr as *const Header<M>;
+        unsafe { &*header }
+    }
+
+    /// Returns `true` if the current task is finished.
+    ///
+    /// Note that in a multithreaded environment, this task can finish immediately after calling
+    /// this function.
+    pub fn is_finished(&self) -> bool {
+        let ptr = self.ptr.as_ptr();
+        let header = ptr as *const Header<M>;
+
+        unsafe {
+            let state = (*header).state.load(Ordering::Acquire);
+            state & (CLOSED | COMPLETED) != 0
+        }
+    }
+
+    /// Get the metadata associated with this task.
+    ///
+    /// Tasks can be created with a metadata object associated with them; by default, this
+    /// is a `()` value. See the [`Builder::metadata()`] method for more information.
+    pub fn metadata(&self) -> &M {
+        &self.header().metadata
+    }
+}
+
+impl<T, M> Drop for Task<T, M> {
+    fn drop(&mut self) {
+        self.set_canceled();
+        self.set_detached();
+    }
+}
+
+impl<T, M> Future for Task<T, M> {
+    type Output = T;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+        match self.poll_task(cx) {
+            Poll::Ready(t) => Poll::Ready(t.expect("Task polled after completion")),
+            Poll::Pending => Poll::Pending,
+        }
+    }
+}
+
+impl<T, M: fmt::Debug> fmt::Debug for Task<T, M> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Task")
+            .field("header", self.header())
+            .finish()
+    }
+}
+
+/// A spawned task with a fallible response.
+///
+/// This type behaves like [`Task`], however it produces an `Option<T>` when
+/// polled and will return `None` if the executor dropped its
+/// [`Runnable`][`super::Runnable`] without being run.
+///
+/// This can be useful to avoid the panic produced when polling the `Task`
+/// future if the executor dropped its `Runnable`.
+#[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"]
+pub struct FallibleTask<T, M = ()> {
+    task: Task<T, M>,
+}
+
+impl<T, M> FallibleTask<T, M> {
+    /// Detaches the task to let it keep running in the background.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use smol::{Executor, Timer};
+    /// use std::time::Duration;
+    ///
+    /// let ex = Executor::new();
+    ///
+    /// // Spawn a daemon future.
+    /// ex.spawn(async {
+    ///     loop {
+    ///         println!("I'm a daemon task looping forever.");
+    ///         Timer::after(Duration::from_secs(1)).await;
+    ///     }
+    /// })
+    /// .fallible()
+    /// .detach();
+    /// ```
+    pub fn detach(self) {
+        self.task.detach()
+    }
+
+    /// Cancels the task and waits for it to stop running.
+    ///
+    /// Returns the task's output if it was completed just before it got canceled, or [`None`] if
+    /// it didn't complete.
+    ///
+    /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of
+    /// canceling because it also waits for the task to stop running.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # if cfg!(miri) { return; } // Miri does not support epoll
+    /// use smol::{future, Executor, Timer};
+    /// use std::thread;
+    /// use std::time::Duration;
+    ///
+    /// let ex = Executor::new();
+    ///
+    /// // Spawn a daemon future.
+    /// let task = ex.spawn(async {
+    ///     loop {
+    ///         println!("Even though I'm in an infinite loop, you can still cancel me!");
+    ///         Timer::after(Duration::from_secs(1)).await;
+    ///     }
+    /// })
+    /// .fallible();
+    ///
+    /// // Run an executor thread.
+    /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
+    ///
+    /// future::block_on(async {
+    ///     Timer::after(Duration::from_secs(3)).await;
+    ///     task.cancel().await;
+    /// });
+    /// ```
+    pub async fn cancel(self) -> Option<T> {
+        self.task.cancel().await
+    }
+
+    /// Returns `true` if the current task is finished.
+    ///
+    /// Note that in a multithreaded environment, this task can finish immediately after calling
+    /// this function.
+    pub fn is_finished(&self) -> bool {
+        self.task.is_finished()
+    }
+}
+
+impl<T, M> Future for FallibleTask<T, M> {
+    type Output = Option<T>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        self.task.poll_task(cx)
+    }
+}
+
+impl<T, M: fmt::Debug> fmt::Debug for FallibleTask<T, M> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("FallibleTask")
+            .field("header", self.task.header())
+            .finish()
+    }
+}
diff --git a/external/vendor/async-task/src/utils.rs b/external/vendor/async-task/src/utils.rs
new file mode 100644
index 0000000000..5c2170c89e
--- /dev/null
+++ b/external/vendor/async-task/src/utils.rs
@@ -0,0 +1,127 @@
+use core::alloc::Layout as StdLayout;
+use core::mem;
+
+/// Aborts the process.
+///
+/// To abort, this function simply panics while panicking.
+pub(crate) fn abort() -> ! {
+    struct Panic;
+
+    impl Drop for Panic {
+        fn drop(&mut self) {
+            panic!("aborting the process");
+        }
+    }
+
+    let _panic = Panic;
+    panic!("aborting the process");
+}
+
+/// Calls a function and aborts if it panics.
+///
+/// This is useful in unsafe code where we can't recover from panics.
+#[inline]
+pub(crate) fn abort_on_panic<T>(f: impl FnOnce() -> T) -> T {
+    struct Bomb;
+
+    impl Drop for Bomb {
+        fn drop(&mut self) {
+            abort();
+        }
+    }
+
+    let bomb = Bomb;
+    let t = f();
+    mem::forget(bomb);
+    t
+}
+
+/// A version of `alloc::alloc::Layout` that can be used in the const
+/// position.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct Layout {
+    size: usize,
+    align: usize,
+}
+
+impl Layout {
+    /// Creates a new `Layout` with the given size and alignment.
+    #[inline]
+    pub(crate) const fn from_size_align(size: usize, align: usize) -> Self {
+        Self { size, align }
+    }
+
+    /// Creates a new `Layout` for the given sized type.
+    #[inline]
+    pub(crate) const fn new<T>() -> Self {
+        Self::from_size_align(mem::size_of::<T>(), mem::align_of::<T>())
+    }
+
+    /// Convert this into the standard library's layout type.
+ /// + /// # Safety + /// + /// - `align` must be non-zero and a power of two. + /// - When rounded up to the nearest multiple of `align`, the size + /// must not overflow. + #[inline] + pub(crate) const unsafe fn into_std(self) -> StdLayout { + StdLayout::from_size_align_unchecked(self.size, self.align) + } + + /// Get the alignment of this layout. + #[inline] + pub(crate) const fn align(&self) -> usize { + self.align + } + + /// Get the size of this layout. + #[inline] + pub(crate) const fn size(&self) -> usize { + self.size + } + + /// Returns the layout for `a` followed by `b` and the offset of `b`. + /// + /// This function was adapted from the `Layout::extend()`: + /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.extend + #[inline] + pub(crate) const fn extend(self, other: Layout) -> Option<(Layout, usize)> { + let new_align = max(self.align(), other.align()); + let pad = self.padding_needed_for(other.align()); + + let offset = leap!(self.size().checked_add(pad)); + let new_size = leap!(offset.checked_add(other.size())); + + // return None if any of the following are true: + // - align is 0 (implied false by is_power_of_two()) + // - align is not a power of 2 + // - size rounded up to align overflows + if !new_align.is_power_of_two() || new_size > isize::MAX as usize - (new_align - 1) { + return None; + } + + let layout = Layout::from_size_align(new_size, new_align); + Some((layout, offset)) + } + + /// Returns the padding after `layout` that aligns the following address to `align`. + /// + /// This function was adapted from the `Layout::padding_needed_for()`: + /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.padding_needed_for + #[inline] + pub(crate) const fn padding_needed_for(self, align: usize) -> usize { + let len = self.size(); + let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); + len_rounded_up.wrapping_sub(len) + } +} + +#[inline] +pub(crate) const fn max(left: usize, right: usize) -> usize { + if left > right { + left + } else { + right + } +} diff --git a/external/vendor/async-task/tests/basic.rs b/external/vendor/async-task/tests/basic.rs new file mode 100644 index 0000000000..727a05ee1f --- /dev/null +++ b/external/vendor/async-task/tests/basic.rs @@ -0,0 +1,325 @@ +use std::future::Future; +use std::pin::Pin; +use std::ptr::NonNull; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use async_task::Runnable; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, POLL, DROP)` +// +// The future `f` always returns `Poll::Ready`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Box; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + Poll::Ready(Box::new(0)) + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. 
+// When it gets dropped, `DROP` is incremented. +macro_rules! schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |_runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn try_await(f: impl Future) -> Option { + future::block_on(future::poll_once(f)) +} + +#[test] +fn drop_and_detach() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(runnable); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn detach_and_drop() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(runnable); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn detach_and_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn run_and_detach() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn cancel_and_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 
+ assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn run_and_cancel() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn cancel_join() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + assert!(try_await(&mut task).is_none()); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(try_await(&mut task).is_some()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn schedule() { + let (s, r) = flume::unbounded(); + let schedule = move |runnable| s.send(runnable).unwrap(); + let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); + + assert!(r.is_empty()); + runnable.schedule(); + + let runnable = r.recv().unwrap(); + assert!(r.is_empty()); + runnable.schedule(); + + let runnable = r.recv().unwrap(); + assert!(r.is_empty()); + runnable.schedule(); + + r.recv().unwrap(); +} + +#[test] +fn schedule_counter() { + static COUNT: AtomicUsize = AtomicUsize::new(0); + + let (s, r) = flume::unbounded(); + let schedule = move |runnable: Runnable| { + COUNT.fetch_add(1, Ordering::SeqCst); + s.send(runnable).unwrap(); + }; + let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); + runnable.schedule(); + + r.recv().unwrap().schedule(); + r.recv().unwrap().schedule(); + assert_eq!(COUNT.load(Ordering::SeqCst), 3); + r.recv().unwrap(); +} + +#[test] +fn drop_inside_schedule() { + struct DropGuard(AtomicUsize); + impl Drop for DropGuard { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let guard = DropGuard(AtomicUsize::new(0)); + + let (runnable, _) = async_task::spawn(async {}, move |runnable| { + assert_eq!(guard.0.load(Ordering::SeqCst), 0); + drop(runnable); + assert_eq!(guard.0.load(Ordering::SeqCst), 0); + }); + runnable.schedule(); +} + +#[test] +fn waker() { + let (s, r) = flume::unbounded(); + let schedule = move |runnable| s.send(runnable).unwrap(); + let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); + + assert!(r.is_empty()); + let waker = runnable.waker(); + runnable.run(); + waker.wake_by_ref(); + + let runnable = r.recv().unwrap(); + runnable.run(); + waker.wake(); + r.recv().unwrap(); +} + +#[test] +fn raw() { + 
// Dispatch schedules a function for execution at a later point. For tests, we execute it straight away. + fn dispatch(trampoline: extern "C" fn(NonNull<()>), context: NonNull<()>) { + trampoline(context) + } + extern "C" fn trampoline(runnable: NonNull<()>) { + let task = unsafe { Runnable::<()>::from_raw(runnable) }; + task.run(); + } + + let task_got_executed = Arc::new(AtomicBool::new(false)); + let (runnable, _handle) = async_task::spawn( + { + let task_got_executed = task_got_executed.clone(); + async move { task_got_executed.store(true, Ordering::SeqCst) } + }, + |runnable: Runnable<()>| dispatch(trampoline, runnable.into_raw()), + ); + runnable.schedule(); + + assert!(task_got_executed.load(Ordering::SeqCst)); +} diff --git a/external/vendor/async-task/tests/cancel.rs b/external/vendor/async-task/tests/cancel.rs new file mode 100644 index 0000000000..033336762c --- /dev/null +++ b/external/vendor/async-task/tests/cancel.rs @@ -0,0 +1,183 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, POLL, DROP_F, DROP_T)` +// +// The future `f` outputs `Poll::Ready`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP_F` is incremented. +// When the output gets dropped, `DROP_T` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop_f: AtomicUsize = AtomicUsize::new(0); + static $drop_t: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Out; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(400)); + Poll::Ready(Out(Box::new(0), true)) + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop_f.fetch_add(1, Ordering::SeqCst); + } + } + + #[derive(Default)] + struct Out(#[allow(dead_code)] Box, bool); + + impl Drop for Out { + fn drop(&mut self) { + if self.1 { + $drop_t.fetch_add(1, Ordering::SeqCst); + } + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! 
schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |runnable: Runnable| { + let _ = &guard; + runnable.schedule(); + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn run_and_cancel() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(future::block_on(task.cancel()).is_some()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn cancel_and_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(200)); + runnable.run(); + + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + thread::sleep(ms(200)); + + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + assert!(future::block_on(task.cancel()).is_none()); + + thread::sleep(ms(200)); + + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + thread::sleep(ms(200)); + + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn cancel_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + + thread::sleep(ms(200)); + + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + assert!(future::block_on(task.cancel()).is_none()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} diff --git a/external/vendor/async-task/tests/join.rs b/external/vendor/async-task/tests/join.rs new file mode 100644 index 0000000000..089b5c10e5 --- /dev/null +++ b/external/vendor/async-task/tests/join.rs @@ -0,0 +1,386 @@ +use std::cell::Cell; +use std::future::Future; +use std::panic::{catch_unwind, AssertUnwindSafe}; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future 
with event counters. +// +// Usage: `future!(f, POLL, DROP_F, DROP_T)` +// +// The future `f` outputs `Poll::Ready`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP_F` is incremented. +// When the output gets dropped, `DROP_T` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop_f: AtomicUsize = AtomicUsize::new(0); + static $drop_t: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Out; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + Poll::Ready(Out(Box::new(0), true)) + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop_f.fetch_add(1, Ordering::SeqCst); + } + } + + #[derive(Default)] + struct Out(#[allow(dead_code)] Box, bool); + + impl Drop for Out { + fn drop(&mut self) { + if self.1 { + $drop_t.fetch_add(1, Ordering::SeqCst); + } + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |runnable: Runnable| { + let _ = &guard; + runnable.schedule(); + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn drop_and_join() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + drop(runnable); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); +} + +#[test] +fn run_and_join() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| future::block_on(task)).is_ok()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); +} + +#[test] +fn detach_and_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + task.detach(); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + 
assert_eq!(DROP_T.load(Ordering::SeqCst), 1); +} + +#[test] +fn join_twice() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + future::block_on(&mut task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + assert!(catch_unwind(AssertUnwindSafe(|| future::block_on(&mut task))).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + task.detach(); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn join_and_cancel() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(200)); + drop(runnable); + + thread::sleep(ms(400)); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + + thread::sleep(ms(200)); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn join_and_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(400)); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + future::block_on(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn try_join_and_run_and_join() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(400)); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + future::block_on(future::or(&mut task, future::ready(Default::default()))); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + future::block_on(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 
0);
+            assert_eq!(DROP_F.load(Ordering::SeqCst), 1);
+            assert_eq!(DROP_T.load(Ordering::SeqCst), 1);
+
+            thread::sleep(ms(200));
+            assert_eq!(DROP_S.load(Ordering::SeqCst), 1);
+        })
+        .run();
+}
+
+#[test]
+fn try_join_and_cancel_and_run() {
+    future!(f, POLL, DROP_F, DROP_T);
+    schedule!(s, SCHEDULE, DROP_S);
+    let (runnable, mut task) = async_task::spawn(f, s);
+
+    Parallel::new()
+        .add(|| {
+            thread::sleep(ms(200));
+
+            runnable.run();
+            assert_eq!(POLL.load(Ordering::SeqCst), 0);
+            assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_F.load(Ordering::SeqCst), 1);
+            assert_eq!(DROP_S.load(Ordering::SeqCst), 1);
+        })
+        .add(|| {
+            future::block_on(future::or(&mut task, future::ready(Default::default())));
+            assert_eq!(POLL.load(Ordering::SeqCst), 0);
+            assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_F.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_S.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_T.load(Ordering::SeqCst), 0);
+
+            drop(task);
+            assert_eq!(POLL.load(Ordering::SeqCst), 0);
+            assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_F.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_S.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_T.load(Ordering::SeqCst), 0);
+        })
+        .run();
+}
+
+#[test]
+fn try_join_and_run_and_cancel() {
+    future!(f, POLL, DROP_F, DROP_T);
+    schedule!(s, SCHEDULE, DROP_S);
+    let (runnable, mut task) = async_task::spawn(f, s);
+
+    Parallel::new()
+        .add(|| {
+            thread::sleep(ms(200));
+
+            runnable.run();
+            assert_eq!(POLL.load(Ordering::SeqCst), 1);
+            assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_F.load(Ordering::SeqCst), 1);
+            assert_eq!(DROP_S.load(Ordering::SeqCst), 0);
+        })
+        .add(|| {
+            future::block_on(future::or(&mut task, future::ready(Default::default())));
+            assert_eq!(POLL.load(Ordering::SeqCst), 0);
+            assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_F.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_S.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_T.load(Ordering::SeqCst), 0);
+
+            thread::sleep(ms(400));
+
+            drop(task);
+            assert_eq!(POLL.load(Ordering::SeqCst), 1);
+            assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
+            assert_eq!(DROP_F.load(Ordering::SeqCst), 1);
+            assert_eq!(DROP_S.load(Ordering::SeqCst), 1);
+            assert_eq!(DROP_T.load(Ordering::SeqCst), 1);
+        })
+        .run();
+}
+
+#[test]
+fn await_output() {
+    struct Fut<T>(Cell<Option<T>>);
+
+    impl<T> Fut<T> {
+        fn new(t: T) -> Fut<T> {
+            Fut(Cell::new(Some(t)))
+        }
+    }
+
+    impl<T> Future for Fut<T> {
+        type Output = T;
+
+        fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
+            Poll::Ready(self.0.take().unwrap())
+        }
+    }
+
+    for i in 0..10 {
+        let (runnable, task) = async_task::spawn(Fut::new(i), drop);
+        runnable.run();
+        assert_eq!(future::block_on(task), i);
+    }
+
+    for i in 0..10 {
+        let (runnable, task) = async_task::spawn(Fut::new(vec![7; i]), drop);
+        runnable.run();
+        assert_eq!(future::block_on(task), vec![7; i]);
+    }
+
+    let (runnable, task) = async_task::spawn(Fut::new("foo".to_string()), drop);
+    runnable.run();
+    assert_eq!(future::block_on(task), "foo");
+}
diff --git a/external/vendor/async-task/tests/metadata.rs b/external/vendor/async-task/tests/metadata.rs
new file mode 100644
index 0000000000..d3d8d53483
--- /dev/null
+++ b/external/vendor/async-task/tests/metadata.rs
@@ -0,0 +1,58 @@
+use async_task::{Builder, Runnable};
+use flume::unbounded;
+use smol::future;
+
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+#[test]
+fn metadata_use_case() {
+    // Each future has a counter that is incremented every time it is scheduled.
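+    // The counter travels as the task's metadata (`Runnable<AtomicUsize>`), so the
+    // schedule closure can reach it through `runnable.metadata()` without any extra
+    // allocation. A minimal sketch of the same pattern with the safe builder API,
+    // assuming a 'static future rather than the borrowed one used below:
+    //
+    //     let (runnable, task) = Builder::new()
+    //         .metadata(AtomicUsize::new(0))
+    //         .spawn(|_meta: &AtomicUsize| async {}, schedule);
+    //
+    // `spawn` hands the future-constructor a reference to the metadata, just as
+    // `spawn_unchecked` does in `make_task` below.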
+    let (sender, receiver) = unbounded::<Runnable<AtomicUsize>>();
+    let schedule = move |runnable: Runnable<AtomicUsize>| {
+        runnable.metadata().fetch_add(1, Ordering::SeqCst);
+        sender.send(runnable).ok();
+    };
+
+    async fn my_future(counter: &AtomicUsize) {
+        loop {
+            // Loop until we've been scheduled five times.
+            let count = counter.load(Ordering::SeqCst);
+            if count < 5 {
+                // Make sure that we are immediately scheduled again.
+                future::yield_now().await;
+                continue;
+            }
+
+            // We've been scheduled five times, so we're done.
+            break;
+        }
+    }
+
+    let make_task = || {
+        // SAFETY: We are spawning a non-'static future, so we need to use the unsafe API.
+        // The borrowed variables, in this case the metadata, are guaranteed to outlive the runnable.
+        let (runnable, task) = unsafe {
+            Builder::new()
+                .metadata(AtomicUsize::new(0))
+                .spawn_unchecked(my_future, schedule.clone())
+        };
+
+        runnable.schedule();
+        task
+    };
+
+    // Make tasks.
+    let t1 = make_task();
+    let t2 = make_task();
+
+    // Run the tasks.
+    while let Ok(runnable) = receiver.try_recv() {
+        runnable.run();
+    }
+
+    // Unwrap the tasks.
+    smol::future::block_on(async move {
+        t1.await;
+        t2.await;
+    });
+}
diff --git a/external/vendor/async-task/tests/panic.rs b/external/vendor/async-task/tests/panic.rs
new file mode 100644
index 0000000000..726e385d46
--- /dev/null
+++ b/external/vendor/async-task/tests/panic.rs
@@ -0,0 +1,234 @@
+use std::future::Future;
+use std::panic::catch_unwind;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::task::{Context, Poll};
+use std::thread;
+use std::time::Duration;
+
+use async_task::Runnable;
+use easy_parallel::Parallel;
+use smol::future;
+
+// Creates a future with event counters.
+//
+// Usage: `future!(f, POLL, DROP)`
+//
+// The future `f` sleeps for 400 ms and then panics.
+// When it gets polled, `POLL` is incremented.
+// When it gets dropped, `DROP` is incremented.
+macro_rules! future {
+    ($name:pat, $poll:ident, $drop:ident) => {
+        static $poll: AtomicUsize = AtomicUsize::new(0);
+        static $drop: AtomicUsize = AtomicUsize::new(0);
+
+        let $name = {
+            struct Fut(#[allow(dead_code)] Box<i32>);
+
+            impl Future for Fut {
+                type Output = ();
+
+                fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
+                    $poll.fetch_add(1, Ordering::SeqCst);
+                    thread::sleep(ms(400));
+                    panic!()
+                }
+            }
+
+            impl Drop for Fut {
+                fn drop(&mut self) {
+                    $drop.fetch_add(1, Ordering::SeqCst);
+                }
+            }
+
+            Fut(Box::new(0))
+        };
+    };
+}
+
+// Creates a schedule function with event counters.
+//
+// Usage: `schedule!(s, SCHED, DROP)`
+//
+// The schedule function `s` does nothing.
+// When it gets invoked, `SCHED` is incremented.
+// When it gets dropped, `DROP` is incremented.
+macro_rules!
schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |_runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn cancel_during_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + }) + .run(); +} + +#[test] +fn run_and_join() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn try_join_and_run_and_join() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + future::block_on(future::or(&mut task, future::ready(()))); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn join_during_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + 
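+            // Note: by this point `block_on(task)` above has consumed the task handle and
+            // the panicking `run()` on the other thread has finished, so the schedule
+            // closure has been dropped and DROP_S is reliably 1.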
assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn try_join_during_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + future::block_on(future::or(&mut task, future::ready(()))); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + drop(task); + }) + .run(); +} + +#[test] +fn detach_during_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + }) + .run(); +} diff --git a/external/vendor/async-task/tests/ready.rs b/external/vendor/async-task/tests/ready.rs new file mode 100644 index 0000000000..aefb36e8f8 --- /dev/null +++ b/external/vendor/async-task/tests/ready.rs @@ -0,0 +1,225 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, POLL, DROP_F, DROP_T)` +// +// The future `f` sleeps for 200 ms and outputs `Poll::Ready`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP_F` is incremented. +// When the output gets dropped, `DROP_T` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop_f: AtomicUsize = AtomicUsize::new(0); + static $drop_t: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Out; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(400)); + Poll::Ready(Out(Box::new(0), true)) + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop_f.fetch_add(1, Ordering::SeqCst); + } + } + + #[derive(Default)] + struct Out(#[allow(dead_code)] Box, bool); + + impl Drop for Out { + fn drop(&mut self) { + if self.1 { + $drop_t.fetch_add(1, Ordering::SeqCst); + } + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! 
schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |_runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn cancel_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn join_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + future::block_on(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn try_join_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + future::block_on(future::or(&mut task, future::ready(Default::default()))); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + drop(task); + }) + .run(); +} + +#[test] +fn detach_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let 
(runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + }) + .run(); +} diff --git a/external/vendor/async-task/tests/waker_panic.rs b/external/vendor/async-task/tests/waker_panic.rs new file mode 100644 index 0000000000..5b54f9dbe9 --- /dev/null +++ b/external/vendor/async-task/tests/waker_panic.rs @@ -0,0 +1,330 @@ +use std::cell::Cell; +use std::future::Future; +use std::panic::{catch_unwind, AssertUnwindSafe}; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use atomic_waker::AtomicWaker; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, get_waker, POLL, DROP)` +// +// The future `f` always sleeps for 200 ms, and panics the second time it is polled. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Every time the future is run, it stores the waker into a global variable. +// This waker can be extracted using the `get_waker()` function. +macro_rules! future { + ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop: AtomicUsize = AtomicUsize::new(0); + static WAKER: AtomicWaker = AtomicWaker::new(); + + let ($name, $get_waker) = { + struct Fut(Cell, #[allow(dead_code)] Box); + + impl Future for Fut { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + WAKER.register(cx.waker()); + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(400)); + + if self.0.get() { + panic!() + } else { + self.0.set(true); + Poll::Pending + } + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + (Fut(Cell::new(false), Box::new(0)), || WAKER.take().unwrap()) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, chan, SCHED, DROP)` +// +// The schedule function `s` pushes the task into `chan`. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Receiver `chan` extracts the task when it is scheduled. +macro_rules! 
schedule { + ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let ($name, $chan) = { + let (s, r) = flume::unbounded(); + + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + let sched = move |runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + s.send(runnable).unwrap(); + }; + + (sched, r) + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +fn try_await(f: impl Future) -> Option { + future::block_on(future::poll_once(f)) +} + +#[test] +fn wake_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + waker.wake(); + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn cancel_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn wake_and_cancel_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + 
assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + waker.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[flaky_test::flaky_test] +fn cancel_and_wake_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + POLL.store(0, Ordering::SeqCst); + DROP_F.store(0, Ordering::SeqCst); + SCHEDULE.store(0, Ordering::SeqCst); + DROP_S.store(0, Ordering::SeqCst); + + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + waker.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn panic_and_poll() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + get_waker().wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + let mut task = task; + assert!(try_await(&mut task).is_none()); + + let runnable = chan.recv().unwrap(); + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(AssertUnwindSafe(|| try_await(&mut task))).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(get_waker()); + drop(task); + 
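+    // Note: the schedule function is only dropped once every reference to the task is
+    // gone; releasing the last waker and the task handle above is what moves DROP_S
+    // from 0 to 1 in the final assertions.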
assert_eq!(POLL.load(Ordering::SeqCst), 2);
+    assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1);
+    assert_eq!(DROP_F.load(Ordering::SeqCst), 1);
+    assert_eq!(DROP_S.load(Ordering::SeqCst), 1);
+}
diff --git a/external/vendor/async-task/tests/waker_pending.rs b/external/vendor/async-task/tests/waker_pending.rs
new file mode 100644
index 0000000000..ccd540b4ae
--- /dev/null
+++ b/external/vendor/async-task/tests/waker_pending.rs
@@ -0,0 +1,365 @@
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::task::{Context, Poll};
+use std::thread;
+use std::time::Duration;
+
+use async_task::Runnable;
+use atomic_waker::AtomicWaker;
+use easy_parallel::Parallel;
+
+// Creates a future with event counters.
+//
+// Usage: `future!(f, get_waker, POLL, DROP)`
+//
+// The future `f` always sleeps for 400 ms and returns `Poll::Pending`.
+// When it gets polled, `POLL` is incremented.
+// When it gets dropped, `DROP` is incremented.
+//
+// Every time the future is run, it stores the waker into a global variable.
+// This waker can be extracted using the `get_waker()` function.
+macro_rules! future {
+    ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => {
+        static $poll: AtomicUsize = AtomicUsize::new(0);
+        static $drop: AtomicUsize = AtomicUsize::new(0);
+        static WAKER: AtomicWaker = AtomicWaker::new();
+
+        let ($name, $get_waker) = {
+            struct Fut(#[allow(dead_code)] Box<i32>);
+
+            impl Future for Fut {
+                type Output = ();
+
+                fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+                    WAKER.register(cx.waker());
+                    $poll.fetch_add(1, Ordering::SeqCst);
+                    thread::sleep(ms(400));
+                    Poll::Pending
+                }
+            }
+
+            impl Drop for Fut {
+                fn drop(&mut self) {
+                    $drop.fetch_add(1, Ordering::SeqCst);
+                }
+            }
+
+            (Fut(Box::new(0)), || WAKER.take().unwrap())
+        };
+    };
+}
+
+// Creates a schedule function with event counters.
+//
+// Usage: `schedule!(s, chan, SCHED, DROP)`
+//
+// The schedule function `s` pushes the task into `chan`.
+// When it gets invoked, `SCHED` is incremented.
+// When it gets dropped, `DROP` is incremented.
+//
+// Receiver `chan` extracts the task when it is scheduled.
+macro_rules!
schedule { + ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let ($name, $chan) = { + let (s, r) = flume::unbounded(); + + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + let sched = move |runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + s.send(runnable).unwrap(); + }; + + (sched, r) + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn wake_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, _task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 2); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + waker.wake_by_ref(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 2); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + }) + .run(); + + chan.recv().unwrap(); + drop(get_waker()); +} + +#[test] +fn cancel_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn wake_and_cancel_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + waker.wake(); + 
assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn cancel_and_wake_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + waker.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn drop_last_waker() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(waker); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + + chan.recv().unwrap().run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[test] +fn cancel_last_task() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + 
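+    // Note: the executor side owns the future, so cancelling through the last `Task`
+    // handle cannot drop it in place; the runnable is scheduled one final time instead,
+    // which is presumably why SCHEDULE ticks to 1 and `chan` now holds one entry.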
assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + + chan.recv().unwrap().run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[test] +fn drop_last_task() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + + chan.recv().unwrap().run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} diff --git a/external/vendor/async-task/tests/waker_ready.rs b/external/vendor/async-task/tests/waker_ready.rs new file mode 100644 index 0000000000..b6f6b5fdf8 --- /dev/null +++ b/external/vendor/async-task/tests/waker_ready.rs @@ -0,0 +1,279 @@ +use std::cell::Cell; +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use atomic_waker::AtomicWaker; + +// Creates a future with event counters. +// +// Usage: `future!(f, get_waker, POLL, DROP)` +// +// The future `f` always sleeps for 200 ms, and returns `Poll::Ready` the second time it is polled. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Every time the future is run, it stores the waker into a global variable. +// This waker can be extracted using the `get_waker()` function. +macro_rules! future { + ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop: AtomicUsize = AtomicUsize::new(0); + static WAKER: AtomicWaker = AtomicWaker::new(); + + let ($name, $get_waker) = { + struct Fut(Cell, #[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Box; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + WAKER.register(cx.waker()); + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(200)); + + if self.0.get() { + Poll::Ready(Box::new(0)) + } else { + self.0.set(true); + Poll::Pending + } + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + (Fut(Cell::new(false), Box::new(0)), || WAKER.take().unwrap()) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, chan, SCHED, DROP)` +// +// The schedule function `s` pushes the task into `chan`. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Receiver `chan` extracts the task when it is scheduled. +macro_rules! 
schedule { + ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let ($name, $chan) = { + let (s, r) = flume::unbounded(); + + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + let sched = move |runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + s.send(runnable).unwrap(); + }; + + (sched, r) + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn wake() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (mut runnable, task) = async_task::spawn(f, s); + task.detach(); + + assert!(chan.is_empty()); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake(); + runnable = chan.recv().unwrap(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[test] +fn wake_by_ref() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (mut runnable, task) = async_task::spawn(f, s); + task.detach(); + + assert!(chan.is_empty()); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake_by_ref(); + runnable = chan.recv().unwrap(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake_by_ref(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[allow(clippy::redundant_clone)] // This is intentional +#[test] +fn clone() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (mut runnable, task) = async_task::spawn(f, s); + task.detach(); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 
0); + + let w2 = get_waker().clone(); + let w3 = w2.clone(); + let w4 = w3.clone(); + w4.wake(); + + runnable = chan.recv().unwrap(); + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + w3.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(w2); + drop(get_waker()); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn wake_dropped() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + task.detach(); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + let waker = get_waker(); + + waker.wake_by_ref(); + drop(chan.recv().unwrap()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + waker.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[test] +fn wake_completed() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + task.detach(); + + runnable.run(); + let waker = get_waker(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + waker.wake(); + chan.recv().unwrap().run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} diff --git a/external/vendor/concurrent-queue/.cargo-checksum.json b/external/vendor/concurrent-queue/.cargo-checksum.json new file mode 100644 index 0000000000..ec22307e94 --- /dev/null +++ b/external/vendor/concurrent-queue/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo_vcs_info.json":"a70c1490e6daa375beb4928e5f4f97a6d9d6ba215fb4ff8b03ebc02a58a16439","CHANGELOG.md":"e9a4a11edce8b62146fdade24e1a74ee624601b2efcaa7035359c464a1ff7ff7","Cargo.toml":"d14f713829a83746178dd8a52732e1d106c895b3b4370bb9436fb190a2d763b2","Cargo.toml.orig":"18cfe5d32e53cdd29e2566612323ba36657b9cf2de41f7fae2a13e3880b9f458","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"d7a326783ef72b063a5a237b8c64e209e80fe61b9dda20e4686b5d8b19b16fb1","benches/bench.rs":"6bac7fcdfbd1b1caa2b36089a347fb120091b95ca7bd399249a96f1271e1bf08","src/bounded.rs":"f161cc0e03f59cc764a44dc0782f7fcef7325fc328dfc8cb8c7fd608fc259cc8","src/lib.rs":"dc69f8a48cc28fe73ea1be88d77cd1aba98947d5a673019e61c630cc04c537ad","src/single.rs":"610671ffb6f3b3bc9d375b99f4e004c61eece74caa29c2a3af6977d4764185f4","src/sync.rs":"7dc9bba96eda875ee3a1e5b808e4e2317cdd03293a38492a214e26e538159eef","src/unbounded.rs":"e90ea841f3f1eac5503b1c3cd2949de64956fc6a164ca65150c6c2bba02d0e16","tests/bounded.rs":"07a357eae995a79c5b6ac586037a86ed49df754ef3893d16891dc3c686299c6b","tests/loom.rs":"63e40d2598f80c97cada351c8db9c8d5e79d97bae870bdf9fe510d2b21510616","tests/single.rs":"7866f94d1c350e9a860aab550165806a8422649845ac6e9c95045886ce3e7659","tests/unbounded.rs":"3f49e41c33c14ab7ac255ef48c0af4f0f1cfcc9352fc73f21918df3039ff10d9"},"package":"4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"} \ No newline at end of file diff --git a/external/vendor/concurrent-queue/.cargo_vcs_info.json b/external/vendor/concurrent-queue/.cargo_vcs_info.json new file mode 100644 index 0000000000..33d501db65 --- /dev/null +++ b/external/vendor/concurrent-queue/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "e874f701f8413de01948c4903f894e5c845d8950" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/external/vendor/concurrent-queue/CHANGELOG.md b/external/vendor/concurrent-queue/CHANGELOG.md new file mode 100644 index 0000000000..f4f8bfd27f --- /dev/null +++ b/external/vendor/concurrent-queue/CHANGELOG.md @@ -0,0 +1,69 @@ +# Version 2.5.0 + +- Add a `force_push` method that can be used to add an element to the queue by displacing another. (#58) +- Make `ConcurrentQueue::unbounded()` into a `const` function. (#67) +- Fix a compilation error in the Loom implementation. (#65) + +# Version 2.4.0 + +- Remove unnecessary heap allocations from inside of the `ConcurrentQueue` type. (#53) + +# Version 2.3.0 + +- Implement `UnwindSafe` without libstd. (#49) +- Bump `fastrand` to `v2.0.0`. (#43) +- Use inline assembly in the `full_fence` funtion. (#47) + +# Version 2.2.0 + +- Add the try_iter method. (#36) + +# Version 2.1.0 + +- Update `portable-atomic` to 1.0. (#33) + +# Version 2.0.0 + +- Add support for the `portable-atomic` and `loom` crates. (#27) +- **Breaking:** Add an `std` feature that can be disabled to use this crate on `no_std` platforms. (#22) +- Replace usage of `cache-padded` with `crossbeam-utils`. (#26) + +# Version 1.2.4 + +- Fix fence on x86 and miri. (#18) +- Revert 1.2.3. (#18) + +# Version 1.2.3 + +**Note:** This release has been yanked, see #17 for details. + +- Fix fence on non-x86 architectures and miri. (#16) + +# Version 1.2.2 + +- Add a special, efficient `bounded(1)` implementation. + +# Version 1.2.1 + +- In the bounded queue, use boxed slice instead of raw pointers. + +# Version 1.2.0 + +- Update dependencies. 
+- Implement `UnwindSafe` and `RefUnwindSafe` for `ConcurrentQueue`. + +# Version 1.1.2 + +- Optimize `SeqCst` fences. + +# Version 1.1.1 + +- Clarify errors in docs. + +# Version 1.1.0 + +- Add extra methods to error types. + +# Version 1.0.0 + +- Initial version diff --git a/external/vendor/concurrent-queue/Cargo.toml b/external/vendor/concurrent-queue/Cargo.toml new file mode 100644 index 0000000000..cdce2b4b6b --- /dev/null +++ b/external/vendor/concurrent-queue/Cargo.toml @@ -0,0 +1,72 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60" +name = "concurrent-queue" +version = "2.5.0" +authors = [ + "Stjepan Glavina ", + "Taiki Endo ", + "John Nunley ", +] +exclude = ["/.*"] +description = "Concurrent multi-producer multi-consumer queue" +readme = "README.md" +keywords = [ + "channel", + "mpmc", + "spsc", + "spmc", + "mpsc", +] +categories = ["concurrency"] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/concurrent-queue" + +[lib] +bench = false + +[[bench]] +name = "bench" +harness = false + +[dependencies.crossbeam-utils] +version = "0.8.11" +default-features = false + +[dependencies.portable-atomic] +version = "1" +optional = true +default-features = false + +[dev-dependencies.criterion] +version = "0.5" +features = ["cargo_bench_support"] +default-features = false + +[dev-dependencies.easy-parallel] +version = "3.1.0" + +[dev-dependencies.fastrand] +version = "2.0.0" + +[features] +default = ["std"] +std = [] + +[target."cfg(loom)".dependencies.loom] +version = "0.7" +optional = true + +[target."cfg(target_family = \"wasm\")".dev-dependencies.wasm-bindgen-test] +version = "0.3" diff --git a/external/vendor/concurrent-queue/Cargo.toml.orig b/external/vendor/concurrent-queue/Cargo.toml.orig new file mode 100644 index 0000000000..462958cb74 --- /dev/null +++ b/external/vendor/concurrent-queue/Cargo.toml.orig @@ -0,0 +1,47 @@ +[package] +name = "concurrent-queue" +# When publishing a new version: +# - Update CHANGELOG.md +# - Create "v2.x.y" git tag +version = "2.5.0" +authors = [ + "Stjepan Glavina ", + "Taiki Endo ", + "John Nunley " +] +edition = "2021" +rust-version = "1.60" +description = "Concurrent multi-producer multi-consumer queue" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/concurrent-queue" +keywords = ["channel", "mpmc", "spsc", "spmc", "mpsc"] +categories = ["concurrency"] +exclude = ["/.*"] + +[lib] +bench = false + +[dependencies] +crossbeam-utils = { version = "0.8.11", default-features = false } +portable-atomic = { version = "1", default-features = false, optional = true } + +# Enables loom testing. This feature is permanently unstable and the API may +# change at any time. 
+[target.'cfg(loom)'.dependencies] +loom = { version = "0.7", optional = true } + +[[bench]] +name = "bench" +harness = false + +[dev-dependencies] +criterion = { version = "0.5", features = ["cargo_bench_support"], default-features = false } +easy-parallel = "3.1.0" +fastrand = "2.0.0" + +[target.'cfg(target_family = "wasm")'.dev-dependencies] +wasm-bindgen-test = "0.3" + +[features] +default = ["std"] +std = [] diff --git a/external/vendor/concurrent-queue/LICENSE-APACHE b/external/vendor/concurrent-queue/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/external/vendor/concurrent-queue/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/external/vendor/concurrent-queue/LICENSE-MIT b/external/vendor/concurrent-queue/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/external/vendor/concurrent-queue/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/external/vendor/concurrent-queue/README.md b/external/vendor/concurrent-queue/README.md new file mode 100644 index 0000000000..dfa9871d99 --- /dev/null +++ b/external/vendor/concurrent-queue/README.md @@ -0,0 +1,51 @@ +# concurrent-queue + +[![Build](https://github.com/smol-rs/concurrent-queue/workflows/Build%20and%20test/badge.svg)]( +https://github.com/smol-rs/concurrent-queue/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/concurrent-queue) +[![Cargo](https://img.shields.io/crates/v/concurrent-queue.svg)]( +https://crates.io/crates/concurrent-queue) +[![Documentation](https://docs.rs/concurrent-queue/badge.svg)]( +https://docs.rs/concurrent-queue) + +A concurrent multi-producer multi-consumer queue. + +There are two kinds of queues: + +1. Bounded queue with limited capacity. +2. Unbounded queue with unlimited capacity. + +Queues also have the capability to get closed at any point. When closed, no more items can be +pushed into the queue, although the remaining items can still be popped. 
+
+These features make it easy to build channels similar to `std::sync::mpsc` on top of this
+crate.
+
+## Examples
+
+```rust
+use concurrent_queue::ConcurrentQueue;
+
+let q = ConcurrentQueue::unbounded();
+q.push(1).unwrap();
+q.push(2).unwrap();
+
+assert_eq!(q.pop(), Ok(1));
+assert_eq!(q.pop(), Ok(2));
+```
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+#### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
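To ground the README's claim that `std::sync::mpsc`-style channels are easy to build on top of this crate, here is a minimal, self-contained sketch. The `channel` helper and the idea of handing out two `Arc` handles to one shared queue are illustrative assumptions, not crate API:

```rust
use std::sync::Arc;

use concurrent_queue::{ConcurrentQueue, PopError, PushError};

// Hypothetical helper: both "endpoints" are just handles to one shared queue.
fn channel<T>() -> (Arc<ConcurrentQueue<T>>, Arc<ConcurrentQueue<T>>) {
    let q = Arc::new(ConcurrentQueue::unbounded());
    (q.clone(), q)
}

fn main() {
    let (tx, rx) = channel::<i32>();

    // Any number of producer threads could clone `tx` and push concurrently.
    tx.push(1).unwrap();
    tx.push(2).unwrap();

    // Closing lets consumers distinguish "empty for now" from "hung up".
    tx.close();
    assert_eq!(tx.push(3), Err(PushError::Closed(3)));

    // Remaining items still drain, then the closed state is reported.
    assert_eq!(rx.pop(), Ok(1));
    assert_eq!(rx.pop(), Ok(2));
    assert_eq!(rx.pop(), Err(PopError::Closed));
}
```

A real channel would add blocking or async waiting on top; the queue deliberately only provides the non-blocking core.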
diff --git a/external/vendor/concurrent-queue/benches/bench.rs b/external/vendor/concurrent-queue/benches/bench.rs
new file mode 100644
index 0000000000..6e82019dda
--- /dev/null
+++ b/external/vendor/concurrent-queue/benches/bench.rs
@@ -0,0 +1,93 @@
+use std::{any::type_name, fmt::Debug};
+
+use concurrent_queue::{ConcurrentQueue, PopError};
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use easy_parallel::Parallel;
+
+const COUNT: usize = 100_000;
+const THREADS: usize = 7;
+
+fn spsc<T: Default>(recv: &ConcurrentQueue<T>, send: &ConcurrentQueue<T>) {
+    Parallel::new()
+        .add(|| loop {
+            match recv.pop() {
+                Ok(_) => (),
+                Err(PopError::Empty) => (),
+                Err(PopError::Closed) => break,
+            }
+        })
+        .add(|| {
+            for _ in 0..COUNT {
+                send.push(T::default()).unwrap();
+            }
+            send.close();
+        })
+        .run();
+}
+
+fn mpsc<T: Default>(recv: &ConcurrentQueue<T>, send: &ConcurrentQueue<T>) {
+    Parallel::new()
+        .each(0..THREADS, |_| {
+            for _ in 0..COUNT {
+                send.push(T::default()).unwrap();
+            }
+        })
+        .add(|| {
+            let mut received = 0;
+            while received < THREADS * COUNT {
+                match recv.pop() {
+                    Ok(_) => received += 1,
+                    Err(PopError::Empty) => (),
+                    Err(PopError::Closed) => unreachable!(),
+                }
+            }
+        })
+        .run();
+}
+
+fn single_thread<T: Default>(
+    recv: &ConcurrentQueue<T>,
+    send: &ConcurrentQueue<T>,
+) {
+    for _ in 0..COUNT {
+        send.push(T::default()).unwrap();
+    }
+    for _ in 0..COUNT {
+        recv.pop().unwrap();
+    }
+}
+
+// Because we can't pass generic functions as const parameters.
+macro_rules! bench_all(
+    ($name:ident, $f:ident) => {
+        fn $name(c: &mut Criterion) {
+            fn helper<T: Default + Debug>(c: &mut Criterion) {
+                let name = format!("unbounded_{}_{}", stringify!($f), type_name::<T>());
+
+                c.bench_function(&name, |b| b.iter(|| {
+                    let q = ConcurrentQueue::unbounded();
+                    $f::<T>(black_box(&q), black_box(&q));
+                }));
+
+                let name = format!("bounded_{}_{}", stringify!($f), type_name::<T>());
+
+                c.bench_function(&name, |b| b.iter(|| {
+                    let q = ConcurrentQueue::bounded(THREADS * COUNT);
+                    $f::<T>(black_box(&q), black_box(&q));
+                }));
+            }
+            helper::<u8>(c);
+            helper::<u16>(c);
+            helper::<u32>(c);
+            helper::<u64>(c);
+            helper::<u128>(c);
+        }
+    }
+);
+
+bench_all!(bench_spsc, spsc);
+bench_all!(bench_mpsc, mpsc);
+bench_all!(bench_single_thread, single_thread);
+
+criterion_group!(generic_group, bench_single_thread, bench_spsc, bench_mpsc);
+criterion_main!(generic_group);
diff --git a/external/vendor/concurrent-queue/src/bounded.rs b/external/vendor/concurrent-queue/src/bounded.rs
new file mode 100644
index 0000000000..dab3a2953b
--- /dev/null
+++ b/external/vendor/concurrent-queue/src/bounded.rs
@@ -0,0 +1,408 @@
+use alloc::{boxed::Box, vec::Vec};
+use core::mem::MaybeUninit;
+
+use crossbeam_utils::CachePadded;
+
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::cell::UnsafeCell;
+#[allow(unused_imports)]
+use crate::sync::prelude::*;
+use crate::{busy_wait, ForcePushError, PopError, PushError};
+
+/// A slot in a queue.
+struct Slot<T> {
+    /// The current stamp.
+    stamp: AtomicUsize,
+
+    /// The value in this slot.
+    value: UnsafeCell<MaybeUninit<T>>,
+}
+
+/// A bounded queue.
+pub struct Bounded<T> {
+    /// The head of the queue.
+    ///
+    /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
+    /// packed into a single `usize`. The lower bits represent the index, while the upper bits
+    /// represent the lap. The mark bit in the head is always zero.
+    ///
+    /// Values are popped from the head of the queue.
+    head: CachePadded<AtomicUsize>,
+
+    /// The tail of the queue.
+    ///
+    /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
+    /// packed into a single `usize`. The lower bits represent the index, while the upper bits
+    /// represent the lap. The mark bit indicates that the queue is closed.
+    ///
+    /// Values are pushed into the tail of the queue.
+    tail: CachePadded<AtomicUsize>,
+
+    /// The buffer holding slots.
+    buffer: Box<[Slot<T>]>,
+
+    /// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`.
+    one_lap: usize,
+
+    /// If this bit is set in the tail, that means the queue is closed.
+    mark_bit: usize,
+}
+
+impl<T> Bounded<T> {
+    /// Creates a new bounded queue.
+    pub fn new(cap: usize) -> Bounded<T> {
+        assert!(cap > 0, "capacity must be positive");
+
+        // Head is initialized to `{ lap: 0, mark: 0, index: 0 }`.
+        let head = 0;
+        // Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`.
+        let tail = 0;
+
+        // Allocate a buffer of `cap` slots initialized with stamps.
+        let mut buffer = Vec::with_capacity(cap);
+        for i in 0..cap {
+            // Set the stamp to `{ lap: 0, mark: 0, index: i }`.
+            buffer.push(Slot {
+                stamp: AtomicUsize::new(i),
+                value: UnsafeCell::new(MaybeUninit::uninit()),
+            });
+        }
+
+        // Compute constants `mark_bit` and `one_lap`.
+        let mark_bit = (cap + 1).next_power_of_two();
+        let one_lap = mark_bit * 2;
+
+        Bounded {
+            buffer: buffer.into(),
+            one_lap,
+            mark_bit,
+            head: CachePadded::new(AtomicUsize::new(head)),
+            tail: CachePadded::new(AtomicUsize::new(tail)),
+        }
+    }
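The stamp layout described in the comments above can be made concrete with a little arithmetic. A standalone sketch (not crate code) for a capacity-5 queue, where `mark_bit = (5 + 1).next_power_of_two() = 8` and `one_lap = 16`, so bits 0..=2 hold the index, bit 3 is the mark bit, and the remaining upper bits count laps:

```rust
fn main() {
    let cap = 5usize;
    let mark_bit = (cap + 1).next_power_of_two(); // 8
    let one_lap = mark_bit * 2; // 16

    // A stamp for "lap 3, index 4" with the mark bit clear:
    let stamp = 3 * one_lap + 4;

    // The same decompositions `push_or_else` and `pop` perform below:
    let index = stamp & (mark_bit - 1);
    let lap = stamp & !(one_lap - 1);
    let closed = stamp & mark_bit != 0;

    assert_eq!(index, 4);
    assert_eq!(lap, 3 * one_lap);
    assert!(!closed);
}
```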
+
+    /// Attempts to push an item into the queue.
+    pub fn push(&self, value: T) -> Result<(), PushError<T>> {
+        self.push_or_else(value, |value, tail, _, _| {
+            let head = self.head.load(Ordering::Relaxed);
+
+            // If the head lags one lap behind the tail as well...
+            if head.wrapping_add(self.one_lap) == tail {
+                // ...then the queue is full.
+                Err(PushError::Full(value))
+            } else {
+                Ok(value)
+            }
+        })
+    }
+
+    /// Pushes an item into the queue, displacing another item if needed.
+    pub fn force_push(&self, value: T) -> Result<Option<T>, ForcePushError<T>> {
+        let result = self.push_or_else(value, |value, tail, new_tail, slot| {
+            let head = tail.wrapping_sub(self.one_lap);
+            let new_head = new_tail.wrapping_sub(self.one_lap);
+
+            // Try to move the head.
+            if self
+                .head
+                .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Relaxed)
+                .is_ok()
+            {
+                // Move the tail.
+                self.tail.store(new_tail, Ordering::SeqCst);
+
+                // Swap out the old value.
+                // SAFETY: We know this is initialized, since it's covered by the current queue.
+                let old = unsafe {
+                    slot.value
+                        .with_mut(|slot| slot.replace(MaybeUninit::new(value)).assume_init())
+                };
+
+                // Update the stamp.
+                slot.stamp.store(tail + 1, Ordering::Release);
+
+                // Return a PushError.
+                Err(PushError::Full(old))
+            } else {
+                Ok(value)
+            }
+        });
+
+        match result {
+            Ok(()) => Ok(None),
+            Err(PushError::Full(old_value)) => Ok(Some(old_value)),
+            Err(PushError::Closed(value)) => Err(ForcePushError(value)),
+        }
+    }
+
+    /// Attempts to push an item into the queue, running a closure on failure.
+    ///
+    /// `fail` is run when there is no more room left in the tail of the queue. The parameters of
+    /// this function are as follows:
+    ///
+    /// - The item that failed to push.
+    /// - The value of `self.tail` before the new value would be inserted.
+    /// - The value of `self.tail` after the new value would be inserted.
+    /// - The slot that we attempted to push into.
+    ///
+    /// If `fail` returns `Ok(val)`, we will try pushing `val` to the head of the queue. Otherwise,
+    /// this function will return the error.
+    fn push_or_else<F>(&self, mut value: T, mut fail: F) -> Result<(), PushError<T>>
+    where
+        F: FnMut(T, usize, usize, &Slot<T>) -> Result<T, PushError<T>>,
+    {
+        let mut tail = self.tail.load(Ordering::Relaxed);
+
+        loop {
+            // Check if the queue is closed.
+            if tail & self.mark_bit != 0 {
+                return Err(PushError::Closed(value));
+            }
+
+            // Deconstruct the tail.
+            let index = tail & (self.mark_bit - 1);
+            let lap = tail & !(self.one_lap - 1);
+
+            // Calculate the new location of the tail.
+            let new_tail = if index + 1 < self.buffer.len() {
+                // Same lap, incremented index.
+                // Set to `{ lap: lap, mark: 0, index: index + 1 }`.
+                tail + 1
+            } else {
+                // One lap forward, index wraps around to zero.
+                // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
+                lap.wrapping_add(self.one_lap)
+            };
+
+            // Inspect the corresponding slot.
+            let slot = &self.buffer[index];
+            let stamp = slot.stamp.load(Ordering::Acquire);
+
+            // If the tail and the stamp match, we may attempt to push.
+            if tail == stamp {
+                // Try moving the tail.
+                match self.tail.compare_exchange_weak(
+                    tail,
+                    new_tail,
+                    Ordering::SeqCst,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => {
+                        // Write the value into the slot and update the stamp.
+                        slot.value.with_mut(|slot| unsafe {
+                            slot.write(MaybeUninit::new(value));
+                        });
+                        slot.stamp.store(tail + 1, Ordering::Release);
+                        return Ok(());
+                    }
+                    Err(t) => {
+                        tail = t;
+                    }
+                }
+            } else if stamp.wrapping_add(self.one_lap) == tail + 1 {
+                crate::full_fence();
+
+                // We've failed to push; run our failure closure.
+                value = fail(value, tail, new_tail, slot)?;
+
+                // Loom complains if there isn't an explicit busy wait here.
+                #[cfg(loom)]
+                busy_wait();
+
+                tail = self.tail.load(Ordering::Relaxed);
+            } else {
+                // Yield because we need to wait for the stamp to get updated.
+                busy_wait();
+                tail = self.tail.load(Ordering::Relaxed);
+            }
+        }
+    }
+
+    /// Attempts to pop an item from the queue.
+    pub fn pop(&self) -> Result<T, PopError> {
+        let mut head = self.head.load(Ordering::Relaxed);
+
+        loop {
+            // Deconstruct the head.
+            let index = head & (self.mark_bit - 1);
+            let lap = head & !(self.one_lap - 1);
+
+            // Inspect the corresponding slot.
+            let slot = &self.buffer[index];
+            let stamp = slot.stamp.load(Ordering::Acquire);
+
+            // If the stamp is ahead of the head by 1, we may attempt to pop.
+            if head + 1 == stamp {
+                let new = if index + 1 < self.buffer.len() {
+                    // Same lap, incremented index.
+                    // Set to `{ lap: lap, mark: 0, index: index + 1 }`.
+                    head + 1
+                } else {
+                    // One lap forward, index wraps around to zero.
+                    // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
+                    lap.wrapping_add(self.one_lap)
+                };
+
+                // Try moving the head.
+                match self.head.compare_exchange_weak(
+                    head,
+                    new,
+                    Ordering::SeqCst,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => {
+                        // Read the value from the slot and update the stamp.
+                        let value = slot
+                            .value
+                            .with_mut(|slot| unsafe { slot.read().assume_init() });
+                        slot.stamp
+                            .store(head.wrapping_add(self.one_lap), Ordering::Release);
+                        return Ok(value);
+                    }
+                    Err(h) => {
+                        head = h;
+                    }
+                }
+            } else if stamp == head {
+                crate::full_fence();
+                let tail = self.tail.load(Ordering::Relaxed);
+
+                // If the tail equals the head, that means the queue is empty.
+                if (tail & !self.mark_bit) == head {
+                    // Check if the queue is closed.
+                    if tail & self.mark_bit != 0 {
+                        return Err(PopError::Closed);
+                    } else {
+                        return Err(PopError::Empty);
+                    }
+                }
+
+                // Loom complains if there isn't a busy-wait here.
+                #[cfg(loom)]
+                busy_wait();
+
+                head = self.head.load(Ordering::Relaxed);
+            } else {
+                // Yield because we need to wait for the stamp to get updated.
+                busy_wait();
+                head = self.head.load(Ordering::Relaxed);
+            }
+        }
+    }
+
+    /// Returns the number of items in the queue.
+    pub fn len(&self) -> usize {
+        loop {
+            // Load the tail, then load the head.
+            let tail = self.tail.load(Ordering::SeqCst);
+            let head = self.head.load(Ordering::SeqCst);
+
+            // If the tail didn't change, we've got consistent values to work with.
+            if self.tail.load(Ordering::SeqCst) == tail {
+                let hix = head & (self.mark_bit - 1);
+                let tix = tail & (self.mark_bit - 1);
+
+                return if hix < tix {
+                    tix - hix
+                } else if hix > tix {
+                    self.buffer.len() - hix + tix
+                } else if (tail & !self.mark_bit) == head {
+                    0
+                } else {
+                    self.buffer.len()
+                };
+            }
+        }
+    }
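The wrap-around branch of `len` is the subtle one. A worked, standalone example (not crate code) for a 4-slot buffer whose head has lapped once, so `mark_bit = 8`, `one_lap = 16`, and two items are in flight:

```rust
fn main() {
    let buffer_len = 4usize;
    let mark_bit = (buffer_len + 1).next_power_of_two(); // 8
    let one_lap = mark_bit * 2; // 16

    let head = one_lap + 3; // lap 1, index 3
    let tail = 2 * one_lap + 1; // lap 2, index 1

    let hix = head & (mark_bit - 1); // 3
    let tix = tail & (mark_bit - 1); // 1

    // Same decision tree as `len` above: the head index is larger, so the
    // live items wrap around the end of the buffer.
    let len = if hix < tix {
        tix - hix
    } else if hix > tix {
        buffer_len - hix + tix
    } else if (tail & !mark_bit) == head {
        0
    } else {
        buffer_len
    };
    assert_eq!(len, 2); // slot 3 of lap 1, plus slot 0 of lap 2
}
```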
+
+    /// Returns `true` if the queue is empty.
+    pub fn is_empty(&self) -> bool {
+        let head = self.head.load(Ordering::SeqCst);
+        let tail = self.tail.load(Ordering::SeqCst);
+
+        // Is the tail equal to the head?
+        //
+        // Note: If the head changes just before we load the tail, that means there was a moment
+        // when the queue was not empty, so it is safe to just return `false`.
+        (tail & !self.mark_bit) == head
+    }
+
+    /// Returns `true` if the queue is full.
+    pub fn is_full(&self) -> bool {
+        let tail = self.tail.load(Ordering::SeqCst);
+        let head = self.head.load(Ordering::SeqCst);
+
+        // Is the head lagging one lap behind tail?
+        //
+        // Note: If the tail changes just before we load the head, that means there was a moment
+        // when the queue was not full, so it is safe to just return `false`.
+        head.wrapping_add(self.one_lap) == tail & !self.mark_bit
+    }
+
+    /// Returns the capacity of the queue.
+    pub fn capacity(&self) -> usize {
+        self.buffer.len()
+    }
+
+    /// Closes the queue.
+    ///
+    /// Returns `true` if this call closed the queue.
+    pub fn close(&self) -> bool {
+        let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
+        tail & self.mark_bit == 0
+    }
+
+    /// Returns `true` if the queue is closed.
+    pub fn is_closed(&self) -> bool {
+        self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
+    }
+}
+
+impl<T> Drop for Bounded<T> {
+    fn drop(&mut self) {
+        // Get the index of the head.
+        let Self {
+            head,
+            tail,
+            buffer,
+            mark_bit,
+            ..
+        } = self;
+
+        let mark_bit = *mark_bit;
+
+        head.with_mut(|&mut head| {
+            tail.with_mut(|&mut tail| {
+                let hix = head & (mark_bit - 1);
+                let tix = tail & (mark_bit - 1);
+
+                let len = if hix < tix {
+                    tix - hix
+                } else if hix > tix {
+                    buffer.len() - hix + tix
+                } else if (tail & !mark_bit) == head {
+                    0
+                } else {
+                    buffer.len()
+                };
+
+                // Loop over all slots that hold a value and drop them.
+                for i in 0..len {
+                    // Compute the index of the next slot holding a value.
+                    let index = if hix + i < buffer.len() {
+                        hix + i
+                    } else {
+                        hix + i - buffer.len()
+                    };
+
+                    // Drop the value in the slot.
+                    let slot = &buffer[index];
+                    slot.value.with_mut(|slot| unsafe {
+                        let value = &mut *slot;
+                        value.as_mut_ptr().drop_in_place();
+                    });
+                }
+            });
+        });
+    }
+}
diff --git a/external/vendor/concurrent-queue/src/lib.rs b/external/vendor/concurrent-queue/src/lib.rs
new file mode 100644
index 0000000000..a4d26b501f
--- /dev/null
+++ b/external/vendor/concurrent-queue/src/lib.rs
@@ -0,0 +1,660 @@
+//! A concurrent multi-producer multi-consumer queue.
+//!
+//! There are two kinds of queues:
+//!
+//! 1. [Bounded] queue with limited capacity.
+//! 2. [Unbounded] queue with unlimited capacity.
+//!
+//! Queues also have the capability to get [closed] at any point. When closed, no more items can be
+//! pushed into the queue, although the remaining items can still be popped.
+//!
+//! These features make it easy to build channels similar to [`std::sync::mpsc`] on top of this
+//! crate.
+//!
+//! # Examples
+//!
+//! ```
+//! use concurrent_queue::ConcurrentQueue;
+//!
+//! let q = ConcurrentQueue::unbounded();
+//! q.push(1).unwrap();
+//! q.push(2).unwrap();
+//!
+//! assert_eq!(q.pop(), Ok(1));
+//! assert_eq!(q.pop(), Ok(2));
+//! ```
+//!
+//! # Features
+//!
+//! `concurrent-queue` uses an `std` default feature. With this feature enabled, this crate will
+//! use [`std::thread::yield_now`] to avoid busy waiting in tight loops. However, with this
+//! feature disabled, [`core::hint::spin_loop`] will be used instead. Disabling `std` will allow
+//! this crate to be used on `no_std` platforms at the potential expense of more busy waiting.
+//!
+//! There is also a `portable-atomic` feature, which uses a polyfill from the
+//! [`portable-atomic`] crate to provide atomic operations on platforms that do not support them.
+//! See the [`README`] for the [`portable-atomic`] crate for more information on how to use it.
+//! Note that even with this feature enabled, `concurrent-queue` still requires a global allocator
+//! to be available. See the documentation for the [`std::alloc::GlobalAlloc`] trait for more
+//! information.
+//!
+//! [Bounded]: `ConcurrentQueue::bounded()`
+//! [Unbounded]: `ConcurrentQueue::unbounded()`
+//! [closed]: `ConcurrentQueue::close()`
+//! [`portable-atomic`]: https://crates.io/crates/portable-atomic
+//! [`README`]: https://github.com/taiki-e/portable-atomic/blob/main/README.md#optional-cfg
+
+#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
+#![no_std]
+#![doc(
+    html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
+)]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
+)]
+
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate std;
+
+use core::fmt;
+use core::panic::{RefUnwindSafe, UnwindSafe};
+use sync::atomic::{self, Ordering};
+
+#[cfg(feature = "std")]
+use std::error;
+
+use crate::bounded::Bounded;
+use crate::single::Single;
+use crate::sync::busy_wait;
+use crate::unbounded::Unbounded;
+
+mod bounded;
+mod single;
+mod unbounded;
+
+mod sync;
+
+/// Make the given function const if the given condition is true.
+macro_rules! const_fn {
+    (
+        const_if: #[cfg($($cfg:tt)+)];
+        $(#[$($attr:tt)*])*
+        $vis:vis const fn $($rest:tt)*
+    ) => {
+        #[cfg($($cfg)+)]
+        $(#[$($attr)*])*
+        $vis const fn $($rest)*
+        #[cfg(not($($cfg)+))]
+        $(#[$($attr)*])*
+        $vis fn $($rest)*
+    };
+}
+
+pub(crate) use const_fn;
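To see what `const_fn!` buys, here is a self-contained sketch of the same trick using the built-in `debug_assertions` cfg as a stand-in condition (the crate itself conditions on `loom`, whose types lack `const` constructors): one invocation yields a `const fn` under one configuration and a plain `fn` under the other.

```rust
macro_rules! const_fn {
    (
        const_if: #[cfg($($cfg:tt)+)];
        $(#[$($attr:tt)*])*
        $vis:vis const fn $($rest:tt)*
    ) => {
        #[cfg($($cfg)+)]
        $(#[$($attr)*])*
        $vis const fn $($rest)*
        #[cfg(not($($cfg)+))]
        $(#[$($attr)*])*
        $vis fn $($rest)*
    };
}

const_fn!(
    const_if: #[cfg(not(debug_assertions))];
    /// The answer, `const` whenever the cfg allows it.
    pub const fn answer() -> u32 {
        42
    }
);

fn main() {
    // In a release build the function is `const` and usable in const contexts:
    #[cfg(not(debug_assertions))]
    const A: u32 = answer();
    #[cfg(not(debug_assertions))]
    assert_eq!(A, 42);

    // Either way, it is an ordinary function at runtime.
    assert_eq!(answer(), 42);
}
```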
+
+/// A concurrent queue.
+///
+/// # Examples
+///
+/// ```
+/// use concurrent_queue::{ConcurrentQueue, PopError, PushError};
+///
+/// let q = ConcurrentQueue::bounded(2);
+///
+/// assert_eq!(q.push('a'), Ok(()));
+/// assert_eq!(q.push('b'), Ok(()));
+/// assert_eq!(q.push('c'), Err(PushError::Full('c')));
+///
+/// assert_eq!(q.pop(), Ok('a'));
+/// assert_eq!(q.pop(), Ok('b'));
+/// assert_eq!(q.pop(), Err(PopError::Empty));
+/// ```
+pub struct ConcurrentQueue<T>(Inner<T>);
+
+unsafe impl<T: Send> Send for ConcurrentQueue<T> {}
+unsafe impl<T: Send> Sync for ConcurrentQueue<T> {}
+
+impl<T> UnwindSafe for ConcurrentQueue<T> {}
+impl<T> RefUnwindSafe for ConcurrentQueue<T> {}
+
+#[allow(clippy::large_enum_variant)]
+enum Inner<T> {
+    Single(Single<T>),
+    Bounded(Bounded<T>),
+    Unbounded(Unbounded<T>),
+}
+
+impl<T> ConcurrentQueue<T> {
+    /// Creates a new bounded queue.
+    ///
+    /// The queue allocates enough space for `cap` items.
+    ///
+    /// # Panics
+    ///
+    /// If the capacity is zero, this constructor will panic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::ConcurrentQueue;
+    ///
+    /// let q = ConcurrentQueue::<i32>::bounded(100);
+    /// ```
+    pub fn bounded(cap: usize) -> ConcurrentQueue<T> {
+        if cap == 1 {
+            ConcurrentQueue(Inner::Single(Single::new()))
+        } else {
+            ConcurrentQueue(Inner::Bounded(Bounded::new(cap)))
+        }
+    }
+
+    const_fn!(
+        const_if: #[cfg(not(loom))];
+        /// Creates a new unbounded queue.
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use concurrent_queue::ConcurrentQueue;
+        ///
+        /// let q = ConcurrentQueue::<i32>::unbounded();
+        /// ```
+        pub const fn unbounded() -> ConcurrentQueue<T> {
+            ConcurrentQueue(Inner::Unbounded(Unbounded::new()))
+        }
+    );
+
+    /// Attempts to push an item into the queue.
+    ///
+    /// If the queue is full or closed, the item is returned back as an error.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::{ConcurrentQueue, PushError};
+    ///
+    /// let q = ConcurrentQueue::bounded(1);
+    ///
+    /// // Push succeeds because there is space in the queue.
+    /// assert_eq!(q.push(10), Ok(()));
+    ///
+    /// // Push errors because the queue is now full.
+    /// assert_eq!(q.push(20), Err(PushError::Full(20)));
+    ///
+    /// // Close the queue, which will prevent further pushes.
+    /// q.close();
+    ///
+    /// // Pushing now errors indicating the queue is closed.
+    /// assert_eq!(q.push(20), Err(PushError::Closed(20)));
+    ///
+    /// // Pop the single item in the queue.
+    /// assert_eq!(q.pop(), Ok(10));
+    ///
+    /// // Even though there is space, no more items can be pushed.
+    /// assert_eq!(q.push(20), Err(PushError::Closed(20)));
+    /// ```
+    pub fn push(&self, value: T) -> Result<(), PushError<T>> {
+        match &self.0 {
+            Inner::Single(q) => q.push(value),
+            Inner::Bounded(q) => q.push(value),
+            Inner::Unbounded(q) => q.push(value),
+        }
+    }
+
+    /// Push an element into the queue, potentially displacing another element.
+    ///
+    /// Attempts to push an element into the queue. If the queue is full, one item from the
+    /// queue is replaced with the provided item. The displaced item is returned as `Some(T)`.
+    /// If the queue is closed, an error is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::{ConcurrentQueue, ForcePushError, PushError};
+    ///
+    /// let q = ConcurrentQueue::bounded(3);
+    ///
+    /// // We can push to the queue.
+    /// for i in 1..=3 {
+    ///     assert_eq!(q.force_push(i), Ok(None));
+    /// }
+    ///
+    /// // Push errors because the queue is now full.
+    /// assert_eq!(q.push(4), Err(PushError::Full(4)));
+    ///
+    /// // Pushing a new value replaces the old ones.
+    /// assert_eq!(q.force_push(5), Ok(Some(1)));
+    /// assert_eq!(q.force_push(6), Ok(Some(2)));
+    ///
+    /// // Close the queue to stop further pushes.
+    /// q.close();
+    ///
+    /// // Pushing will return an error.
+    /// assert_eq!(q.force_push(7), Err(ForcePushError(7)));
+    ///
+    /// // Popping items will return the force-pushed ones.
+    /// assert_eq!(q.pop(), Ok(3));
+    /// assert_eq!(q.pop(), Ok(5));
+    /// assert_eq!(q.pop(), Ok(6));
+    /// ```
+    pub fn force_push(&self, value: T) -> Result<Option<T>, ForcePushError<T>> {
+        match &self.0 {
+            Inner::Single(q) => q.force_push(value),
+            Inner::Bounded(q) => q.force_push(value),
+            Inner::Unbounded(q) => match q.push(value) {
+                Ok(()) => Ok(None),
+                Err(PushError::Closed(value)) => Err(ForcePushError(value)),
+                Err(PushError::Full(_)) => unreachable!(),
+            },
+        }
+    }
+
+    /// Attempts to pop an item from the queue.
+    ///
+    /// If the queue is empty, an error is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::{ConcurrentQueue, PopError};
+    ///
+    /// let q = ConcurrentQueue::bounded(1);
+    ///
+    /// // Pop errors when the queue is empty.
+    /// assert_eq!(q.pop(), Err(PopError::Empty));
+    ///
+    /// // Push one item and close the queue.
+    /// assert_eq!(q.push(10), Ok(()));
+    /// q.close();
+    ///
+    /// // Remaining items can be popped.
+    /// assert_eq!(q.pop(), Ok(10));
+    ///
+    /// // Again, pop errors when the queue is empty,
+    /// // but now also indicates that the queue is closed.
+    /// assert_eq!(q.pop(), Err(PopError::Closed));
+    /// ```
+    pub fn pop(&self) -> Result<T, PopError> {
+        match &self.0 {
+            Inner::Single(q) => q.pop(),
+            Inner::Bounded(q) => q.pop(),
+            Inner::Unbounded(q) => q.pop(),
+        }
+    }
+
+    /// Get an iterator over the items in the queue.
+    ///
+    /// The iterator will continue until the queue is empty or closed. It will never block;
+    /// if the queue is empty, the iterator will return `None`. If new items are pushed into
+    /// the queue, the iterator may return `Some` in the future after returning `None`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::ConcurrentQueue;
+    ///
+    /// let q = ConcurrentQueue::bounded(5);
+    /// q.push(1).unwrap();
+    /// q.push(2).unwrap();
+    /// q.push(3).unwrap();
+    ///
+    /// let mut iter = q.try_iter();
+    /// assert_eq!(iter.by_ref().sum::<i32>(), 6);
+    /// assert_eq!(iter.next(), None);
+    ///
+    /// // Pushing more items will make them available to the iterator.
+    /// q.push(4).unwrap();
+    /// assert_eq!(iter.next(), Some(4));
+    /// assert_eq!(iter.next(), None);
+    /// ```
+    pub fn try_iter(&self) -> TryIter<'_, T> {
+        TryIter { queue: self }
+    }
+
+    /// Returns `true` if the queue is empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::ConcurrentQueue;
+    ///
+    /// let q = ConcurrentQueue::<i32>::unbounded();
+    ///
+    /// assert!(q.is_empty());
+    /// q.push(1).unwrap();
+    /// assert!(!q.is_empty());
+    /// ```
+    pub fn is_empty(&self) -> bool {
+        match &self.0 {
+            Inner::Single(q) => q.is_empty(),
+            Inner::Bounded(q) => q.is_empty(),
+            Inner::Unbounded(q) => q.is_empty(),
+        }
+    }
+
+    /// Returns `true` if the queue is full.
+    ///
+    /// An unbounded queue is never full.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::ConcurrentQueue;
+    ///
+    /// let q = ConcurrentQueue::bounded(1);
+    ///
+    /// assert!(!q.is_full());
+    /// q.push(1).unwrap();
+    /// assert!(q.is_full());
+    /// ```
+    pub fn is_full(&self) -> bool {
+        match &self.0 {
+            Inner::Single(q) => q.is_full(),
+            Inner::Bounded(q) => q.is_full(),
+            Inner::Unbounded(q) => q.is_full(),
+        }
+    }
+
+    /// Returns the number of items in the queue.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::ConcurrentQueue;
+    ///
+    /// let q = ConcurrentQueue::unbounded();
+    /// assert_eq!(q.len(), 0);
+    ///
+    /// assert_eq!(q.push(10), Ok(()));
+    /// assert_eq!(q.len(), 1);
+    ///
+    /// assert_eq!(q.push(20), Ok(()));
+    /// assert_eq!(q.len(), 2);
+    /// ```
+    pub fn len(&self) -> usize {
+        match &self.0 {
+            Inner::Single(q) => q.len(),
+            Inner::Bounded(q) => q.len(),
+            Inner::Unbounded(q) => q.len(),
+        }
+    }
+
+    /// Returns the capacity of the queue.
+    ///
+    /// Unbounded queues have infinite capacity, represented as [`None`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::ConcurrentQueue;
+    ///
+    /// let q = ConcurrentQueue::<i32>::bounded(7);
+    /// assert_eq!(q.capacity(), Some(7));
+    ///
+    /// let q = ConcurrentQueue::<i32>::unbounded();
+    /// assert_eq!(q.capacity(), None);
+    /// ```
+    pub fn capacity(&self) -> Option<usize> {
+        match &self.0 {
+            Inner::Single(_) => Some(1),
+            Inner::Bounded(q) => Some(q.capacity()),
+            Inner::Unbounded(_) => None,
+        }
+    }
+
+    /// Closes the queue.
+    ///
+    /// Returns `true` if this call closed the queue, or `false` if it was already closed.
+    ///
+    /// When a queue is closed, no more items can be pushed but the remaining items can still be
+    /// popped.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::{ConcurrentQueue, PopError, PushError};
+    ///
+    /// let q = ConcurrentQueue::unbounded();
+    /// assert_eq!(q.push(10), Ok(()));
+    ///
+    /// assert!(q.close()); // `true` because this call closes the queue.
+    /// assert!(!q.close()); // `false` because the queue is already closed.
+    ///
+    /// // Cannot push any more items when closed.
+    /// assert_eq!(q.push(20), Err(PushError::Closed(20)));
+    ///
+    /// // Remaining items can still be popped.
+    /// assert_eq!(q.pop(), Ok(10));
+    ///
+    /// // When no more items are present, the error is `Closed`.
+    /// assert_eq!(q.pop(), Err(PopError::Closed));
+    /// ```
+    pub fn close(&self) -> bool {
+        match &self.0 {
+            Inner::Single(q) => q.close(),
+            Inner::Bounded(q) => q.close(),
+            Inner::Unbounded(q) => q.close(),
+        }
+    }
+
+    /// Returns `true` if the queue is closed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use concurrent_queue::ConcurrentQueue;
+    ///
+    /// let q = ConcurrentQueue::<i32>::unbounded();
+    ///
+    /// assert!(!q.is_closed());
+    /// q.close();
+    /// assert!(q.is_closed());
+    /// ```
+    pub fn is_closed(&self) -> bool {
+        match &self.0 {
+            Inner::Single(q) => q.is_closed(),
+            Inner::Bounded(q) => q.is_closed(),
+            Inner::Unbounded(q) => q.is_closed(),
+        }
+    }
+}
+
+impl<T> fmt::Debug for ConcurrentQueue<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ConcurrentQueue")
+            .field("len", &self.len())
+            .field("capacity", &self.capacity())
+            .field("is_closed", &self.is_closed())
+            .finish()
+    }
+}
+
+/// An iterator that pops items from a [`ConcurrentQueue`].
+///
+/// This iterator will never block; it will return `None` once the queue has
+/// been exhausted. Calling `next` after `None` may yield `Some(item)` if more items
+/// are pushed to the queue.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[derive(Clone)]
+pub struct TryIter<'a, T> {
+    queue: &'a ConcurrentQueue<T>,
+}
+
+impl<T> fmt::Debug for TryIter<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("Iter").field(&self.queue).finish()
+    }
+}
+
+impl<T> Iterator for TryIter<'_, T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.queue.pop().ok()
+    }
+}
+
+/// Error which occurs when popping from an empty queue.
+#[derive(Clone, Copy, Eq, PartialEq)]
+pub enum PopError {
+    /// The queue is empty but not closed.
+    Empty,
+
+    /// The queue is empty and closed.
+    Closed,
+}
+
+impl PopError {
+    /// Returns `true` if the queue is empty but not closed.
+    pub fn is_empty(&self) -> bool {
+        match self {
+            PopError::Empty => true,
+            PopError::Closed => false,
+        }
+    }
+
+    /// Returns `true` if the queue is empty and closed.
+    pub fn is_closed(&self) -> bool {
+        match self {
+            PopError::Empty => false,
+            PopError::Closed => true,
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl error::Error for PopError {}
+
+impl fmt::Debug for PopError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            PopError::Empty => write!(f, "Empty"),
+            PopError::Closed => write!(f, "Closed"),
+        }
+    }
+}
+
+impl fmt::Display for PopError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            PopError::Empty => write!(f, "Empty"),
+            PopError::Closed => write!(f, "Closed"),
+        }
+    }
+}
+
+/// Error which occurs when pushing into a full or closed queue.
+#[derive(Clone, Copy, Eq, PartialEq)]
+pub enum PushError<T> {
+    /// The queue is full but not closed.
+    Full(T),
+
+    /// The queue is closed.
+    Closed(T),
+}
+
+impl<T> PushError<T> {
+    /// Unwraps the item that couldn't be pushed.
+    pub fn into_inner(self) -> T {
+        match self {
+            PushError::Full(t) => t,
+            PushError::Closed(t) => t,
+        }
+    }
+
+    /// Returns `true` if the queue is full but not closed.
+    pub fn is_full(&self) -> bool {
+        match self {
+            PushError::Full(_) => true,
+            PushError::Closed(_) => false,
+        }
+    }
+
+    /// Returns `true` if the queue is closed.
+    pub fn is_closed(&self) -> bool {
+        match self {
+            PushError::Full(_) => false,
+            PushError::Closed(_) => true,
+        }
+    }
+}
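A short usage sketch (not crate code) of how callers typically consume these error types; note that `PushError` hands ownership of the rejected item back to the caller:

```rust
use concurrent_queue::{ConcurrentQueue, PopError, PushError};

fn main() {
    let q = ConcurrentQueue::bounded(1);
    q.push("a").unwrap();

    // The rejected item travels inside the error, so nothing is lost.
    match q.push("b") {
        Ok(()) => unreachable!("the queue only holds one item"),
        Err(PushError::Full(item)) => println!("queue full, got {item} back"),
        Err(PushError::Closed(item)) => println!("queue closed, got {item} back"),
    }

    q.close();
    assert_eq!(q.pop(), Ok("a"));
    match q.pop() {
        Err(PopError::Closed) => println!("drained and closed"),
        other => unreachable!("expected Closed, got {other:?}"),
    }
}
```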
+
+#[cfg(feature = "std")]
+impl<T: fmt::Debug> error::Error for PushError<T> {}
+
+impl<T: fmt::Debug> fmt::Debug for PushError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            PushError::Full(t) => f.debug_tuple("Full").field(t).finish(),
+            PushError::Closed(t) => f.debug_tuple("Closed").field(t).finish(),
+        }
+    }
+}
+
+impl<T> fmt::Display for PushError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            PushError::Full(_) => write!(f, "Full"),
+            PushError::Closed(_) => write!(f, "Closed"),
+        }
+    }
+}
+
+/// Error that occurs when force-pushing into a full queue.
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct ForcePushError<T>(pub T);
+
+impl<T> ForcePushError<T> {
+    /// Return the inner value that failed to be force-pushed.
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ForcePushError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("ForcePushError").field(&self.0).finish()
+    }
+}
+
+impl<T> fmt::Display for ForcePushError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Closed")
+    }
+}
+
+#[cfg(feature = "std")]
+impl<T: fmt::Debug> error::Error for ForcePushError<T> {}
+
+/// Equivalent to `atomic::fence(Ordering::SeqCst)`, but in some cases faster.
+#[inline]
+fn full_fence() {
+    #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri), not(loom)))]
+    {
+        use core::{arch::asm, cell::UnsafeCell};
+        // HACK(stjepang): On x86 architectures there are two different ways of executing
+        // a `SeqCst` fence.
+        //
+        // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
+        // 2. A `lock <op>` instruction.
+        //
+        // Both instructions have the effect of a full barrier, but empirical benchmarks have shown
+        // that the second one is sometimes a bit faster.
+        let a = UnsafeCell::new(0_usize);
+        // It is common to use `lock or` here, but when using a local variable, `lock not`, which
+        // does not change the flag, should be slightly more efficient.
+        // Refs: https://www.felixcloutier.com/x86/not
+        unsafe {
+            #[cfg(target_pointer_width = "64")]
+            asm!("lock not qword ptr [{0}]", in(reg) a.get(), options(nostack, preserves_flags));
+            #[cfg(target_pointer_width = "32")]
+            asm!("lock not dword ptr [{0:e}]", in(reg) a.get(), options(nostack, preserves_flags));
+        }
+        return;
+    }
+    #[allow(unreachable_code)]
+    {
+        atomic::fence(Ordering::SeqCst);
+    }
+}
diff --git a/external/vendor/concurrent-queue/src/single.rs b/external/vendor/concurrent-queue/src/single.rs
new file mode 100644
index 0000000000..f88c4783a0
--- /dev/null
+++ b/external/vendor/concurrent-queue/src/single.rs
@@ -0,0 +1,187 @@
+use core::mem::MaybeUninit;
+use core::ptr;
+
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::cell::UnsafeCell;
+#[allow(unused_imports)]
+use crate::sync::prelude::*;
+use crate::{busy_wait, ForcePushError, PopError, PushError};
+
+const LOCKED: usize = 1 << 0;
+const PUSHED: usize = 1 << 1;
+const CLOSED: usize = 1 << 2;
+
+/// A single-element queue.
+pub struct Single<T> {
+    state: AtomicUsize,
+    slot: UnsafeCell<MaybeUninit<T>>,
+}
+
+impl<T> Single<T> {
+    /// Creates a new single-element queue.
+    pub fn new() -> Single<T> {
+        Single {
+            state: AtomicUsize::new(0),
+            slot: UnsafeCell::new(MaybeUninit::uninit()),
+        }
+    }
+
+    /// Attempts to push an item into the queue.
+    pub fn push(&self, value: T) -> Result<(), PushError<T>> {
+        // Lock and fill the slot.
+        let state = self
+            .state
+            .compare_exchange(0, LOCKED | PUSHED, Ordering::SeqCst, Ordering::SeqCst)
+            .unwrap_or_else(|x| x);
+
+        if state == 0 {
+            // Write the value and unlock.
+            self.slot.with_mut(|slot| unsafe {
+                slot.write(MaybeUninit::new(value));
+            });
+            self.state.fetch_and(!LOCKED, Ordering::Release);
+            Ok(())
+        } else if state & CLOSED != 0 {
+            Err(PushError::Closed(value))
+        } else {
+            Err(PushError::Full(value))
+        }
+    }
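The three state bits form a tiny lock. This standalone sketch (not crate code) walks the state word through the same transitions `push` and `close` perform:

```rust
const LOCKED: usize = 1 << 0; // a thread is currently reading/writing the slot
const PUSHED: usize = 1 << 1; // the slot holds a value
const CLOSED: usize = 1 << 2; // the queue is closed

fn main() {
    let state = 0usize; // empty, unlocked, open: the only state `push` accepts
    let state = state | LOCKED | PUSHED; // push in progress, slot reserved
    let state = state & !LOCKED; // write finished; the value is now visible
    assert_eq!(state, PUSHED);

    let state = state | CLOSED; // close() sets the bit; the value stays poppable
    assert!(state & PUSHED != 0 && state & CLOSED != 0);
}
```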
+
+    /// Attempts to push an item into the queue, displacing another if necessary.
+    pub fn force_push(&self, value: T) -> Result<Option<T>, ForcePushError<T>> {
+        // Attempt to lock the slot.
+        let mut state = 0;
+
+        loop {
+            // Lock the slot.
+            let prev = self
+                .state
+                .compare_exchange(state, LOCKED | PUSHED, Ordering::SeqCst, Ordering::SeqCst)
+                .unwrap_or_else(|x| x);
+
+            if prev & CLOSED != 0 {
+                return Err(ForcePushError(value));
+            }
+
+            if prev == state {
+                // If the value was pushed, swap out the value.
+                let prev_value = if prev & PUSHED == 0 {
+                    // SAFETY: write is safe because we have locked the state.
+                    self.slot.with_mut(|slot| unsafe {
+                        slot.write(MaybeUninit::new(value));
+                    });
+                    None
+                } else {
+                    // SAFETY: replace is safe because we have locked the state, and
+                    // assume_init is safe because we have checked that the value was pushed.
+                    let prev_value = unsafe {
+                        self.slot.with_mut(move |slot| {
+                            ptr::replace(slot, MaybeUninit::new(value)).assume_init()
+                        })
+                    };
+                    Some(prev_value)
+                };
+
+                // We can unlock the slot now.
+                self.state.fetch_and(!LOCKED, Ordering::Release);
+
+                // Return the old value.
+                return Ok(prev_value);
+            }
+
+            // Try to go for the current (pushed) state.
+            if prev & LOCKED == 0 {
+                state = prev;
+            } else {
+                // State is locked.
+                busy_wait();
+                state = prev & !LOCKED;
+            }
+        }
+    }
+
+    /// Attempts to pop an item from the queue.
+    pub fn pop(&self) -> Result<T, PopError> {
+        let mut state = PUSHED;
+        loop {
+            // Lock and empty the slot.
+            let prev = self
+                .state
+                .compare_exchange(
+                    state,
+                    (state | LOCKED) & !PUSHED,
+                    Ordering::SeqCst,
+                    Ordering::SeqCst,
+                )
+                .unwrap_or_else(|x| x);
+
+            if prev == state {
+                // Read the value and unlock.
+                let value = self
+                    .slot
+                    .with_mut(|slot| unsafe { slot.read().assume_init() });
+                self.state.fetch_and(!LOCKED, Ordering::Release);
+                return Ok(value);
+            }
+
+            if prev & PUSHED == 0 {
+                if prev & CLOSED == 0 {
+                    return Err(PopError::Empty);
+                } else {
+                    return Err(PopError::Closed);
+                }
+            }
+
+            if prev & LOCKED == 0 {
+                state = prev;
+            } else {
+                busy_wait();
+                state = prev & !LOCKED;
+            }
+        }
+    }
+
+    /// Returns the number of items in the queue.
+    pub fn len(&self) -> usize {
+        usize::from(self.state.load(Ordering::SeqCst) & PUSHED != 0)
+    }
+
+    /// Returns `true` if the queue is empty.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns `true` if the queue is full.
+    pub fn is_full(&self) -> bool {
+        self.len() == 1
+    }
+
+    /// Closes the queue.
+    ///
+    /// Returns `true` if this call closed the queue.
+    pub fn close(&self) -> bool {
+        let state = self.state.fetch_or(CLOSED, Ordering::SeqCst);
+        state & CLOSED == 0
+    }
+
+    /// Returns `true` if the queue is closed.
+    pub fn is_closed(&self) -> bool {
+        self.state.load(Ordering::SeqCst) & CLOSED != 0
+    }
+}
+
+impl<T> Drop for Single<T> {
+    fn drop(&mut self) {
+        // Drop the value in the slot.
+        let Self { state, slot } = self;
+        state.with_mut(|state| {
+            if *state & PUSHED != 0 {
+                slot.with_mut(|slot| unsafe {
+                    let value = &mut *slot;
+                    value.as_mut_ptr().drop_in_place();
+                });
+            }
+        });
+    }
+}
diff --git a/external/vendor/concurrent-queue/src/sync.rs b/external/vendor/concurrent-queue/src/sync.rs
new file mode 100644
index 0000000000..d1b0a89a1b
--- /dev/null
+++ b/external/vendor/concurrent-queue/src/sync.rs
@@ -0,0 +1,114 @@
+//! Synchronization facade to choose between `core` primitives and `loom` primitives.
+
+#[cfg(all(feature = "portable-atomic", not(loom)))]
+mod sync_impl {
+    pub(crate) use core::cell;
+    pub(crate) use portable_atomic as atomic;
+
+    #[cfg(not(feature = "std"))]
+    pub(crate) use atomic::hint::spin_loop;
+
+    #[cfg(feature = "std")]
+    pub(crate) use std::thread::yield_now;
+}
+
+#[cfg(all(not(feature = "portable-atomic"), not(loom)))]
+mod sync_impl {
+    pub(crate) use core::cell;
+    pub(crate) use core::sync::atomic;
+
+    #[cfg(not(feature = "std"))]
+    #[inline]
+    pub(crate) fn spin_loop() {
+        #[allow(deprecated)]
+        atomic::spin_loop_hint();
+    }
+
+    #[cfg(feature = "std")]
+    pub(crate) use std::thread::yield_now;
+}
+
+#[cfg(loom)]
+mod sync_impl {
+    pub(crate) use loom::cell;
+
+    pub(crate) mod atomic {
+        pub(crate) use loom::sync::atomic::*;
+    }
+
+    #[cfg(not(feature = "std"))]
+    pub(crate) use loom::hint::spin_loop;
+    #[cfg(feature = "std")]
+    pub(crate) use loom::thread::yield_now;
+}
+
+pub(crate) use sync_impl::*;
+
+/// Notify the CPU that we are currently busy-waiting.
+#[inline]
+pub(crate) fn busy_wait() {
+    #[cfg(feature = "std")]
+    yield_now();
+
+    #[cfg(not(feature = "std"))]
+    spin_loop();
+}
+
+#[cfg(loom)]
+pub(crate) mod prelude {}
+
+#[cfg(not(loom))]
+pub(crate) mod prelude {
+    use super::{atomic, cell};
+
+    /// Emulate `loom::UnsafeCell`'s API.
+    pub(crate) trait UnsafeCellExt {
+        type Value;
+
+        fn with_mut<F, R>(&self, f: F) -> R
+        where
+            F: FnOnce(*mut Self::Value) -> R;
+    }
+
+    impl<T> UnsafeCellExt for cell::UnsafeCell<T> {
+        type Value = T;
+
+        fn with_mut<F, R>(&self, f: F) -> R
+        where
+            F: FnOnce(*mut Self::Value) -> R,
+        {
+            f(self.get())
+        }
+    }
+
+    /// Emulate `loom::Atomic*`'s API.
+    pub(crate) trait AtomicExt {
+        type Value;
+
+        fn with_mut<F, R>(&mut self, f: F) -> R
+        where
+            F: FnOnce(&mut Self::Value) -> R;
+    }
+
+    impl AtomicExt for atomic::AtomicUsize {
+        type Value = usize;
+
+        fn with_mut<F, R>(&mut self, f: F) -> R
+        where
+            F: FnOnce(&mut Self::Value) -> R,
+        {
+            f(self.get_mut())
+        }
+    }
+
+    impl<T> AtomicExt for atomic::AtomicPtr<T> {
+        type Value = *mut T;
+
+        fn with_mut<F, R>(&mut self, f: F) -> R
+        where
+            F: FnOnce(&mut Self::Value) -> R,
+        {
+            f(self.get_mut())
+        }
+    }
+}
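What the facade buys is one `with_mut`-style call surface for both back ends: `loom`'s `UnsafeCell` already exposes a closure-based API, and the prelude shims the same shape onto `core`'s. A self-contained sketch (not the crate's code) of that adapter pattern:

```rust
use core::cell::UnsafeCell;

trait UnsafeCellExt {
    type Value;

    fn with_mut<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*mut Self::Value) -> R;
}

impl<T> UnsafeCellExt for UnsafeCell<T> {
    type Value = T;

    fn with_mut<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*mut T) -> R,
    {
        // `core`'s cell hands out a raw pointer; loom's closure API is emulated
        // by immediately invoking the closure on it.
        f(self.get())
    }
}

fn main() {
    let cell = UnsafeCell::new(1u32);
    // SAFETY: no other reference to the cell's contents exists here.
    let doubled = cell.with_mut(|p| unsafe {
        *p *= 2;
        *p
    });
    assert_eq!(doubled, 2);
}
```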
+const WRITE: usize = 1;
+const READ: usize = 2;
+const DESTROY: usize = 4;
+
+// Each block covers one "lap" of indices.
+const LAP: usize = 32;
+// The maximum number of items a block can hold.
+const BLOCK_CAP: usize = LAP - 1;
+// How many lower bits are reserved for metadata.
+const SHIFT: usize = 1;
+// Has two different purposes:
+// * If set in head, indicates that the block is not the last one.
+// * If set in tail, indicates that the queue is closed.
+const MARK_BIT: usize = 1;
+
+/// A slot in a block.
+struct Slot<T> {
+    /// The value.
+    value: UnsafeCell<MaybeUninit<T>>,
+
+    /// The state of the slot.
+    state: AtomicUsize,
+}
+
+impl<T> Slot<T> {
+    #[cfg(not(loom))]
+    const UNINIT: Slot<T> = Slot {
+        value: UnsafeCell::new(MaybeUninit::uninit()),
+        state: AtomicUsize::new(0),
+    };
+
+    #[cfg(not(loom))]
+    fn uninit_block() -> [Slot<T>; BLOCK_CAP] {
+        [Self::UNINIT; BLOCK_CAP]
+    }
+
+    #[cfg(loom)]
+    fn uninit_block() -> [Slot<T>; BLOCK_CAP] {
+        // Repeat this expression 31 times.
+        // Update if we change BLOCK_CAP
+        macro_rules! repeat_31 {
+            ($e: expr) => {
+                [
+                    $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e,
+                    $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e,
+                ]
+            };
+        }
+
+        repeat_31!(Slot {
+            value: UnsafeCell::new(MaybeUninit::uninit()),
+            state: AtomicUsize::new(0),
+        })
+    }
+
+    /// Waits until a value is written into the slot.
+    fn wait_write(&self) {
+        while self.state.load(Ordering::Acquire) & WRITE == 0 {
+            busy_wait();
+        }
+    }
+}
+
+/// A block in a linked list.
+///
+/// Each block in the list can hold up to `BLOCK_CAP` values.
+struct Block<T> {
+    /// The next block in the linked list.
+    next: AtomicPtr<Block<T>>,
+
+    /// Slots for values.
+    slots: [Slot<T>; BLOCK_CAP],
+}
+
+impl<T> Block<T> {
+    /// Creates an empty block.
+    fn new() -> Block<T> {
+        Block {
+            next: AtomicPtr::new(ptr::null_mut()),
+            slots: Slot::uninit_block(),
+        }
+    }
+
+    /// Waits until the next pointer is set.
+    fn wait_next(&self) -> *mut Block<T> {
+        loop {
+            let next = self.next.load(Ordering::Acquire);
+            if !next.is_null() {
+                return next;
+            }
+            busy_wait();
+        }
+    }
+
+    /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
+    unsafe fn destroy(this: *mut Block<T>, start: usize) {
+        // It is not necessary to set the `DESTROY` bit in the last slot because that slot has
+        // begun destruction of the block.
+        for i in start..BLOCK_CAP - 1 {
+            let slot = (*this).slots.get_unchecked(i);
+
+            // Mark the `DESTROY` bit if a thread is still using the slot.
+            if slot.state.load(Ordering::Acquire) & READ == 0
+                && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
+            {
+                // If a thread is still using the slot, it will continue destruction of the block.
+                return;
+            }
+        }
+
+        // No thread is using the block, now it is safe to destroy it.
+        drop(Box::from_raw(this));
+    }
+}
+
+/// A position in a queue.
+struct Position<T> {
+    /// The index in the queue.
+    index: AtomicUsize,
+
+    /// The block in the linked list.
+    block: AtomicPtr<Block<T>>,
+}
+
+/// An unbounded queue.
+pub struct Unbounded<T> {
+    /// The head of the queue.
+    head: CachePadded<Position<T>>,
+
+    /// The tail of the queue.
+    tail: CachePadded<Position<T>>,
+}
+
+impl<T> Unbounded<T> {
+    const_fn!(
+        const_if: #[cfg(not(loom))];
+        /// Creates a new unbounded queue.
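+        ///
+        /// This is a `const fn` except under `loom`, whose atomic types
+        /// cannot be constructed in a `const` context.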
+        pub const fn new() -> Unbounded<T> {
+            Unbounded {
+                head: CachePadded::new(Position {
+                    block: AtomicPtr::new(ptr::null_mut()),
+                    index: AtomicUsize::new(0),
+                }),
+                tail: CachePadded::new(Position {
+                    block: AtomicPtr::new(ptr::null_mut()),
+                    index: AtomicUsize::new(0),
+                }),
+            }
+        }
+    );
+
+    /// Pushes an item into the queue.
+    pub fn push(&self, value: T) -> Result<(), PushError<T>> {
+        let mut tail = self.tail.index.load(Ordering::Acquire);
+        let mut block = self.tail.block.load(Ordering::Acquire);
+        let mut next_block = None;
+
+        loop {
+            // Check if the queue is closed.
+            if tail & MARK_BIT != 0 {
+                return Err(PushError::Closed(value));
+            }
+
+            // Calculate the offset of the index into the block.
+            let offset = (tail >> SHIFT) % LAP;
+
+            // If we reached the end of the block, wait until the next one is installed.
+            if offset == BLOCK_CAP {
+                busy_wait();
+                tail = self.tail.index.load(Ordering::Acquire);
+                block = self.tail.block.load(Ordering::Acquire);
+                continue;
+            }
+
+            // If we're going to have to install the next block, allocate it in advance in order to
+            // make the wait for other threads as short as possible.
+            if offset + 1 == BLOCK_CAP && next_block.is_none() {
+                next_block = Some(Box::new(Block::<T>::new()));
+            }
+
+            // If this is the first value to be pushed into the queue, we need to allocate the
+            // first block and install it.
+            if block.is_null() {
+                let new = Box::into_raw(Box::new(Block::<T>::new()));
+
+                if self
+                    .tail
+                    .block
+                    .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed)
+                    .is_ok()
+                {
+                    self.head.block.store(new, Ordering::Release);
+                    block = new;
+                } else {
+                    next_block = unsafe { Some(Box::from_raw(new)) };
+                    tail = self.tail.index.load(Ordering::Acquire);
+                    block = self.tail.block.load(Ordering::Acquire);
+                    continue;
+                }
+            }
+
+            let new_tail = tail + (1 << SHIFT);
+
+            // Try advancing the tail forward.
+            match self.tail.index.compare_exchange_weak(
+                tail,
+                new_tail,
+                Ordering::SeqCst,
+                Ordering::Acquire,
+            ) {
+                Ok(_) => unsafe {
+                    // If we've reached the end of the block, install the next one.
+                    if offset + 1 == BLOCK_CAP {
+                        let next_block = Box::into_raw(next_block.unwrap());
+                        self.tail.block.store(next_block, Ordering::Release);
+                        self.tail.index.fetch_add(1 << SHIFT, Ordering::Release);
+                        (*block).next.store(next_block, Ordering::Release);
+                    }
+
+                    // Write the value into the slot.
+                    let slot = (*block).slots.get_unchecked(offset);
+                    slot.value.with_mut(|slot| {
+                        slot.write(MaybeUninit::new(value));
+                    });
+                    slot.state.fetch_or(WRITE, Ordering::Release);
+                    return Ok(());
+                },
+                Err(t) => {
+                    tail = t;
+                    block = self.tail.block.load(Ordering::Acquire);
+                }
+            }
+        }
+    }
+
+    /// Pops an item from the queue.
+    pub fn pop(&self) -> Result<T, PopError> {
+        let mut head = self.head.index.load(Ordering::Acquire);
+        let mut block = self.head.block.load(Ordering::Acquire);
+
+        loop {
+            // Calculate the offset of the index into the block.
+            let offset = (head >> SHIFT) % LAP;
+
+            // If we reached the end of the block, wait until the next one is installed.
+            if offset == BLOCK_CAP {
+                busy_wait();
+                head = self.head.index.load(Ordering::Acquire);
+                block = self.head.block.load(Ordering::Acquire);
+                continue;
+            }
+
+            let mut new_head = head + (1 << SHIFT);
+
+            if new_head & MARK_BIT == 0 {
+                crate::full_fence();
+                let tail = self.tail.index.load(Ordering::Relaxed);
+
+                // If the tail equals the head, that means the queue is empty.
+                if head >> SHIFT == tail >> SHIFT {
+                    // Check if the queue is closed.
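+                    // (`close()` sets `MARK_BIT` in the tail index, so an empty
+                    // queue can distinguish `Closed` from merely `Empty`.)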
+ if tail & MARK_BIT != 0 { + return Err(PopError::Closed); + } else { + return Err(PopError::Empty); + } + } + + // If head and tail are not in the same block, set `MARK_BIT` in head. + if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { + new_head |= MARK_BIT; + } + } + + // The block can be null here only if the first push operation is in progress. + if block.is_null() { + busy_wait(); + head = self.head.index.load(Ordering::Acquire); + block = self.head.block.load(Ordering::Acquire); + continue; + } + + // Try moving the head index forward. + match self.head.index.compare_exchange_weak( + head, + new_head, + Ordering::SeqCst, + Ordering::Acquire, + ) { + Ok(_) => unsafe { + // If we've reached the end of the block, move to the next one. + if offset + 1 == BLOCK_CAP { + let next = (*block).wait_next(); + let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT); + if !(*next).next.load(Ordering::Relaxed).is_null() { + next_index |= MARK_BIT; + } + + self.head.block.store(next, Ordering::Release); + self.head.index.store(next_index, Ordering::Release); + } + + // Read the value. + let slot = (*block).slots.get_unchecked(offset); + slot.wait_write(); + let value = slot.value.with_mut(|slot| slot.read().assume_init()); + + // Destroy the block if we've reached the end, or if another thread wanted to + // destroy but couldn't because we were busy reading from the slot. + if offset + 1 == BLOCK_CAP { + Block::destroy(block, 0); + } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { + Block::destroy(block, offset + 1); + } + + return Ok(value); + }, + Err(h) => { + head = h; + block = self.head.block.load(Ordering::Acquire); + } + } + } + } + + /// Returns the number of items in the queue. + pub fn len(&self) -> usize { + loop { + // Load the tail index, then load the head index. + let mut tail = self.tail.index.load(Ordering::SeqCst); + let mut head = self.head.index.load(Ordering::SeqCst); + + // If the tail index didn't change, we've got consistent indices to work with. + if self.tail.index.load(Ordering::SeqCst) == tail { + // Erase the lower bits. + tail &= !((1 << SHIFT) - 1); + head &= !((1 << SHIFT) - 1); + + // Fix up indices if they fall onto block ends. + if (tail >> SHIFT) & (LAP - 1) == LAP - 1 { + tail = tail.wrapping_add(1 << SHIFT); + } + if (head >> SHIFT) & (LAP - 1) == LAP - 1 { + head = head.wrapping_add(1 << SHIFT); + } + + // Rotate indices so that head falls into the first block. + let lap = (head >> SHIFT) / LAP; + tail = tail.wrapping_sub((lap * LAP) << SHIFT); + head = head.wrapping_sub((lap * LAP) << SHIFT); + + // Remove the lower bits. + tail >>= SHIFT; + head >>= SHIFT; + + // Return the difference minus the number of blocks between tail and head. + return tail - head - tail / LAP; + } + } + } + + /// Returns `true` if the queue is empty. + pub fn is_empty(&self) -> bool { + let head = self.head.index.load(Ordering::SeqCst); + let tail = self.tail.index.load(Ordering::SeqCst); + head >> SHIFT == tail >> SHIFT + } + + /// Returns `true` if the queue is full. + pub fn is_full(&self) -> bool { + false + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue. + pub fn close(&self) -> bool { + let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst); + tail & MARK_BIT == 0 + } + + /// Returns `true` if the queue is closed. 
+    pub fn is_closed(&self) -> bool {
+        self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0
+    }
+}
+
+impl<T> Drop for Unbounded<T> {
+    fn drop(&mut self) {
+        let Self { head, tail } = self;
+        let Position { index: head, block } = &mut **head;
+
+        head.with_mut(|&mut mut head| {
+            tail.index.with_mut(|&mut mut tail| {
+                // Erase the lower bits.
+                head &= !((1 << SHIFT) - 1);
+                tail &= !((1 << SHIFT) - 1);
+
+                unsafe {
+                    // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks.
+                    while head != tail {
+                        let offset = (head >> SHIFT) % LAP;
+
+                        if offset < BLOCK_CAP {
+                            // Drop the value in the slot.
+                            block.with_mut(|block| {
+                                let slot = (**block).slots.get_unchecked(offset);
+                                slot.value.with_mut(|slot| {
+                                    let value = &mut *slot;
+                                    value.as_mut_ptr().drop_in_place();
+                                });
+                            });
+                        } else {
+                            // Deallocate the block and move to the next one.
+                            block.with_mut(|block| {
+                                let next_block = (**block).next.with_mut(|next| *next);
+                                drop(Box::from_raw(*block));
+                                *block = next_block;
+                            });
+                        }
+
+                        head = head.wrapping_add(1 << SHIFT);
+                    }
+
+                    // Deallocate the last remaining block.
+                    block.with_mut(|block| {
+                        if !block.is_null() {
+                            drop(Box::from_raw(*block));
+                        }
+                    });
+                }
+            });
+        });
+    }
+}
diff --git a/external/vendor/concurrent-queue/tests/bounded.rs b/external/vendor/concurrent-queue/tests/bounded.rs
new file mode 100644
index 0000000000..6f402b7f8b
--- /dev/null
+++ b/external/vendor/concurrent-queue/tests/bounded.rs
@@ -0,0 +1,371 @@
+#![allow(clippy::bool_assert_comparison)]
+
+use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError};
+
+#[cfg(not(target_family = "wasm"))]
+use easy_parallel::Parallel;
+#[cfg(not(target_family = "wasm"))]
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+#[cfg(target_family = "wasm")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+#[test]
+fn smoke() {
+    let q = ConcurrentQueue::bounded(2);
+
+    q.push(7).unwrap();
+    assert_eq!(q.pop(), Ok(7));
+
+    q.push(8).unwrap();
+    assert_eq!(q.pop(), Ok(8));
+    assert!(q.pop().is_err());
+}
+
+#[test]
+fn capacity() {
+    for i in 1..10 {
+        let q = ConcurrentQueue::<i32>::bounded(i);
+        assert_eq!(q.capacity(), Some(i));
+    }
+}
+
+#[test]
+#[should_panic(expected = "capacity must be positive")]
+fn zero_capacity() {
+    let _ = ConcurrentQueue::<i32>::bounded(0);
+}
+
+#[test]
+fn len_empty_full() {
+    let q = ConcurrentQueue::bounded(2);
+
+    assert_eq!(q.len(), 0);
+    assert_eq!(q.is_empty(), true);
+    assert_eq!(q.is_full(), false);
+
+    q.push(()).unwrap();
+
+    assert_eq!(q.len(), 1);
+    assert_eq!(q.is_empty(), false);
+    assert_eq!(q.is_full(), false);
+
+    q.push(()).unwrap();
+
+    assert_eq!(q.len(), 2);
+    assert_eq!(q.is_empty(), false);
+    assert_eq!(q.is_full(), true);
+
+    q.pop().unwrap();
+
+    assert_eq!(q.len(), 1);
+    assert_eq!(q.is_empty(), false);
+    assert_eq!(q.is_full(), false);
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn len() {
+    const COUNT: usize = if cfg!(miri) { 50 } else { 25_000 };
+    const CAP: usize = if cfg!(miri) { 50 } else { 1000 };
+
+    let q = ConcurrentQueue::bounded(CAP);
+    assert_eq!(q.len(), 0);
+
+    for _ in 0..CAP / 10 {
+        for i in 0..50 {
+            q.push(i).unwrap();
+            assert_eq!(q.len(), i + 1);
+        }
+
+        for i in 0..50 {
+            q.pop().unwrap();
+            assert_eq!(q.len(), 50 - i - 1);
+        }
+    }
+    assert_eq!(q.len(), 0);
+
+    for i in 0..CAP {
+        q.push(i).unwrap();
+        assert_eq!(q.len(), i + 1);
+    }
+
+    for _ in 0..CAP {
+        q.pop().unwrap();
+    }
+    assert_eq!(q.len(), 0);
+
+    Parallel::new()
+        .add(|| {
+            for i in 0..COUNT {
+                loop {
+                    if let Ok(x) = q.pop() {
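+                        // One producer, one consumer here, so values must be
+                        // popped in exactly the order they were pushed.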
+                        assert_eq!(x, i);
+                        break;
+                    }
+                }
+                let len = q.len();
+                assert!(len <= CAP);
+            }
+        })
+        .add(|| {
+            for i in 0..COUNT {
+                while q.push(i).is_err() {}
+                let len = q.len();
+                assert!(len <= CAP);
+            }
+        })
+        .run();
+
+    assert_eq!(q.len(), 0);
+}
+
+#[test]
+fn close() {
+    let q = ConcurrentQueue::bounded(2);
+    assert_eq!(q.push(10), Ok(()));
+
+    assert!(!q.is_closed());
+    assert!(q.close());
+
+    assert!(q.is_closed());
+    assert!(!q.close());
+
+    assert_eq!(q.push(20), Err(PushError::Closed(20)));
+    assert_eq!(q.pop(), Ok(10));
+    assert_eq!(q.pop(), Err(PopError::Closed));
+}
+
+#[test]
+fn force_push() {
+    let q = ConcurrentQueue::<i32>::bounded(5);
+
+    for i in 1..=5 {
+        assert_eq!(q.force_push(i), Ok(None));
+    }
+
+    assert!(!q.is_closed());
+    for i in 6..=10 {
+        assert_eq!(q.force_push(i), Ok(Some(i - 5)));
+    }
+    assert_eq!(q.pop(), Ok(6));
+    assert_eq!(q.force_push(11), Ok(None));
+    for i in 12..=15 {
+        assert_eq!(q.force_push(i), Ok(Some(i - 5)));
+    }
+
+    assert!(q.close());
+    assert_eq!(q.force_push(40), Err(ForcePushError(40)));
+    for i in 11..=15 {
+        assert_eq!(q.pop(), Ok(i));
+    }
+    assert_eq!(q.pop(), Err(PopError::Closed));
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn spsc() {
+    const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 };
+
+    let q = ConcurrentQueue::bounded(3);
+
+    Parallel::new()
+        .add(|| {
+            for i in 0..COUNT {
+                loop {
+                    if let Ok(x) = q.pop() {
+                        assert_eq!(x, i);
+                        break;
+                    }
+                }
+            }
+            assert!(q.pop().is_err());
+        })
+        .add(|| {
+            for i in 0..COUNT {
+                while q.push(i).is_err() {}
+            }
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc() {
+    const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
+    const THREADS: usize = 4;
+
+    let q = ConcurrentQueue::<usize>::bounded(3);
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .each(0..THREADS, |_| {
+            for _ in 0..COUNT {
+                let n = loop {
+                    if let Ok(x) = q.pop() {
+                        break x;
+                    }
+                };
+                v[n].fetch_add(1, Ordering::SeqCst);
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                while q.push(i).is_err() {}
+            }
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn drops() {
+    const RUNS: usize = if cfg!(miri) { 10 } else { 100 };
+    const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 };
+
+    static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+    #[derive(Debug, PartialEq)]
+    struct DropCounter;
+
+    impl Drop for DropCounter {
+        fn drop(&mut self) {
+            DROPS.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+
+    for _ in 0..RUNS {
+        let steps = fastrand::usize(..STEPS);
+        let additional = fastrand::usize(..50);
+
+        DROPS.store(0, Ordering::SeqCst);
+        let q = ConcurrentQueue::bounded(50);
+
+        Parallel::new()
+            .add(|| {
+                for _ in 0..steps {
+                    while q.pop().is_err() {}
+                }
+            })
+            .add(|| {
+                for _ in 0..steps {
+                    while q.push(DropCounter).is_err() {
+                        DROPS.fetch_sub(1, Ordering::SeqCst);
+                    }
+                }
+            })
+            .run();
+
+        for _ in 0..additional {
+            q.push(DropCounter).unwrap();
+        }
+
+        assert_eq!(DROPS.load(Ordering::SeqCst), steps);
+        drop(q);
+        assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn linearizable() {
+    const COUNT: usize = if cfg!(miri) { 500 } else { 25_000 };
+    const THREADS: usize = 4;
+
+    let q = ConcurrentQueue::bounded(THREADS);
+
+    Parallel::new()
+        .each(0..THREADS / 2, |_| {
+            for _ in 0..COUNT {
+                while q.push(0).is_err() {}
+                q.pop().unwrap();
+            }
+        })
+        .each(0..THREADS / 2, |_| {
+            for _ in 0..COUNT {
+                if q.force_push(0).unwrap().is_none() {
+                    q.pop().unwrap();
+                }
+            }
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn spsc_ring_buffer() {
+    const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 };
+
+    let t = AtomicUsize::new(1);
+    let q = ConcurrentQueue::<usize>::bounded(3);
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .add(|| loop {
+            match t.load(Ordering::SeqCst) {
+                0 if q.is_empty() => break,
+
+                _ => {
+                    while let Ok(n) = q.pop() {
+                        v[n].fetch_add(1, Ordering::SeqCst);
+                    }
+                }
+            }
+        })
+        .add(|| {
+            for i in 0..COUNT {
+                if let Ok(Some(n)) = q.force_push(i) {
+                    v[n].fetch_add(1, Ordering::SeqCst);
+                }
+            }
+
+            t.fetch_sub(1, Ordering::SeqCst);
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), 1);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc_ring_buffer() {
+    const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
+    const THREADS: usize = 4;
+
+    let t = AtomicUsize::new(THREADS);
+    let q = ConcurrentQueue::<usize>::bounded(3);
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .each(0..THREADS, |_| loop {
+            match t.load(Ordering::SeqCst) {
+                0 if q.is_empty() => break,
+
+                _ => {
+                    while let Ok(n) = q.pop() {
+                        v[n].fetch_add(1, Ordering::SeqCst);
+                    }
+                }
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                if let Ok(Some(n)) = q.force_push(i) {
+                    v[n].fetch_add(1, Ordering::SeqCst);
+                }
+            }
+
+            t.fetch_sub(1, Ordering::SeqCst);
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
diff --git a/external/vendor/concurrent-queue/tests/loom.rs b/external/vendor/concurrent-queue/tests/loom.rs
new file mode 100644
index 0000000000..77f99d4945
--- /dev/null
+++ b/external/vendor/concurrent-queue/tests/loom.rs
@@ -0,0 +1,307 @@
+#![cfg(loom)]
+
+use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError};
+use loom::sync::atomic::{AtomicUsize, Ordering};
+use loom::sync::{Arc, Condvar, Mutex};
+use loom::thread;
+
+#[cfg(target_family = "wasm")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+/// A basic MPMC channel based on a ConcurrentQueue and loom primitives.
+struct Channel<T> {
+    /// The queue used to contain items.
+    queue: ConcurrentQueue<T>,
+
+    /// The number of senders.
+    senders: AtomicUsize,
+
+    /// The number of receivers.
+    receivers: AtomicUsize,
+
+    /// The event that is signaled when a new item is pushed.
+    push_event: Event,
+
+    /// The event that is signaled when a new item is popped.
+    pop_event: Event,
+}
+
+/// The sending side of a channel.
+struct Sender<T> {
+    /// The channel.
+    channel: Arc<Channel<T>>,
+}
+
+/// The receiving side of a channel.
+struct Receiver<T> {
+    /// The channel.
+    channel: Arc<Channel<T>>,
+}
+
+/// Create a new pair of senders/receivers based on a queue.
+fn pair<T>(queue: ConcurrentQueue<T>) -> (Sender<T>, Receiver<T>) {
+    let channel = Arc::new(Channel {
+        queue,
+        senders: AtomicUsize::new(1),
+        receivers: AtomicUsize::new(1),
+        push_event: Event::new(),
+        pop_event: Event::new(),
+    });
+
+    (
+        Sender {
+            channel: channel.clone(),
+        },
+        Receiver { channel },
+    )
+}
+
+impl<T> Clone for Sender<T> {
+    fn clone(&self) -> Self {
+        self.channel.senders.fetch_add(1, Ordering::SeqCst);
+        Sender {
+            channel: self.channel.clone(),
+        }
+    }
+}
+
+impl<T> Drop for Sender<T> {
+    fn drop(&mut self) {
+        if self.channel.senders.fetch_sub(1, Ordering::SeqCst) == 1 {
+            // Close the channel and notify the receivers.
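+            // All receivers may be parked in `recv()`; each of them must see
+            // the queue close and bail out, hence `signal_all` over `signal`.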
+            self.channel.queue.close();
+            self.channel.push_event.signal_all();
+        }
+    }
+}
+
+impl<T> Clone for Receiver<T> {
+    fn clone(&self) -> Self {
+        self.channel.receivers.fetch_add(1, Ordering::SeqCst);
+        Receiver {
+            channel: self.channel.clone(),
+        }
+    }
+}
+
+impl<T> Drop for Receiver<T> {
+    fn drop(&mut self) {
+        if self.channel.receivers.fetch_sub(1, Ordering::SeqCst) == 1 {
+            // Close the channel and notify the senders.
+            self.channel.queue.close();
+            self.channel.pop_event.signal_all();
+        }
+    }
+}
+
+impl<T> Sender<T> {
+    /// Send a value.
+    ///
+    /// Returns an error with the value if the channel is closed.
+    fn send(&self, mut value: T) -> Result<(), T> {
+        loop {
+            match self.channel.queue.push(value) {
+                Ok(()) => {
+                    // Notify a single receiver.
+                    self.channel.push_event.signal();
+                    return Ok(());
+                }
+                Err(PushError::Closed(val)) => return Err(val),
+                Err(PushError::Full(val)) => {
+                    // Wait for a receiver to pop an item.
+                    value = val;
+                    self.channel.pop_event.wait();
+                }
+            }
+        }
+    }
+
+    /// Send a value forcefully.
+    fn force_send(&self, value: T) -> Result<Option<T>, T> {
+        match self.channel.queue.force_push(value) {
+            Ok(bumped) => {
+                self.channel.push_event.signal();
+                Ok(bumped)
+            }
+
+            Err(ForcePushError(val)) => Err(val),
+        }
+    }
+}
+
+impl<T> Receiver<T> {
+    /// Channel capacity.
+    fn capacity(&self) -> Option<usize> {
+        self.channel.queue.capacity()
+    }
+
+    /// Receive a value.
+    ///
+    /// Returns an error if the channel is closed.
+    fn recv(&self) -> Result<T, ()> {
+        loop {
+            match self.channel.queue.pop() {
+                Ok(value) => {
+                    // Notify a single sender.
+                    self.channel.pop_event.signal();
+                    return Ok(value);
+                }
+                Err(PopError::Closed) => return Err(()),
+                Err(PopError::Empty) => {
+                    // Wait for a sender to push an item.
+                    self.channel.push_event.wait();
+                }
+            }
+        }
+    }
+}
+
+/// An event that can be waited on and then signaled.
+struct Event {
+    /// The condition variable used to wait on the event.
+    condvar: Condvar,
+
+    /// The mutex used to protect the event.
+    ///
+    /// Inside is the event's state. The first bit is used to indicate if the
+    /// notify_one method was called. The second bit is used to indicate if the
+    /// notify_all method was called.
+    mutex: Mutex<usize>,
+}
+
+impl Event {
+    /// Create a new event.
+    fn new() -> Self {
+        Self {
+            condvar: Condvar::new(),
+            mutex: Mutex::new(0),
+        }
+    }
+
+    /// Wait for the event to be signaled.
+    fn wait(&self) {
+        let mut state = self.mutex.lock().unwrap();
+
+        loop {
+            if *state & 0b11 != 0 {
+                // The event was signaled.
+                *state &= !0b01;
+                return;
+            }
+
+            // Wait for the event to be signaled.
+            state = self.condvar.wait(state).unwrap();
+        }
+    }
+
+    /// Signal the event.
+    fn signal(&self) {
+        let mut state = self.mutex.lock().unwrap();
+        *state |= 1;
+        drop(state);
+
+        self.condvar.notify_one();
+    }
+
+    /// Signal the event, but notify all waiters.
+    fn signal_all(&self) {
+        let mut state = self.mutex.lock().unwrap();
+        *state |= 3;
+        drop(state);
+
+        self.condvar.notify_all();
+    }
+}
+
+/// Wrapper to run tests on all three queues.
+fn run_test<F: Fn(ConcurrentQueue<usize>, usize) + Send + Sync + Clone + 'static>(f: F) {
+    // The length of a loom test seems to increase exponentially the higher this number is.
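+    // Keep this small: loom checks every possible interleaving, so the state
+    // space grows very quickly with the number of operations.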
+    const LIMIT: usize = 4;
+
+    let fc = f.clone();
+    loom::model(move || {
+        fc(ConcurrentQueue::bounded(1), LIMIT);
+    });
+
+    let fc = f.clone();
+    loom::model(move || {
+        fc(ConcurrentQueue::bounded(LIMIT / 2), LIMIT);
+    });
+
+    loom::model(move || {
+        f(ConcurrentQueue::unbounded(), LIMIT);
+    });
+}
+
+#[test]
+fn spsc() {
+    run_test(|q, limit| {
+        // Create a new pair of senders/receivers.
+        let (tx, rx) = pair(q);
+
+        // Push each onto a thread and run them.
+        let handle = thread::spawn(move || {
+            for i in 0..limit {
+                if tx.send(i).is_err() {
+                    break;
+                }
+            }
+        });
+
+        let mut recv_values = vec![];
+
+        loop {
+            match rx.recv() {
+                Ok(value) => recv_values.push(value),
+                Err(()) => break,
+            }
+        }
+
+        // Values may not be in order.
+        recv_values.sort_unstable();
+        assert_eq!(recv_values, (0..limit).collect::<Vec<_>>());
+
+        // Join the handle before we exit.
+        handle.join().unwrap();
+    });
+}
+
+#[test]
+fn spsc_force() {
+    run_test(|q, limit| {
+        // Create a new pair of senders/receivers.
+        let (tx, rx) = pair(q);
+
+        // Push each onto a thread and run them.
+        let handle = thread::spawn(move || {
+            for i in 0..limit {
+                if tx.force_send(i).is_err() {
+                    break;
+                }
+            }
+        });
+
+        let mut recv_values = vec![];
+
+        loop {
+            match rx.recv() {
+                Ok(value) => recv_values.push(value),
+                Err(()) => break,
+            }
+        }
+
+        // Values may not be in order.
+        recv_values.sort_unstable();
+        let cap = rx.capacity().unwrap_or(usize::MAX);
+        for (left, right) in (0..limit)
+            .rev()
+            .take(cap)
+            .zip(recv_values.into_iter().rev())
+        {
+            assert_eq!(left, right);
+        }
+
+        // Join the handle before we exit.
+        handle.join().unwrap();
+    });
+}
diff --git a/external/vendor/concurrent-queue/tests/single.rs b/external/vendor/concurrent-queue/tests/single.rs
new file mode 100644
index 0000000000..ec4b912c94
--- /dev/null
+++ b/external/vendor/concurrent-queue/tests/single.rs
@@ -0,0 +1,289 @@
+#![allow(clippy::bool_assert_comparison)]
+
+use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError};
+
+#[cfg(not(target_family = "wasm"))]
+use easy_parallel::Parallel;
+#[cfg(not(target_family = "wasm"))]
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+#[cfg(target_family = "wasm")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+#[test]
+fn smoke() {
+    let q = ConcurrentQueue::bounded(1);
+
+    q.push(7).unwrap();
+    assert_eq!(q.pop(), Ok(7));
+
+    q.push(8).unwrap();
+    assert_eq!(q.pop(), Ok(8));
+    assert!(q.pop().is_err());
+}
+
+#[test]
+fn capacity() {
+    let q = ConcurrentQueue::<i32>::bounded(1);
+    assert_eq!(q.capacity(), Some(1));
+}
+
+#[test]
+fn len_empty_full() {
+    let q = ConcurrentQueue::bounded(1);
+
+    assert_eq!(q.len(), 0);
+    assert_eq!(q.is_empty(), true);
+    assert_eq!(q.is_full(), false);
+
+    q.push(()).unwrap();
+
+    assert_eq!(q.len(), 1);
+    assert_eq!(q.is_empty(), false);
+    assert_eq!(q.is_full(), true);
+
+    q.pop().unwrap();
+
+    assert_eq!(q.len(), 0);
+    assert_eq!(q.is_empty(), true);
+    assert_eq!(q.is_full(), false);
+}
+
+#[test]
+fn close() {
+    let q = ConcurrentQueue::<i32>::bounded(1);
+    assert_eq!(q.push(10), Ok(()));
+
+    assert!(!q.is_closed());
+    assert!(q.close());
+
+    assert!(q.is_closed());
+    assert!(!q.close());
+
+    assert_eq!(q.push(20), Err(PushError::Closed(20)));
+    assert_eq!(q.pop(), Ok(10));
+    assert_eq!(q.pop(), Err(PopError::Closed));
+}
+
+#[test]
+fn force_push() {
+    let q = ConcurrentQueue::<i32>::bounded(1);
+    assert_eq!(q.force_push(10), Ok(None));
+
+    assert!(!q.is_closed());
+    assert_eq!(q.force_push(20), Ok(Some(10)));
+    assert_eq!(q.force_push(30), Ok(Some(20)));
+
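+    // Closing stops further pushes, but the value still sitting in the slot
+    // (30) remains poppable before `Closed` is reported.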
+    assert!(q.close());
+    assert_eq!(q.force_push(40), Err(ForcePushError(40)));
+    assert_eq!(q.pop(), Ok(30));
+    assert_eq!(q.pop(), Err(PopError::Closed));
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn spsc() {
+    const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 };
+
+    let q = ConcurrentQueue::bounded(1);
+
+    Parallel::new()
+        .add(|| {
+            for i in 0..COUNT {
+                loop {
+                    if let Ok(x) = q.pop() {
+                        assert_eq!(x, i);
+                        break;
+                    }
+                }
+            }
+            assert!(q.pop().is_err());
+        })
+        .add(|| {
+            for i in 0..COUNT {
+                while q.push(i).is_err() {}
+            }
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc() {
+    const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
+    const THREADS: usize = 1;
+
+    let q = ConcurrentQueue::<usize>::bounded(THREADS);
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .each(0..THREADS, |_| {
+            for _ in 0..COUNT {
+                let n = loop {
+                    if let Ok(x) = q.pop() {
+                        break x;
+                    }
+                };
+                v[n].fetch_add(1, Ordering::SeqCst);
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                while q.push(i).is_err() {}
+            }
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn drops() {
+    const RUNS: usize = if cfg!(miri) { 20 } else { 100 };
+    const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 };
+
+    static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+    #[derive(Debug, PartialEq)]
+    struct DropCounter;
+
+    impl Drop for DropCounter {
+        fn drop(&mut self) {
+            DROPS.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+
+    for _ in 0..RUNS {
+        let steps = fastrand::usize(..STEPS);
+        let additional = fastrand::usize(0..=1);
+
+        DROPS.store(0, Ordering::SeqCst);
+        let q = ConcurrentQueue::bounded(1);
+
+        Parallel::new()
+            .add(|| {
+                for _ in 0..steps {
+                    while q.pop().is_err() {}
+                }
+            })
+            .add(|| {
+                for _ in 0..steps {
+                    while q.push(DropCounter).is_err() {
+                        DROPS.fetch_sub(1, Ordering::SeqCst);
+                    }
+                }
+            })
+            .run();
+
+        for _ in 0..additional {
+            q.push(DropCounter).unwrap();
+        }
+
+        assert_eq!(DROPS.load(Ordering::SeqCst), steps);
+        drop(q);
+        assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn linearizable() {
+    const COUNT: usize = if cfg!(miri) { 500 } else { 25_000 };
+    const THREADS: usize = 4;
+
+    let q = ConcurrentQueue::bounded(1);
+
+    Parallel::new()
+        .each(0..THREADS / 2, |_| {
+            for _ in 0..COUNT {
+                while q.push(0).is_err() {}
+                q.pop().unwrap();
+            }
+        })
+        .each(0..THREADS / 2, |_| {
+            for _ in 0..COUNT {
+                if q.force_push(0).unwrap().is_none() {
+                    q.pop().unwrap();
+                }
+            }
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn spsc_ring_buffer() {
+    const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 };
+
+    let t = AtomicUsize::new(1);
+    let q = ConcurrentQueue::<usize>::bounded(1);
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .add(|| loop {
+            match t.load(Ordering::SeqCst) {
+                0 if q.is_empty() => break,
+
+                _ => {
+                    while let Ok(n) = q.pop() {
+                        v[n].fetch_add(1, Ordering::SeqCst);
+                    }
+                }
+            }
+        })
+        .add(|| {
+            for i in 0..COUNT {
+                if let Ok(Some(n)) = q.force_push(i) {
+                    v[n].fetch_add(1, Ordering::SeqCst);
+                }
+            }
+
+            t.fetch_sub(1, Ordering::SeqCst);
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), 1);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc_ring_buffer() {
+    const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
+    const THREADS: usize = 4;
+
+    let t = AtomicUsize::new(THREADS);
+    let q = ConcurrentQueue::<usize>::bounded(1);
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .each(0..THREADS, |_| loop {
+            match t.load(Ordering::SeqCst) {
+                0 if q.is_empty() => break,
+
+                _ => {
+                    while let Ok(n) = q.pop() {
+                        v[n].fetch_add(1, Ordering::SeqCst);
+                    }
+                }
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                if let Ok(Some(n)) = q.force_push(i) {
+                    v[n].fetch_add(1, Ordering::SeqCst);
+                }
+            }
+
+            t.fetch_sub(1, Ordering::SeqCst);
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
diff --git a/external/vendor/concurrent-queue/tests/unbounded.rs b/external/vendor/concurrent-queue/tests/unbounded.rs
new file mode 100644
index 0000000000..e95dc8c725
--- /dev/null
+++ b/external/vendor/concurrent-queue/tests/unbounded.rs
@@ -0,0 +1,181 @@
+#![allow(clippy::bool_assert_comparison)]
+
+use concurrent_queue::{ConcurrentQueue, PopError, PushError};
+
+#[cfg(not(target_family = "wasm"))]
+use easy_parallel::Parallel;
+#[cfg(not(target_family = "wasm"))]
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+#[cfg(target_family = "wasm")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+#[test]
+fn smoke() {
+    let q = ConcurrentQueue::unbounded();
+    q.push(7).unwrap();
+    assert_eq!(q.pop(), Ok(7));
+
+    q.push(8).unwrap();
+    assert_eq!(q.pop(), Ok(8));
+    assert!(q.pop().is_err());
+}
+
+#[test]
+fn len_empty_full() {
+    let q = ConcurrentQueue::unbounded();
+
+    assert_eq!(q.len(), 0);
+    assert_eq!(q.is_empty(), true);
+
+    q.push(()).unwrap();
+
+    assert_eq!(q.len(), 1);
+    assert_eq!(q.is_empty(), false);
+
+    q.pop().unwrap();
+
+    assert_eq!(q.len(), 0);
+    assert_eq!(q.is_empty(), true);
+}
+
+#[test]
+fn len() {
+    let q = ConcurrentQueue::unbounded();
+
+    assert_eq!(q.len(), 0);
+
+    for i in 0..50 {
+        q.push(i).unwrap();
+        assert_eq!(q.len(), i + 1);
+    }
+
+    for i in 0..50 {
+        q.pop().unwrap();
+        assert_eq!(q.len(), 50 - i - 1);
+    }
+
+    assert_eq!(q.len(), 0);
+}
+
+#[test]
+fn close() {
+    let q = ConcurrentQueue::unbounded();
+    assert_eq!(q.push(10), Ok(()));
+
+    assert!(!q.is_closed());
+    assert!(q.close());
+
+    assert!(q.is_closed());
+    assert!(!q.close());
+
+    assert_eq!(q.push(20), Err(PushError::Closed(20)));
+    assert_eq!(q.pop(), Ok(10));
+    assert_eq!(q.pop(), Err(PopError::Closed));
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn spsc() {
+    const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 };
+
+    let q = ConcurrentQueue::unbounded();
+
+    Parallel::new()
+        .add(|| {
+            for i in 0..COUNT {
+                loop {
+                    if let Ok(x) = q.pop() {
+                        assert_eq!(x, i);
+                        break;
+                    }
+                }
+            }
+            assert!(q.pop().is_err());
+        })
+        .add(|| {
+            for i in 0..COUNT {
+                q.push(i).unwrap();
+            }
+        })
+        .run();
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn mpmc() {
+    const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
+    const THREADS: usize = 4;
+
+    let q = ConcurrentQueue::<usize>::unbounded();
+    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
+
+    Parallel::new()
+        .each(0..THREADS, |_| {
+            for _ in 0..COUNT {
+                let n = loop {
+                    if let Ok(x) = q.pop() {
+                        break x;
+                    }
+                };
+                v[n].fetch_add(1, Ordering::SeqCst);
+            }
+        })
+        .each(0..THREADS, |_| {
+            for i in 0..COUNT {
+                q.push(i).unwrap();
+            }
+        })
+        .run();
+
+    for c in v {
+        assert_eq!(c.load(Ordering::SeqCst), THREADS);
+    }
+}
+
+#[cfg(not(target_family = "wasm"))]
+#[test]
+fn drops() {
+    const RUNS: usize = if cfg!(miri) { 20 } else { 100 };
+    const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 };
+
+    static DROPS:
AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + for _ in 0..RUNS { + let steps = fastrand::usize(0..STEPS); + let additional = fastrand::usize(0..1000); + + DROPS.store(0, Ordering::SeqCst); + let q = ConcurrentQueue::unbounded(); + + Parallel::new() + .add(|| { + for _ in 0..steps { + while q.pop().is_err() {} + } + }) + .add(|| { + for _ in 0..steps { + q.push(DropCounter).unwrap(); + } + }) + .run(); + + for _ in 0..additional { + q.push(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(q); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} diff --git a/external/vendor/crossbeam-utils/.cargo-checksum.json b/external/vendor/crossbeam-utils/.cargo-checksum.json new file mode 100644 index 0000000000..32c3420a24 --- /dev/null +++ b/external/vendor/crossbeam-utils/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"d43f49dbbe655cb91dcd62c862552f2ca4520eaff2e4bee391e01b0df968e358","CHANGELOG.md":"366caba01b88f421c71b97f61b9806abbf05e1ba0d24e4bf034191c1f8aa03b8","Cargo.toml":"961aa297754d8fdbae9e23d15bbbcfdcd2b50b2db56becddb84e8cba4f730713","Cargo.toml.orig":"6697cafee3a273f8c04e25c8606621b2fff8a779e3e1d01f9c08d225a0f36fc5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"3c82bbb994f54ab76a9ed30a42dfd095c6e636258d379b9be3fbf66324310e71","benches/atomic_cell.rs":"c927eb3cd1e5ecc4b91adbc3bde98af15ffab4086190792ba64d5cde0e24df3d","build-common.rs":"502cb7494549bed6fa10ac7bea36e880eeb60290dc69b679ac5c92b376469562","build.rs":"7a7f9e56ea7fb4f78c4e532b84b9d27be719d600e85eaeb3a2f4b79a4f0b419c","no_atomic.rs":"fc1baa4489d9842988bacaaa545a7d7d0e2f8b93cfa0b7d1ae31f21256e4cb0a","src/atomic/atomic_cell.rs":"6d8b83b65c73644abc10ec88a1442c8db531ae140de79197901510fcaea45966","src/atomic/consume.rs":"381c2a8b13312ca0525d53ca1b7d0d4f525ddb154951fa3e216b061ad22012ff","src/atomic/mod.rs":"712e2337e710c07116e977154ea4247a1c065bf5599e6bf368138e715b403f6d","src/atomic/seq_lock.rs":"27182e6b87a9db73c5f6831759f8625f9fcdec3c2828204c444aef04f427735a","src/atomic/seq_lock_wide.rs":"9888dd03116bb89ca36d4ab8d5a0b5032107a2983a7eb8024454263b09080088","src/backoff.rs":"8715f0303ec91d1847c8ac3fc24bcc002a22a7284ade610e5eff4181f85827c7","src/cache_padded.rs":"b6ff04ecf6de9124c0069c014d35f37de543cff1c4bfc1f260586aa49a5af6d8","src/lib.rs":"060dabc6dc07de92a7afa57dcbc47222a95ef5819d543ad854858c3b329d6637","src/sync/mod.rs":"eca73c04f821859b8434d2b93db87d160dc6a3f65498ca201cd40d732ca4c134","src/sync/once_lock.rs":"aa8f957604d1119c4fc7038a18c14a6281230e81005f31201c099acff284ad4b","src/sync/parker.rs":"698996e7530da1f3815df11c89df7d916155229cbfd022cccbd555f1d1d31985","src/sync/sharded_lock.rs":"f96d536f5622fe2a0a0f7d8117be31e4b1ed607544c52c7e2ffcd1f51a6b93a1","src/sync/wait_group.rs":"3e339aab014f50e214fea535c841755113ea058153378ed54e50a4acb403c937","src/thread.rs":"04610787ba88f1f59549874a13fc037f2dcf4d8b5f1daaf08378f05c2b3c0039","tests/atomic_cell.rs":"716c864d4e103039dc5cd8bf6110da4cbabafc7e4e03819aa197828e8fb0a9c7","tests/cache_padded.rs":"1bfaff8354c8184e1ee1f902881ca9400b60effb273b0d3f752801a483d2b66d","tests/parker.rs":"6def4721287d9d70b1cfd63ebb34e1c83fbb3376edbad2bc8aac6ef69dd99d20","tests/sharded_lock.rs":"314adeb8a651a28935f7a49c9a261b8fa1fd82bf6a16c865a5aced6216d7e40b","tests/t
hread.rs":"9a7d7d3028c552fd834c68598b04a1cc252a816bc20ab62cec060d6cd09cab10","tests/wait_group.rs":"2a41533a5f7f113d19cd2bdafcc2abf86509109652274156efdd74abd00896b6"},"package":"d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"} \ No newline at end of file diff --git a/external/vendor/crossbeam-utils/.cargo_vcs_info.json b/external/vendor/crossbeam-utils/.cargo_vcs_info.json new file mode 100644 index 0000000000..87108da095 --- /dev/null +++ b/external/vendor/crossbeam-utils/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "ccd83ac4108a2a1b41e9c6e79c87267167d18dfa" + }, + "path_in_vcs": "crossbeam-utils" +} \ No newline at end of file diff --git a/external/vendor/crossbeam-utils/CHANGELOG.md b/external/vendor/crossbeam-utils/CHANGELOG.md new file mode 100644 index 0000000000..5aa1967e71 --- /dev/null +++ b/external/vendor/crossbeam-utils/CHANGELOG.md @@ -0,0 +1,243 @@ +# Version 0.8.21 + +- Improve implementation of `CachePadded`. (#1152) + +# Version 0.8.20 + +- Implement `Display` for `CachePadded`. (#1097) + +# Version 0.8.19 + +- Remove dependency on `cfg-if`. (#1072) + +# Version 0.8.18 + +- Relax the minimum supported Rust version to 1.60. (#1056) +- Improve scalability of `AtomicCell` fallback. (#1055) + +# Version 0.8.17 + +- Bump the minimum supported Rust version to 1.61. (#1037) +- Improve support for targets without atomic CAS or 64-bit atomic. (#1037) +- Always implement `UnwindSafe` and `RefUnwindSafe` for `AtomicCell`. (#1045) +- Improve compatibility with Miri, TSan, and loom. (#995, #1003) +- Improve compatibility with unstable `oom=panic`. (#1045) +- Improve implementation of `CachePadded`. (#1014, #1025) +- Update `loom` dependency to 0.7. + +# Version 0.8.16 + +- Improve implementation of `CachePadded`. (#967) + +# Version 0.8.15 + +- Add `#[clippy::has_significant_drop]` to `ShardedLock{Read,Write}Guard`. (#958) +- Improve handling of very large timeout. (#953) +- Soft-deprecate `thread::scope()` in favor of the more efficient `std::thread::scope` that stabilized in Rust 1.63. (#954) + +# Version 0.8.14 + +- Fix build script bug introduced in 0.8.13. (#932) + +# Version 0.8.13 + +**Note:** This release has been yanked due to regression fixed in 0.8.14. + +- Improve support for custom targets. (#922) + +# Version 0.8.12 + +- Removes the dependency on the `once_cell` crate to restore the MSRV. (#913) +- Work around [rust-lang#98302](https://github.com/rust-lang/rust/issues/98302), which causes compile error on windows-gnu when LTO is enabled. (#913) + +# Version 0.8.11 + +- Bump the minimum supported Rust version to 1.38. (#877) + +# Version 0.8.10 + +- Fix unsoundness of `AtomicCell` on types containing niches. (#834) + This fix contains breaking changes, but they are allowed because this is a soundness bug fix. See #834 for more. + +# Version 0.8.9 + +- Replace lazy_static with once_cell. (#817) + +# Version 0.8.8 + +- Fix a bug when unstable `loom` support is enabled. (#787) + +# Version 0.8.7 + +- Add `AtomicCell<{i*,u*}>::{fetch_max,fetch_min}`. (#785) +- Add `AtomicCell<{i*,u*,bool}>::fetch_nand`. (#785) +- Fix unsoundness of `AtomicCell<{i,u}64>` arithmetics on 32-bit targets that support `Atomic{I,U}64` (#781) + +# Version 0.8.6 + +**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. 
+
+- Re-add `AtomicCell<{i,u}64>::{fetch_add,fetch_sub,fetch_and,fetch_or,fetch_xor}` that were accidentally removed in 0.8.0 on targets that do not support `Atomic{I,U}64`. (#767)
+- Re-add `AtomicCell<{i,u}128>::{fetch_add,fetch_sub,fetch_and,fetch_or,fetch_xor}` that were accidentally removed in 0.8.0. (#767)
+
+# Version 0.8.5
+
+**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details.
+
+- Add `AtomicCell::fetch_update`. (#704)
+- Support targets that do not have atomic CAS on stable Rust. (#698)
+
+# Version 0.8.4
+
+**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details.
+
+- Bump `loom` dependency to version 0.5. (#686)
+
+# Version 0.8.3
+
+**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details.
+
+- Make `loom` dependency optional. (#666)
+
+# Version 0.8.2
+
+**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details.
+
+- Deprecate `AtomicCell::compare_and_swap`. Use `AtomicCell::compare_exchange` instead. (#619)
+- Add `Parker::park_deadline`. (#563)
+- Improve implementation of `CachePadded`. (#636)
+- Add unstable support for `loom`. (#487)
+
+# Version 0.8.1
+
+**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details.
+
+- Make `AtomicCell::is_lock_free` always const fn. (#600)
+- Fix a bug in `seq_lock_wide`. (#596)
+- Remove `const_fn` dependency. (#600)
+- `crossbeam-utils` no longer fails to compile if unable to determine rustc version. Instead, it now displays a warning. (#604)
+
+# Version 0.8.0
+
+**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details.
+
+- Bump the minimum supported Rust version to 1.36.
+- Remove deprecated `AtomicCell::get_mut()` and `Backoff::is_complete()` methods.
+- Remove `alloc` feature.
+- Make `CachePadded::new()` const function.
+- Make `AtomicCell::is_lock_free()` const function at 1.46+.
+- Implement `From<T>` for `AtomicCell<T>`.
+
+# Version 0.7.2
+
+- Fix bug in release (yanking 0.7.1)
+
+# Version 0.7.1
+
+- Bump `autocfg` dependency to version 1.0. (#460)
+- Make `AtomicCell` lockfree for u8, u16, u32, u64 sized values at 1.34+. (#454)
+
+# Version 0.7.0
+
+- Bump the minimum required version to 1.28.
+- Fix breakage with nightly feature due to rust-lang/rust#65214.
+- Apply `#[repr(transparent)]` to `AtomicCell`.
+- Make `AtomicCell::new()` const function at 1.31+.
+
+# Version 0.6.6
+
+- Add `UnwindSafe` and `RefUnwindSafe` impls for `AtomicCell`.
+- Add `AtomicCell::as_ptr()`.
+- Add `AtomicCell::take()`.
+- Fix a bug in `AtomicCell::compare_exchange()` and `AtomicCell::compare_and_swap()`.
+- Various documentation improvements.
+
+# Version 0.6.5
+
+- Rename `Backoff::is_complete()` to `Backoff::is_completed()`.
+
+# Version 0.6.4
+
+- Add `WaitGroup`, `ShardedLock`, and `Backoff`.
+- Add `fetch_*` methods for `AtomicCell<i128>` and `AtomicCell<u128>`.
+- Expand documentation.
+
+# Version 0.6.3
+
+- Add `AtomicCell`.
+- Improve documentation.
+
+# Version 0.6.2
+
+- Add `Parker`.
+- Improve documentation.
+
+# Version 0.6.1
+
+- Fix a soundness bug in `Scope::spawn()`.
+- Remove the `T: 'scope` bound on `ScopedJoinHandle`.
+
+# Version 0.6.0
+
+- Move `AtomicConsume` to `atomic` module.
+- `scope()` returns a `Result` of thread joins.
+- Remove `spawn_unchecked`.
+- Fix a soundness bug due to incorrect lifetimes.
+- Improve documentation.
+- Support nested scoped spawns.
+- Implement `Copy`, `Hash`, `PartialEq`, and `Eq` for `CachePadded`.
+- Add `CachePadded::into_inner()`.
+
+# Version 0.5.0
+
+- Reorganize sub-modules and rename functions.
+
+# Version 0.4.1
+
+- Fix a documentation link.
+
+# Version 0.4.0
+
+- `CachePadded` supports types bigger than 64 bytes.
+- Fix a bug in scoped threads where uninitialized memory was being dropped.
+- Minimum required Rust version is now 1.25.
+
+# Version 0.3.2
+
+- Mark `load_consume` with `#[inline]`.
+
+# Version 0.3.1
+
+- `load_consume` on ARM and AArch64.
+
+# Version 0.3.0
+
+- Add `join` for scoped thread API.
+- Add `load_consume` for atomic load-consume memory ordering.
+- Remove `AtomicOption`.
+
+# Version 0.2.2
+
+- Support Rust 1.12.1.
+- Call `T::clone` when cloning a `CachePadded<T>`.
+
+# Version 0.2.1
+
+- Add `use_std` feature.
+
+# Version 0.2.0
+
+- Add `nightly` feature.
+- Use `repr(align(64))` on `CachePadded<T>` with the `nightly` feature.
+- Implement `Drop` for `CachePadded<T>`.
+- Implement `Clone` for `CachePadded<T>`.
+- Implement `From<T>` for `CachePadded<T>`.
+- Implement better `Debug` for `CachePadded<T>`.
+- Write more tests.
+- Add this changelog.
+- Change cache line length to 64 bytes.
+- Remove `ZerosValid`.
+
+# Version 0.1.0
+
+- Old implementation of `CachePadded` from `crossbeam` version 0.3.0
diff --git a/external/vendor/crossbeam-utils/Cargo.toml b/external/vendor/crossbeam-utils/Cargo.toml
new file mode 100644
index 0000000000..c93dc2f275
--- /dev/null
+++ b/external/vendor/crossbeam-utils/Cargo.toml
@@ -0,0 +1,101 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+ +[package] +edition = "2021" +rust-version = "1.60" +name = "crossbeam-utils" +version = "0.8.21" +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Utilities for concurrent programming" +homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils" +readme = "README.md" +keywords = [ + "scoped", + "thread", + "atomic", + "cache", +] +categories = [ + "algorithms", + "concurrency", + "data-structures", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam" + +[lib] +name = "crossbeam_utils" +path = "src/lib.rs" + +[[test]] +name = "atomic_cell" +path = "tests/atomic_cell.rs" + +[[test]] +name = "cache_padded" +path = "tests/cache_padded.rs" + +[[test]] +name = "parker" +path = "tests/parker.rs" + +[[test]] +name = "sharded_lock" +path = "tests/sharded_lock.rs" + +[[test]] +name = "thread" +path = "tests/thread.rs" + +[[test]] +name = "wait_group" +path = "tests/wait_group.rs" + +[[bench]] +name = "atomic_cell" +path = "benches/atomic_cell.rs" + +[dependencies] + +[dev-dependencies.rand] +version = "0.8" + +[features] +default = ["std"] +nightly = [] +std = [] + +[target."cfg(crossbeam_loom)".dependencies.loom] +version = "0.7.1" +optional = true + +[lints.clippy.declare_interior_mutable_const] +level = "allow" +priority = 1 + +[lints.clippy.lint_groups_priority] +level = "allow" +priority = 1 + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = [ + "cfg(crossbeam_loom)", + "cfg(crossbeam_sanitize)", +] diff --git a/external/vendor/crossbeam-utils/Cargo.toml.orig b/external/vendor/crossbeam-utils/Cargo.toml.orig new file mode 100644 index 0000000000..3a95baea25 --- /dev/null +++ b/external/vendor/crossbeam-utils/Cargo.toml.orig @@ -0,0 +1,46 @@ +[package] +name = "crossbeam-utils" +# When publishing a new version: +# - Update CHANGELOG.md +# - Update README.md (when increasing major or minor version) +# - Run './tools/publish.sh crossbeam-utils ' +version = "0.8.21" +edition = "2021" +rust-version = "1.60" +license = "MIT OR Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam" +homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils" +description = "Utilities for concurrent programming" +keywords = ["scoped", "thread", "atomic", "cache"] +categories = ["algorithms", "concurrency", "data-structures", "no-std"] + +[features] +default = ["std"] + +# Enable to use APIs that require `std`. +# This is enabled by default. +std = [] + +# These features are no longer used. +# TODO: remove in the next major version. +# Enable to use of unstable functionality. +# This is disabled by default and requires recent nightly compiler. +# +# NOTE: This feature is outside of the normal semver guarantees and minor or +# patch versions of crossbeam may make breaking changes to them at any time. +nightly = [] + +[dependencies] + +# Enable the use of loom for concurrency testing. +# +# NOTE: This feature is outside of the normal semver guarantees and minor or +# patch versions of crossbeam may make breaking changes to them at any time. 
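+#
+# (The `crossbeam_loom` cfg is normally passed via RUSTFLAGS, e.g.
+# `RUSTFLAGS="--cfg crossbeam_loom" cargo test`.)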
+[target.'cfg(crossbeam_loom)'.dependencies] +loom = { version = "0.7.1", optional = true } + +[dev-dependencies] +rand = "0.8" + +[lints] +workspace = true diff --git a/external/vendor/crossbeam-utils/LICENSE-APACHE b/external/vendor/crossbeam-utils/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/external/vendor/crossbeam-utils/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/external/vendor/crossbeam-utils/LICENSE-MIT b/external/vendor/crossbeam-utils/LICENSE-MIT new file mode 100644 index 0000000000..068d491fd5 --- /dev/null +++ b/external/vendor/crossbeam-utils/LICENSE-MIT @@ -0,0 +1,27 @@ +The MIT License (MIT) + +Copyright (c) 2019 The Crossbeam Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/external/vendor/crossbeam-utils/README.md b/external/vendor/crossbeam-utils/README.md new file mode 100644 index 0000000000..7d6a679487 --- /dev/null +++ b/external/vendor/crossbeam-utils/README.md @@ -0,0 +1,73 @@ +# Crossbeam Utils + +[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( +https://github.com/crossbeam-rs/crossbeam/actions) +[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( +https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils#license) +[![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)]( +https://crates.io/crates/crossbeam-utils) +[![Documentation](https://docs.rs/crossbeam-utils/badge.svg)]( +https://docs.rs/crossbeam-utils) +[![Rust 1.60+](https://img.shields.io/badge/rust-1.60+-lightgray.svg)]( +https://www.rust-lang.org) +[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) + +This crate provides miscellaneous tools for concurrent programming: + +#### Atomics + +* [`AtomicCell`], a thread-safe mutable memory location.(no_std) +* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.(no_std) + +#### Thread synchronization + +* [`Parker`], a thread parking primitive. +* [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. 
+* [`WaitGroup`], for synchronizing the beginning or end of some computation. + +#### Utilities + +* [`Backoff`], for exponential backoff in spin loops.(no_std) +* [`CachePadded`], for padding and aligning a value to the length of a cache line.(no_std) +* [`scope`], for spawning threads that borrow local variables from the stack. + +*Features marked with (no_std) can be used in `no_std` environments.*
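+
+As a minimal sketch of how these pieces fit together (the `wait_for` helper and the
+concrete values below are illustrative only, not part of the crate), [`AtomicCell`]
+and [`Backoff`] can be combined into a simple spin-wait:
+
+```rust
+use crossbeam_utils::{atomic::AtomicCell, Backoff};
+
+// Spin until `cell` holds `target`, backing off a bit more on each retry.
+fn wait_for(cell: &AtomicCell<u32>, target: u32) {
+    let backoff = Backoff::new();
+    while cell.load() != target {
+        backoff.snooze();
+    }
+}
+
+fn main() {
+    let cell = AtomicCell::new(0u32);
+    cell.store(7);               // atomic store
+    assert_eq!(cell.swap(8), 7); // swap returns the previous value
+    wait_for(&cell, 8);          // returns immediately: the cell already holds 8
+}
+```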
+ +[`AtomicCell`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/struct.AtomicCell.html +[`AtomicConsume`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/trait.AtomicConsume.html +[`Parker`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.Parker.html +[`ShardedLock`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.ShardedLock.html +[`WaitGroup`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.WaitGroup.html +[`Backoff`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/struct.Backoff.html +[`CachePadded`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/struct.CachePadded.html +[`scope`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/thread/fn.scope.html + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-utils = "0.8" +``` + +## Compatibility + +Crossbeam Utils supports stable Rust releases going back at least six months, +and every time the minimum supported Rust version is increased, a new minor +version is released. Currently, the minimum supported Rust version is 1.60. + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/external/vendor/crossbeam-utils/benches/atomic_cell.rs b/external/vendor/crossbeam-utils/benches/atomic_cell.rs new file mode 100644 index 0000000000..844f7c02b6 --- /dev/null +++ b/external/vendor/crossbeam-utils/benches/atomic_cell.rs @@ -0,0 +1,156 @@ +#![feature(test)] + +extern crate test; + +use std::sync::Barrier; + +use crossbeam_utils::atomic::AtomicCell; +use crossbeam_utils::thread; + +#[bench] +fn load_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + let mut sum = 0; + b.iter(|| sum += a.load()); + test::black_box(sum); +} + +#[bench] +fn store_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + b.iter(|| a.store(1)); +} + +#[bench] +fn fetch_add_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + b.iter(|| a.fetch_add(1)); +} + +#[bench] +fn compare_exchange_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + let mut i = 0; + b.iter(|| { + let _ = a.compare_exchange(i, i.wrapping_add(1)); + i = i.wrapping_add(1); + }); +} + +#[bench] +fn concurrent_load_u8(b: &mut test::Bencher) { + const THREADS: usize = 2; + const STEPS: usize = 1_000_000; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + let exit = AtomicCell::new(false); + + let a = AtomicCell::new(0u8); + + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| loop { + start.wait(); + + let mut sum = 0; + for _ in 0..STEPS { + sum += a.load(); + } + test::black_box(sum); + + end.wait(); + if exit.load() { + break; + } + }); + } + + start.wait(); + end.wait(); + + b.iter(|| { + start.wait(); + end.wait(); + }); + + start.wait(); + exit.store(true); + end.wait(); + }) + .unwrap(); +} + +#[bench] +fn load_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + let mut sum = 0; + b.iter(|| sum += a.load()); + test::black_box(sum); +} + +#[bench] +fn store_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + b.iter(|| 
a.store(1)); +} + +#[bench] +fn fetch_add_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + b.iter(|| a.fetch_add(1)); +} + +#[bench] +fn compare_exchange_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + let mut i = 0; + b.iter(|| { + let _ = a.compare_exchange(i, i.wrapping_add(1)); + i = i.wrapping_add(1); + }); +} + +#[bench] +fn concurrent_load_usize(b: &mut test::Bencher) { + const THREADS: usize = 2; + const STEPS: usize = 1_000_000; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + let exit = AtomicCell::new(false); + + let a = AtomicCell::new(0usize); + + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| loop { + start.wait(); + + let mut sum = 0; + for _ in 0..STEPS { + sum += a.load(); + } + test::black_box(sum); + + end.wait(); + if exit.load() { + break; + } + }); + } + + start.wait(); + end.wait(); + + b.iter(|| { + start.wait(); + end.wait(); + }); + + start.wait(); + exit.store(true); + end.wait(); + }) + .unwrap(); +} diff --git a/external/vendor/crossbeam-utils/build-common.rs b/external/vendor/crossbeam-utils/build-common.rs new file mode 100644 index 0000000000..e91bb4d471 --- /dev/null +++ b/external/vendor/crossbeam-utils/build-common.rs @@ -0,0 +1,13 @@ +// The target triplets have the form of 'arch-vendor-system'. +// +// When building for Linux (e.g. the 'system' part is +// 'linux-something'), replace the vendor with 'unknown' +// so that mapping to rust standard targets happens correctly. +fn convert_custom_linux_target(target: String) -> String { + let mut parts: Vec<&str> = target.split('-').collect(); + let system = parts.get(2); + if system == Some(&"linux") { + parts[1] = "unknown"; + }; + parts.join("-") +} diff --git a/external/vendor/crossbeam-utils/build.rs b/external/vendor/crossbeam-utils/build.rs new file mode 100644 index 0000000000..ff7e81f949 --- /dev/null +++ b/external/vendor/crossbeam-utils/build.rs @@ -0,0 +1,48 @@ +// The rustc-cfg listed below are considered public API, but it is *unstable* +// and outside of the normal semver guarantees: +// +// - `crossbeam_no_atomic` +// Assume the target does *not* support any atomic operations. +// This is usually detected automatically by the build script, but you may +// need to enable it manually when building for custom targets or using +// non-cargo build systems that don't run the build script. +// +// With the exceptions mentioned above, the rustc-cfg emitted by the build +// script are *not* public API. + +#![warn(rust_2018_idioms)] + +use std::env; + +include!("no_atomic.rs"); +include!("build-common.rs"); + +fn main() { + println!("cargo:rerun-if-changed=no_atomic.rs"); + println!("cargo:rustc-check-cfg=cfg(crossbeam_no_atomic,crossbeam_sanitize_thread)"); + + let target = match env::var("TARGET") { + Ok(target) => convert_custom_linux_target(target), + Err(e) => { + println!( + "cargo:warning={}: unable to get TARGET environment variable: {}", + env!("CARGO_PKG_NAME"), + e + ); + return; + } + }; + + // Note that this is `no_`*, not `has_*`. This allows treating as the latest + // stable rustc is used when the build script doesn't run. This is useful + // for non-cargo build systems that don't run the build script. + if NO_ATOMIC.contains(&&*target) { + println!("cargo:rustc-cfg=crossbeam_no_atomic"); + } + + // `cfg(sanitize = "..")` is not stabilized. 
+    let sanitize = env::var("CARGO_CFG_SANITIZE").unwrap_or_default();
+    if sanitize.contains("thread") {
+        println!("cargo:rustc-cfg=crossbeam_sanitize_thread");
+    }
+}
diff --git a/external/vendor/crossbeam-utils/no_atomic.rs b/external/vendor/crossbeam-utils/no_atomic.rs
new file mode 100644
index 0000000000..f7e6d2fa42
--- /dev/null
+++ b/external/vendor/crossbeam-utils/no_atomic.rs
@@ -0,0 +1,9 @@
+// This file is @generated by no_atomic.sh.
+// It is not intended for manual editing.
+
+const NO_ATOMIC: &[&str] = &[
+    "bpfeb-unknown-none",
+    "bpfel-unknown-none",
+    "mipsel-sony-psx",
+    "msp430-none-elf",
+];
diff --git a/external/vendor/crossbeam-utils/src/atomic/atomic_cell.rs b/external/vendor/crossbeam-utils/src/atomic/atomic_cell.rs
new file mode 100644
index 0000000000..47472534c8
--- /dev/null
+++ b/external/vendor/crossbeam-utils/src/atomic/atomic_cell.rs
@@ -0,0 +1,1182 @@
+// Necessary for implementing atomic methods for `AtomicUnit`
+#![allow(clippy::unit_arg)]
+
+use crate::primitive::sync::atomic::{self, Ordering};
+use crate::CachePadded;
+use core::cell::UnsafeCell;
+use core::cmp;
+use core::fmt;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::panic::{RefUnwindSafe, UnwindSafe};
+use core::ptr;
+
+use super::seq_lock::SeqLock;
+
+/// A thread-safe mutable memory location.
+///
+/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads.
+///
+/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using
+/// global locks otherwise. You can call [`AtomicCell::<T>::is_lock_free()`] to check whether
+/// atomic instructions or locks will be used.
+///
+/// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering.
+///
+/// [`Cell`]: std::cell::Cell
+/// [`AtomicCell::<T>::is_lock_free()`]: AtomicCell::is_lock_free
+/// [`Acquire`]: std::sync::atomic::Ordering::Acquire
+/// [`Release`]: std::sync::atomic::Ordering::Release
+#[repr(transparent)]
+pub struct AtomicCell<T> {
+    /// The inner value.
+    ///
+    /// If this value can be transmuted into a primitive atomic type, it will be treated as such.
+    /// Otherwise, all potentially concurrent operations on this data will be protected by a global
+    /// lock.
+    ///
+    /// Using `MaybeUninit` to prevent code outside the cell from observing partially initialized
+    /// state. (This rustc bug has been fixed in Rust 1.64.)
+    ///
+    /// Note:
+    /// - we'll never store uninitialized `T` due to our API only using initialized `T`.
+    /// - this `MaybeUninit` does *not* fix all related soundness issues.
+    value: UnsafeCell<MaybeUninit<T>>,
+}
+
+unsafe impl<T: Send> Send for AtomicCell<T> {}
+unsafe impl<T: Send> Sync for AtomicCell<T> {}
+
+impl<T> UnwindSafe for AtomicCell<T> {}
+impl<T> RefUnwindSafe for AtomicCell<T> {}
+
+impl<T> AtomicCell<T> {
+    /// Creates a new atomic cell initialized with `val`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(7);
+    /// ```
+    pub const fn new(val: T) -> AtomicCell<T> {
+        AtomicCell {
+            value: UnsafeCell::new(MaybeUninit::new(val)),
+        }
+    }
+
+    /// Consumes the atomic and returns the contained value.
+    ///
+    /// This is safe because passing `self` by value guarantees that no other threads are
+    /// concurrently accessing the atomic data.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(7);
+    /// let v = a.into_inner();
+    ///
+    /// assert_eq!(v, 7);
+    /// ```
+    pub fn into_inner(self) -> T {
+        let this = ManuallyDrop::new(self);
+        // SAFETY:
+        // - passing `self` by value guarantees that no other threads are concurrently
+        //   accessing the atomic data
+        // - the raw pointer passed in is valid because we got it from an owned value.
+        // - `ManuallyDrop` prevents double dropping `T`
+        unsafe { this.as_ptr().read() }
+    }
+
+    /// Returns `true` if operations on values of this type are lock-free.
+    ///
+    /// If the compiler or the platform doesn't support the necessary atomic instructions,
+    /// `AtomicCell<T>` will use global locks for every potentially concurrent atomic operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// // This type is internally represented as `AtomicUsize` so we can just use atomic
+    /// // operations provided by it.
+    /// assert_eq!(AtomicCell::<usize>::is_lock_free(), true);
+    ///
+    /// // A wrapper struct around `isize`.
+    /// struct Foo {
+    ///     bar: isize,
+    /// }
+    /// // `AtomicCell<Foo>` will be internally represented as `AtomicIsize`.
+    /// assert_eq!(AtomicCell::<Foo>::is_lock_free(), true);
+    ///
+    /// // Operations on zero-sized types are always lock-free.
+    /// assert_eq!(AtomicCell::<()>::is_lock_free(), true);
+    ///
+    /// // Very large types cannot be represented as any of the standard atomic types, so atomic
+    /// // operations on them will have to use global locks for synchronization.
+    /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false);
+    /// ```
+    pub const fn is_lock_free() -> bool {
+        atomic_is_lock_free::<T>()
+    }
+
+    /// Stores `val` into the atomic cell.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(7);
+    ///
+    /// assert_eq!(a.load(), 7);
+    /// a.store(8);
+    /// assert_eq!(a.load(), 8);
+    /// ```
+    pub fn store(&self, val: T) {
+        if mem::needs_drop::<T>() {
+            drop(self.swap(val));
+        } else {
+            unsafe {
+                atomic_store(self.as_ptr(), val);
+            }
+        }
+    }
+
+    /// Stores `val` into the atomic cell and returns the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(7);
+    ///
+    /// assert_eq!(a.load(), 7);
+    /// assert_eq!(a.swap(8), 7);
+    /// assert_eq!(a.load(), 8);
+    /// ```
+    pub fn swap(&self, val: T) -> T {
+        unsafe { atomic_swap(self.as_ptr(), val) }
+    }
+
+    /// Returns a raw pointer to the underlying data in this atomic cell.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(5);
+    ///
+    /// let ptr = a.as_ptr();
+    /// ```
+    #[inline]
+    pub fn as_ptr(&self) -> *mut T {
+        self.value.get().cast::<T>()
+    }
+}
+
+impl<T: Default> AtomicCell<T> {
+    /// Takes the value of the atomic cell, leaving `Default::default()` in its place.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(5);
+    /// let five = a.take();
+    ///
+    /// assert_eq!(five, 5);
+    /// assert_eq!(a.into_inner(), 0);
+    /// ```
+    pub fn take(&self) -> T {
+        self.swap(Default::default())
+    }
+}
+
+impl<T: Copy> AtomicCell<T> {
+    /// Loads a value from the atomic cell.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(7);
+    ///
+    /// assert_eq!(a.load(), 7);
+    /// ```
+    pub fn load(&self) -> T {
+        unsafe { atomic_load(self.as_ptr()) }
+    }
+}
+
+impl<T: Copy + Eq> AtomicCell<T> {
+    /// If the current value equals `current`, stores `new` into the atomic cell.
+    ///
+    /// The return value is always the previous value. If it is equal to `current`, then the value
+    /// was updated.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(deprecated)]
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(1);
+    ///
+    /// assert_eq!(a.compare_and_swap(2, 3), 1);
+    /// assert_eq!(a.load(), 1);
+    ///
+    /// assert_eq!(a.compare_and_swap(1, 2), 1);
+    /// assert_eq!(a.load(), 2);
+    /// ```
+    // TODO: remove in the next major version.
+    #[deprecated(note = "Use `compare_exchange` instead")]
+    pub fn compare_and_swap(&self, current: T, new: T) -> T {
+        match self.compare_exchange(current, new) {
+            Ok(v) => v,
+            Err(v) => v,
+        }
+    }
+
+    /// If the current value equals `current`, stores `new` into the atomic cell.
+    ///
+    /// The return value is a result indicating whether the new value was written and containing
+    /// the previous value. On success this value is guaranteed to be equal to `current`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(1);
+    ///
+    /// assert_eq!(a.compare_exchange(2, 3), Err(1));
+    /// assert_eq!(a.load(), 1);
+    ///
+    /// assert_eq!(a.compare_exchange(1, 2), Ok(1));
+    /// assert_eq!(a.load(), 2);
+    /// ```
+    pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> {
+        unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) }
+    }
+
+    /// Fetches the value, and applies a function to it that returns an optional
+    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
+    /// `Err(previous_value)`.
+    ///
+    /// Note: This may call the function multiple times if the value has been changed from other threads in
+    /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
+    /// only once to the stored value.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(7);
+    /// assert_eq!(a.fetch_update(|_| None), Err(7));
+    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7));
+    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8));
+    /// assert_eq!(a.load(), 9);
+    /// ```
+    #[inline]
+    pub fn fetch_update<F>(&self, mut f: F) -> Result<T, T>
+    where
+        F: FnMut(T) -> Option<T>,
+    {
+        let mut prev = self.load();
+        while let Some(next) = f(prev) {
+            match self.compare_exchange(prev, next) {
+                x @ Ok(_) => return x,
+                Err(next_prev) => prev = next_prev,
+            }
+        }
+        Err(prev)
+    }
+}
+
+// `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop`
+// for `AtomicCell` to avoid leaks of non-`Copy` types.
+impl<T> Drop for AtomicCell<T> {
+    fn drop(&mut self) {
+        if mem::needs_drop::<T>() {
+            // SAFETY:
+            // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data
+            // - the raw pointer passed in is valid because we got it from a reference
+            // - `MaybeUninit` prevents double dropping `T`
+            unsafe {
+                self.as_ptr().drop_in_place();
+            }
+        }
+    }
+}
+
+macro_rules!
atomic { + // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`, + // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop. + (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => { + if can_transmute::<$t, $atomic>() { + let $a: &$atomic; + break $atomic_op; + } + }; + + // If values of type `$t` can be transmuted into values of a primitive atomic type, declares + // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes + // `$fallback_op`. + ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => { + loop { + atomic!(@check, $t, AtomicUnit, $a, $atomic_op); + + atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op); + atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op); + atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op); + #[cfg(target_has_atomic = "64")] + atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op); + // TODO: AtomicU128 is unstable + // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op); + + break $fallback_op; + } + }; +} + +macro_rules! impl_arithmetic { + ($t:ty, fallback, $example:tt) => { + impl AtomicCell<$t> { + /// Increments the current value by `val` and returns the previous value. + /// + /// The addition wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_add(3), 7); + /// assert_eq!(a.load(), 10); + /// ``` + #[inline] + pub fn fetch_add(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = value.wrapping_add(val); + old + } + + /// Decrements the current value by `val` and returns the previous value. + /// + /// The subtraction wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_sub(3), 7); + /// assert_eq!(a.load(), 4); + /// ``` + #[inline] + pub fn fetch_sub(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = value.wrapping_sub(val); + old + } + + /// Applies bitwise "and" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_and(3), 7); + /// assert_eq!(a.load(), 3); + /// ``` + #[inline] + pub fn fetch_and(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value &= val; + old + } + + /// Applies bitwise "nand" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_nand(3), 7); + /// assert_eq!(a.load(), !(7 & 3)); + /// ``` + #[inline] + pub fn fetch_nand(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = !(old & val); + old + } + + /// Applies bitwise "or" to the current value and returns the previous value. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_or(16), 7); + /// assert_eq!(a.load(), 23); + /// ``` + #[inline] + pub fn fetch_or(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value |= val; + old + } + + /// Applies bitwise "xor" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_xor(2), 7); + /// assert_eq!(a.load(), 5); + /// ``` + #[inline] + pub fn fetch_xor(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value ^= val; + old + } + + /// Compares and sets the maximum of the current value and `val`, + /// and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_max(2), 7); + /// assert_eq!(a.load(), 7); + /// ``` + #[inline] + pub fn fetch_max(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = cmp::max(old, val); + old + } + + /// Compares and sets the minimum of the current value and `val`, + /// and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_min(2), 7); + /// assert_eq!(a.load(), 2); + /// ``` + #[inline] + pub fn fetch_min(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = cmp::min(old, val); + old + } + } + }; + ($t:ty, $atomic:ident, $example:tt) => { + impl AtomicCell<$t> { + /// Increments the current value by `val` and returns the previous value. + /// + /// The addition wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_add(3), 7); + /// assert_eq!(a.load(), 10); + /// ``` + #[inline] + pub fn fetch_add(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_add(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = value.wrapping_add(val); + old + } + } + } + + /// Decrements the current value by `val` and returns the previous value. + /// + /// The subtraction wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_sub(3), 7); + /// assert_eq!(a.load(), 4); + /// ``` + #[inline] + pub fn fetch_sub(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_sub(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = value.wrapping_sub(val); + old + } + } + } + + /// Applies bitwise "and" to the current value and returns the previous value. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_and(3), 7); + /// assert_eq!(a.load(), 3); + /// ``` + #[inline] + pub fn fetch_and(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_and(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value &= val; + old + } + } + } + + /// Applies bitwise "nand" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_nand(3), 7); + /// assert_eq!(a.load(), !(7 & 3)); + /// ``` + #[inline] + pub fn fetch_nand(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_nand(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = !(old & val); + old + } + } + } + + /// Applies bitwise "or" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_or(16), 7); + /// assert_eq!(a.load(), 23); + /// ``` + #[inline] + pub fn fetch_or(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_or(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value |= val; + old + } + } + } + + /// Applies bitwise "xor" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_xor(2), 7); + /// assert_eq!(a.load(), 5); + /// ``` + #[inline] + pub fn fetch_xor(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_xor(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value ^= val; + old + } + } + } + + /// Compares and sets the maximum of the current value and `val`, + /// and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_max(9), 7); + /// assert_eq!(a.load(), 9); + /// ``` + #[inline] + pub fn fetch_max(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_max(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = cmp::max(old, val); + old + } + } + } + + /// Compares and sets the minimum of the current value and `val`, + /// and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_min(2), 7); + /// assert_eq!(a.load(), 2); + /// ``` + #[inline] + pub fn fetch_min(&self, val: $t) -> $t { + atomic! 
{
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_min(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value = cmp::min(old, val);
+                        old
+                    }
+                }
+            }
+        }
+    };
+}
+
+impl_arithmetic!(u8, AtomicU8, "let a = AtomicCell::new(7u8);");
+impl_arithmetic!(i8, AtomicI8, "let a = AtomicCell::new(7i8);");
+impl_arithmetic!(u16, AtomicU16, "let a = AtomicCell::new(7u16);");
+impl_arithmetic!(i16, AtomicI16, "let a = AtomicCell::new(7i16);");
+
+impl_arithmetic!(u32, AtomicU32, "let a = AtomicCell::new(7u32);");
+impl_arithmetic!(i32, AtomicI32, "let a = AtomicCell::new(7i32);");
+
+#[cfg(target_has_atomic = "64")]
+impl_arithmetic!(u64, AtomicU64, "let a = AtomicCell::new(7u64);");
+#[cfg(target_has_atomic = "64")]
+impl_arithmetic!(i64, AtomicI64, "let a = AtomicCell::new(7i64);");
+#[cfg(not(target_has_atomic = "64"))]
+impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);");
+#[cfg(not(target_has_atomic = "64"))]
+impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);");
+
+// TODO: AtomicU128 is unstable
+// impl_arithmetic!(u128, AtomicU128, "let a = AtomicCell::new(7u128);");
+// impl_arithmetic!(i128, AtomicI128, "let a = AtomicCell::new(7i128);");
+impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);");
+impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);");
+
+impl_arithmetic!(usize, AtomicUsize, "let a = AtomicCell::new(7usize);");
+impl_arithmetic!(isize, AtomicIsize, "let a = AtomicCell::new(7isize);");
+
+impl AtomicCell<bool> {
+    /// Applies logical "and" to the current value and returns the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(true);
+    ///
+    /// assert_eq!(a.fetch_and(true), true);
+    /// assert_eq!(a.load(), true);
+    ///
+    /// assert_eq!(a.fetch_and(false), true);
+    /// assert_eq!(a.load(), false);
+    /// ```
+    #[inline]
+    pub fn fetch_and(&self, val: bool) -> bool {
+        atomic! {
+            bool, _a,
+            {
+                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
+                a.fetch_and(val, Ordering::AcqRel)
+            },
+            {
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
+                let old = *value;
+                *value &= val;
+                old
+            }
+        }
+    }
+
+    /// Applies logical "nand" to the current value and returns the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(true);
+    ///
+    /// assert_eq!(a.fetch_nand(false), true);
+    /// assert_eq!(a.load(), true);
+    ///
+    /// assert_eq!(a.fetch_nand(true), true);
+    /// assert_eq!(a.load(), false);
+    ///
+    /// assert_eq!(a.fetch_nand(false), false);
+    /// assert_eq!(a.load(), true);
+    /// ```
+    #[inline]
+    pub fn fetch_nand(&self, val: bool) -> bool {
+        atomic! {
+            bool, _a,
+            {
+                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
+                a.fetch_nand(val, Ordering::AcqRel)
+            },
+            {
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
+                let old = *value;
+                *value = !(old & val);
+                old
+            }
+        }
+    }
+
+    /// Applies logical "or" to the current value and returns the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(false);
+    ///
+    /// assert_eq!(a.fetch_or(false), false);
+    /// assert_eq!(a.load(), false);
+    ///
+    /// assert_eq!(a.fetch_or(true), false);
+    /// assert_eq!(a.load(), true);
+    /// ```
+    #[inline]
+    pub fn fetch_or(&self, val: bool) -> bool {
+        atomic! {
+            bool, _a,
+            {
+                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
+                a.fetch_or(val, Ordering::AcqRel)
+            },
+            {
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
+                let old = *value;
+                *value |= val;
+                old
+            }
+        }
+    }
+
+    /// Applies logical "xor" to the current value and returns the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::atomic::AtomicCell;
+    ///
+    /// let a = AtomicCell::new(true);
+    ///
+    /// assert_eq!(a.fetch_xor(false), true);
+    /// assert_eq!(a.load(), true);
+    ///
+    /// assert_eq!(a.fetch_xor(true), true);
+    /// assert_eq!(a.load(), false);
+    /// ```
+    #[inline]
+    pub fn fetch_xor(&self, val: bool) -> bool {
+        atomic! {
+            bool, _a,
+            {
+                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
+                a.fetch_xor(val, Ordering::AcqRel)
+            },
+            {
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
+                let old = *value;
+                *value ^= val;
+                old
+            }
+        }
+    }
+}
+
+impl<T: Default> Default for AtomicCell<T> {
+    fn default() -> AtomicCell<T> {
+        AtomicCell::new(T::default())
+    }
+}
+
+impl<T> From<T> for AtomicCell<T> {
+    #[inline]
+    fn from(val: T) -> AtomicCell<T> {
+        AtomicCell::new(val)
+    }
+}
+
+impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("AtomicCell")
+            .field("value", &self.load())
+            .finish()
+    }
+}
+
+/// Returns `true` if values of type `A` can be transmuted into values of type `B`.
+const fn can_transmute<A, B>() -> bool {
+    // Sizes must be equal, but alignment of `A` must be greater than or equal to that of `B`.
+    (mem::size_of::<A>() == mem::size_of::<B>()) & (mem::align_of::<A>() >= mem::align_of::<B>())
+}
+
+/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`.
+///
+/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic
+/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock.
+///
+/// However, there is not only one global lock but an array of many locks, and one of them is
+/// picked based on the given address. Having many locks reduces contention and improves
+/// scalability.
+#[inline]
+#[must_use]
+fn lock(addr: usize) -> &'static SeqLock {
+    // The number of locks is a prime number because we want to make sure `addr % LEN` gets
+    // dispersed across all locks.
+    //
+    // Note that addresses are always aligned to some power of 2, depending on type `T` in
+    // `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number,
+    // too, which means only half of the locks would get utilized!
+    //
+    // It is also possible for addresses to accidentally get aligned to a number that is not a
+    // power of 2. Consider this example:
+    //
+    // ```
+    // #[repr(C)]
+    // struct Foo {
+    //     a: AtomicCell<u8>,
+    //     b: u8,
+    //     c: u8,
+    // }
+    // ```
+    //
+    // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
+    // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
+    // In order to protect from such cases, we simply choose a large prime number for `LEN`.
+    const LEN: usize = 67;
+    const L: CachePadded<SeqLock> = CachePadded::new(SeqLock::new());
+    static LOCKS: [CachePadded<SeqLock>; LEN] = [L; LEN];
+
+    // If the modulus is a constant number, the compiler will use crazy math to transform this into
+    // a sequence of cheap arithmetic operations rather than using the slow modulo instruction.
+    &LOCKS[addr % LEN]
+}
+
+/// An atomic `()`.
+///
+/// All operations are noops.
+struct AtomicUnit;
+
+impl AtomicUnit {
+    #[inline]
+    fn load(&self, _order: Ordering) {}
+
+    #[inline]
+    fn store(&self, _val: (), _order: Ordering) {}
+
+    #[inline]
+    fn swap(&self, _val: (), _order: Ordering) {}
+
+    #[inline]
+    fn compare_exchange_weak(
+        &self,
+        _current: (),
+        _new: (),
+        _success: Ordering,
+        _failure: Ordering,
+    ) -> Result<(), ()> {
+        Ok(())
+    }
+}
+
+/// Returns `true` if operations on `AtomicCell<T>` are lock-free.
+const fn atomic_is_lock_free<T>() -> bool {
+    atomic! { T, _a, true, false }
+}
+
+/// Atomically reads data from `src`.
+///
+/// This operation uses the `Acquire` ordering. If possible, an atomic instruction is used, and a
+/// global lock otherwise.
+unsafe fn atomic_load<T>(src: *mut T) -> T
+where
+    T: Copy,
+{
+    atomic! {
+        T, a,
+        {
+            a = &*(src as *const _ as *const _);
+            mem::transmute_copy(&a.load(Ordering::Acquire))
+        },
+        {
+            let lock = lock(src as usize);
+
+            // Try doing an optimistic read first.
+            if let Some(stamp) = lock.optimistic_read() {
+                // We need a volatile read here because other threads might concurrently modify the
+                // value. In theory, data races are *always* UB, even if we use volatile reads and
+                // discard the data when a data race is detected. The proper solution would be to
+                // do atomic reads and atomic writes, but we can't atomically read and write all
+                // kinds of data since `AtomicU8` is not available on stable Rust yet.
+                // Load as `MaybeUninit` because we may load a value that is not valid as `T`.
+                let val = ptr::read_volatile(src.cast::<MaybeUninit<T>>());
+
+                if lock.validate_read(stamp) {
+                    return val.assume_init();
+                }
+            }
+
+            // Grab a regular write lock so that writers don't starve this load.
+            let guard = lock.write();
+            let val = ptr::read(src);
+            // The value hasn't been changed. Drop the guard without incrementing the stamp.
+            guard.abort();
+            val
+        }
+    }
+}
+
+/// Atomically writes `val` to `dst`.
+///
+/// This operation uses the `Release` ordering. If possible, an atomic instruction is used, and a
+/// global lock otherwise.
+unsafe fn atomic_store<T>(dst: *mut T, val: T) {
+    atomic! {
+        T, a,
+        {
+            a = &*(dst as *const _ as *const _);
+            a.store(mem::transmute_copy(&val), Ordering::Release);
+            mem::forget(val);
+        },
+        {
+            let _guard = lock(dst as usize).write();
+            ptr::write(dst, val);
+        }
+    }
+}
+
+/// Atomically swaps data at `dst` with `val`.
+///
+/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
+/// global lock otherwise.
+unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
+    atomic! {
+        T, a,
+        {
+            a = &*(dst as *const _ as *const _);
+            let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel));
+            mem::forget(val);
+            res
+        },
+        {
+            let _guard = lock(dst as usize).write();
+            ptr::replace(dst, val)
+        }
+    }
+}
+
+/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at
+/// `dst` with `new`.
+///
+/// Returns the old value on success, or the current value at `dst` on failure.
+///
+/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
+/// global lock otherwise.
+#[allow(clippy::let_unit_value)]
+unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, mut current: T, new: T) -> Result<T, T>
+where
+    T: Copy + Eq,
+{
+    atomic! {
+        T, a,
+        {
+            a = &*(dst as *const _ as *const _);
+            let mut current_raw = mem::transmute_copy(&current);
+            let new_raw = mem::transmute_copy(&new);
+
+            loop {
+                match a.compare_exchange_weak(
+                    current_raw,
+                    new_raw,
+                    Ordering::AcqRel,
+                    Ordering::Acquire,
+                ) {
+                    Ok(_) => break Ok(current),
+                    Err(previous_raw) => {
+                        let previous = mem::transmute_copy(&previous_raw);
+
+                        if !T::eq(&previous, &current) {
+                            break Err(previous);
+                        }
+
+                        // The compare-exchange operation has failed and didn't store `new`. The
+                        // failure is either spurious, or `previous` was semantically equal to
+                        // `current` but not byte-equal. Let's retry with `previous` as the new
+                        // `current`.
+                        current = previous;
+                        current_raw = previous_raw;
+                    }
+                }
+            }
+        },
+        {
+            let guard = lock(dst as usize).write();
+
+            if T::eq(&*dst, &current) {
+                Ok(ptr::replace(dst, new))
+            } else {
+                let val = ptr::read(dst);
+                // The value hasn't been changed. Drop the guard without incrementing the stamp.
+                guard.abort();
+                Err(val)
+            }
+        }
+    }
+}
diff --git a/external/vendor/crossbeam-utils/src/atomic/consume.rs b/external/vendor/crossbeam-utils/src/atomic/consume.rs
new file mode 100644
index 0000000000..ff8e316b2c
--- /dev/null
+++ b/external/vendor/crossbeam-utils/src/atomic/consume.rs
@@ -0,0 +1,111 @@
+#[cfg(not(crossbeam_no_atomic))]
+use core::sync::atomic::Ordering;
+
+/// Trait which allows reading from primitive atomic types with "consume" ordering.
+pub trait AtomicConsume {
+    /// Type returned by `load_consume`.
+    type Val;
+
+    /// Loads a value from the atomic using a "consume" memory ordering.
+    ///
+    /// This is similar to the "acquire" ordering, except that an ordering is
+    /// only guaranteed with operations that "depend on" the result of the load.
+    /// However, consume loads are usually much faster than acquire loads on
+    /// architectures with a weak memory model since they don't require memory
+    /// fence instructions.
+    ///
+    /// The exact definition of "depend on" is a bit vague, but it works as you
+    /// would expect in practice since a lot of software, especially the Linux
+    /// kernel, relies on this behavior.
+    ///
+    /// This is currently only implemented on ARM and AArch64, where a fence
+    /// can be avoided. On other architectures this will fall back to a simple
+    /// `load(Ordering::Acquire)`.
+    fn load_consume(&self) -> Self::Val;
+}
+
+#[cfg(not(crossbeam_no_atomic))]
+// Miri and Loom don't support "consume" ordering and ThreadSanitizer doesn't treat
+// load(Relaxed) + compiler_fence(Acquire) as "consume" load.
+// LLVM generates machine code equivalent to fence(Acquire) in compiler_fence(Acquire)
+// on PowerPC, MIPS, etc. (https://godbolt.org/z/hffvjvW7h), so for now the fence
+// can be actually avoided here only on ARM and AArch64. See also
+// https://github.com/rust-lang/rust/issues/62256.
+#[cfg(all(
+    any(target_arch = "arm", target_arch = "aarch64"),
+    not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)),
+))]
+macro_rules! impl_consume {
+    () => {
+        #[inline]
+        fn load_consume(&self) -> Self::Val {
+            use crate::primitive::sync::atomic::compiler_fence;
+            let result = self.load(Ordering::Relaxed);
+            compiler_fence(Ordering::Acquire);
+            result
+        }
+    };
+}
+
+#[cfg(not(crossbeam_no_atomic))]
+#[cfg(not(all(
+    any(target_arch = "arm", target_arch = "aarch64"),
+    not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)),
+)))]
+macro_rules! impl_consume {
+    () => {
+        #[inline]
+        fn load_consume(&self) -> Self::Val {
+            self.load(Ordering::Acquire)
+        }
+    };
+}
+
+macro_rules! impl_atomic {
+    ($atomic:ident, $val:ty) => {
+        #[cfg(not(crossbeam_no_atomic))]
+        impl AtomicConsume for core::sync::atomic::$atomic {
+            type Val = $val;
+            impl_consume!();
+        }
+        #[cfg(crossbeam_loom)]
+        impl AtomicConsume for loom::sync::atomic::$atomic {
+            type Val = $val;
+            impl_consume!();
+        }
+    };
+}
+
+impl_atomic!(AtomicBool, bool);
+impl_atomic!(AtomicUsize, usize);
+impl_atomic!(AtomicIsize, isize);
+impl_atomic!(AtomicU8, u8);
+impl_atomic!(AtomicI8, i8);
+impl_atomic!(AtomicU16, u16);
+impl_atomic!(AtomicI16, i16);
+#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))]
+impl_atomic!(AtomicU32, u32);
+#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))]
+impl_atomic!(AtomicI32, i32);
+#[cfg(any(
+    target_has_atomic = "64",
+    not(any(target_pointer_width = "16", target_pointer_width = "32")),
+))]
+impl_atomic!(AtomicU64, u64);
+#[cfg(any(
+    target_has_atomic = "64",
+    not(any(target_pointer_width = "16", target_pointer_width = "32")),
+))]
+impl_atomic!(AtomicI64, i64);
+
+#[cfg(not(crossbeam_no_atomic))]
+impl<T> AtomicConsume for core::sync::atomic::AtomicPtr<T> {
+    type Val = *mut T;
+    impl_consume!();
+}
+
+#[cfg(crossbeam_loom)]
+impl<T> AtomicConsume for loom::sync::atomic::AtomicPtr<T> {
+    type Val = *mut T;
+    impl_consume!();
+}
diff --git a/external/vendor/crossbeam-utils/src/atomic/mod.rs b/external/vendor/crossbeam-utils/src/atomic/mod.rs
new file mode 100644
index 0000000000..8662ded564
--- /dev/null
+++ b/external/vendor/crossbeam-utils/src/atomic/mod.rs
@@ -0,0 +1,32 @@
+//! Atomic types.
+//!
+//! * [`AtomicCell`], a thread-safe mutable memory location.
+//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.
+
+#[cfg(target_has_atomic = "ptr")]
+#[cfg(not(crossbeam_loom))]
+// Use the "wide" sequence lock if the pointer width <= 32, to keep its counter from wrapping
+// around.
+//
+// In narrow architectures (pointer width <= 16), the counter is still <= 32-bit and may be
+// vulnerable to wrap around. But it's mostly okay, since on such primitive hardware, the
+// counter will not be increased that fast.
+// Note that Rust (and C99) pointers must be at least 16-bit (i.e., 8-bit targets are impossible): https://github.com/rust-lang/rust/pull/49305
+#[cfg_attr(
+    any(target_pointer_width = "16", target_pointer_width = "32"),
+    path = "seq_lock_wide.rs"
+)]
+mod seq_lock;
+
+#[cfg(target_has_atomic = "ptr")]
+// We cannot provide AtomicCell under cfg(crossbeam_loom) because loom's atomic
+// types have a different in-memory representation than the underlying type.
+// TODO: The latest loom supports fences, so fallback using seqlock may be available.
+#[cfg(not(crossbeam_loom))]
+mod atomic_cell;
+#[cfg(target_has_atomic = "ptr")]
+#[cfg(not(crossbeam_loom))]
+pub use atomic_cell::AtomicCell;
+
+mod consume;
+pub use consume::AtomicConsume;
diff --git a/external/vendor/crossbeam-utils/src/atomic/seq_lock.rs b/external/vendor/crossbeam-utils/src/atomic/seq_lock.rs
new file mode 100644
index 0000000000..ff8defd26d
--- /dev/null
+++ b/external/vendor/crossbeam-utils/src/atomic/seq_lock.rs
@@ -0,0 +1,112 @@
+use core::mem;
+use core::sync::atomic::{self, AtomicUsize, Ordering};
+
+use crate::Backoff;
+
+/// A simple stamped lock.
+pub(crate) struct SeqLock {
+    /// The current state of the lock.
+    ///
+    /// All bits except the least significant one hold the current stamp. When locked, the state
+    /// equals 1 and doesn't contain a valid stamp.
+    state: AtomicUsize,
+}
+
+impl SeqLock {
+    pub(crate) const fn new() -> Self {
+        Self {
+            state: AtomicUsize::new(0),
+        }
+    }
+
+    /// If not locked, returns the current stamp.
+    ///
+    /// This method should be called before optimistic reads.
+    #[inline]
+    pub(crate) fn optimistic_read(&self) -> Option<usize> {
+        let state = self.state.load(Ordering::Acquire);
+        if state == 1 {
+            None
+        } else {
+            Some(state)
+        }
+    }
+
+    /// Returns `true` if the current stamp is equal to `stamp`.
+    ///
+    /// This method should be called after optimistic reads to check whether they are valid. The
+    /// argument `stamp` should correspond to the one returned by method `optimistic_read`.
+    #[inline]
+    pub(crate) fn validate_read(&self, stamp: usize) -> bool {
+        atomic::fence(Ordering::Acquire);
+        self.state.load(Ordering::Relaxed) == stamp
+    }
+
+    /// Grabs the lock for writing.
+    #[inline]
+    pub(crate) fn write(&'static self) -> SeqLockWriteGuard {
+        let backoff = Backoff::new();
+        loop {
+            let previous = self.state.swap(1, Ordering::Acquire);
+
+            if previous != 1 {
+                atomic::fence(Ordering::Release);
+
+                return SeqLockWriteGuard {
+                    lock: self,
+                    state: previous,
+                };
+            }
+
+            backoff.snooze();
+        }
+    }
+}
+
+/// An RAII guard that releases the lock and increments the stamp when dropped.
+pub(crate) struct SeqLockWriteGuard {
+    /// The parent lock.
+    lock: &'static SeqLock,
+
+    /// The stamp before locking.
+    state: usize,
+}
+
+impl SeqLockWriteGuard {
+    /// Releases the lock without incrementing the stamp.
+    #[inline]
+    pub(crate) fn abort(self) {
+        self.lock.state.store(self.state, Ordering::Release);
+
+        // We specifically don't want to call drop(), since that's
+        // what increments the stamp.
+        mem::forget(self);
+    }
+}
+
+impl Drop for SeqLockWriteGuard {
+    #[inline]
+    fn drop(&mut self) {
+        // Release the lock and increment the stamp.
+        self.lock
+            .state
+            .store(self.state.wrapping_add(2), Ordering::Release);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::SeqLock;
+
+    #[test]
+    fn test_abort() {
+        static LK: SeqLock = SeqLock::new();
+        let before = LK.optimistic_read().unwrap();
+        {
+            let guard = LK.write();
+            guard.abort();
+        }
+        let after = LK.optimistic_read().unwrap();
+        assert_eq!(before, after, "aborted write does not update the stamp");
+    }
+}
diff --git a/external/vendor/crossbeam-utils/src/atomic/seq_lock_wide.rs b/external/vendor/crossbeam-utils/src/atomic/seq_lock_wide.rs
new file mode 100644
index 0000000000..ef5d94a454
--- /dev/null
+++ b/external/vendor/crossbeam-utils/src/atomic/seq_lock_wide.rs
@@ -0,0 +1,155 @@
+use core::mem;
+use core::sync::atomic::{self, AtomicUsize, Ordering};
+
+use crate::Backoff;
+
+/// A simple stamped lock.
+/// +/// The state is represented as two `AtomicUsize`: `state_hi` for high bits and `state_lo` for low +/// bits. +pub(crate) struct SeqLock { + /// The high bits of the current state of the lock. + state_hi: AtomicUsize, + + /// The low bits of the current state of the lock. + /// + /// All bits except the least significant one hold the current stamp. When locked, the state_lo + /// equals 1 and doesn't contain a valid stamp. + state_lo: AtomicUsize, +} + +impl SeqLock { + pub(crate) const fn new() -> Self { + Self { + state_hi: AtomicUsize::new(0), + state_lo: AtomicUsize::new(0), + } + } + + /// If not locked, returns the current stamp. + /// + /// This method should be called before optimistic reads. + #[inline] + pub(crate) fn optimistic_read(&self) -> Option<(usize, usize)> { + // The acquire loads from `state_hi` and `state_lo` synchronize with the release stores in + // `SeqLockWriteGuard::drop`. + // + // As a consequence, we can make sure that (1) all writes within the era of `state_hi - 1` + // happens before now; and therefore, (2) if `state_lo` is even, all writes within the + // critical section of (`state_hi`, `state_lo`) happens before now. + let state_hi = self.state_hi.load(Ordering::Acquire); + let state_lo = self.state_lo.load(Ordering::Acquire); + if state_lo == 1 { + None + } else { + Some((state_hi, state_lo)) + } + } + + /// Returns `true` if the current stamp is equal to `stamp`. + /// + /// This method should be called after optimistic reads to check whether they are valid. The + /// argument `stamp` should correspond to the one returned by method `optimistic_read`. + #[inline] + pub(crate) fn validate_read(&self, stamp: (usize, usize)) -> bool { + // Thanks to the fence, if we're noticing any modification to the data at the critical + // section of `(a, b)`, then the critical section's write of 1 to state_lo should be + // visible. + atomic::fence(Ordering::Acquire); + + // So if `state_lo` coincides with `stamp.1`, then either (1) we're noticing no modification + // to the data after the critical section of `(stamp.0, stamp.1)`, or (2) `state_lo` wrapped + // around. + // + // If (2) is the case, the acquire ordering ensures we see the new value of `state_hi`. + let state_lo = self.state_lo.load(Ordering::Acquire); + + // If (2) is the case and `state_hi` coincides with `stamp.0`, then `state_hi` also wrapped + // around, which we give up to correctly validate the read. + let state_hi = self.state_hi.load(Ordering::Relaxed); + + // Except for the case that both `state_hi` and `state_lo` wrapped around, the following + // condition implies that we're noticing no modification to the data after the critical + // section of `(stamp.0, stamp.1)`. + (state_hi, state_lo) == stamp + } + + /// Grabs the lock for writing. + #[inline] + pub(crate) fn write(&'static self) -> SeqLockWriteGuard { + let backoff = Backoff::new(); + loop { + let previous = self.state_lo.swap(1, Ordering::Acquire); + + if previous != 1 { + // To synchronize with the acquire fence in `validate_read` via any modification to + // the data at the critical section of `(state_hi, previous)`. + atomic::fence(Ordering::Release); + + return SeqLockWriteGuard { + lock: self, + state_lo: previous, + }; + } + + backoff.snooze(); + } + } +} + +/// An RAII guard that releases the lock and increments the stamp when dropped. +pub(crate) struct SeqLockWriteGuard { + /// The parent lock. + lock: &'static SeqLock, + + /// The stamp before locking. 
+ state_lo: usize, +} + +impl SeqLockWriteGuard { + /// Releases the lock without incrementing the stamp. + #[inline] + pub(crate) fn abort(self) { + self.lock.state_lo.store(self.state_lo, Ordering::Release); + mem::forget(self); + } +} + +impl Drop for SeqLockWriteGuard { + #[inline] + fn drop(&mut self) { + let state_lo = self.state_lo.wrapping_add(2); + + // Increase the high bits if the low bits wrap around. + // + // Release ordering for synchronizing with `optimistic_read`. + if state_lo == 0 { + let state_hi = self.lock.state_hi.load(Ordering::Relaxed); + self.lock + .state_hi + .store(state_hi.wrapping_add(1), Ordering::Release); + } + + // Release the lock and increment the stamp. + // + // Release ordering for synchronizing with `optimistic_read`. + self.lock.state_lo.store(state_lo, Ordering::Release); + } +} + +#[cfg(test)] +mod tests { + use super::SeqLock; + + #[test] + fn test_abort() { + static LK: SeqLock = SeqLock::new(); + let before = LK.optimistic_read().unwrap(); + { + let guard = LK.write(); + guard.abort(); + } + let after = LK.optimistic_read().unwrap(); + assert_eq!(before, after, "aborted write does not update the stamp"); + } +} diff --git a/external/vendor/crossbeam-utils/src/backoff.rs b/external/vendor/crossbeam-utils/src/backoff.rs new file mode 100644 index 0000000000..7a505ed614 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/backoff.rs @@ -0,0 +1,287 @@ +use crate::primitive::hint; +use core::cell::Cell; +use core::fmt; + +const SPIN_LIMIT: u32 = 6; +const YIELD_LIMIT: u32 = 10; + +/// Performs exponential backoff in spin loops. +/// +/// Backing off in spin loops reduces contention and improves overall performance. +/// +/// This primitive can execute *YIELD* and *PAUSE* instructions, yield the current thread to the OS +/// scheduler, and tell when is a good time to block the thread using a different synchronization +/// mechanism. Each step of the back off procedure takes roughly twice as long as the previous +/// step. +/// +/// # Examples +/// +/// Backing off in a lock-free loop: +/// +/// ``` +/// use crossbeam_utils::Backoff; +/// use std::sync::atomic::AtomicUsize; +/// use std::sync::atomic::Ordering::SeqCst; +/// +/// fn fetch_mul(a: &AtomicUsize, b: usize) -> usize { +/// let backoff = Backoff::new(); +/// loop { +/// let val = a.load(SeqCst); +/// if a.compare_exchange(val, val.wrapping_mul(b), SeqCst, SeqCst).is_ok() { +/// return val; +/// } +/// backoff.spin(); +/// } +/// } +/// ``` +/// +/// Waiting for an [`AtomicBool`] to become `true`: +/// +/// ``` +/// use crossbeam_utils::Backoff; +/// use std::sync::atomic::AtomicBool; +/// use std::sync::atomic::Ordering::SeqCst; +/// +/// fn spin_wait(ready: &AtomicBool) { +/// let backoff = Backoff::new(); +/// while !ready.load(SeqCst) { +/// backoff.snooze(); +/// } +/// } +/// ``` +/// +/// Waiting for an [`AtomicBool`] to become `true` and parking the thread after a long wait. 
+/// Note that whoever sets the atomic variable to `true` must notify the parked thread by calling +/// [`unpark()`]: +/// +/// ``` +/// use crossbeam_utils::Backoff; +/// use std::sync::atomic::AtomicBool; +/// use std::sync::atomic::Ordering::SeqCst; +/// use std::thread; +/// +/// fn blocking_wait(ready: &AtomicBool) { +/// let backoff = Backoff::new(); +/// while !ready.load(SeqCst) { +/// if backoff.is_completed() { +/// thread::park(); +/// } else { +/// backoff.snooze(); +/// } +/// } +/// } +/// ``` +/// +/// [`is_completed`]: Backoff::is_completed +/// [`std::thread::park()`]: std::thread::park +/// [`Condvar`]: std::sync::Condvar +/// [`AtomicBool`]: std::sync::atomic::AtomicBool +/// [`unpark()`]: std::thread::Thread::unpark +pub struct Backoff { + step: Cell, +} + +impl Backoff { + /// Creates a new `Backoff`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// + /// let backoff = Backoff::new(); + /// ``` + #[inline] + pub fn new() -> Self { + Backoff { step: Cell::new(0) } + } + + /// Resets the `Backoff`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// + /// let backoff = Backoff::new(); + /// backoff.reset(); + /// ``` + #[inline] + pub fn reset(&self) { + self.step.set(0); + } + + /// Backs off in a lock-free loop. + /// + /// This method should be used when we need to retry an operation because another thread made + /// progress. + /// + /// The processor may yield using the *YIELD* or *PAUSE* instruction. + /// + /// # Examples + /// + /// Backing off in a lock-free loop: + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// use std::sync::atomic::AtomicUsize; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// fn fetch_mul(a: &AtomicUsize, b: usize) -> usize { + /// let backoff = Backoff::new(); + /// loop { + /// let val = a.load(SeqCst); + /// if a.compare_exchange(val, val.wrapping_mul(b), SeqCst, SeqCst).is_ok() { + /// return val; + /// } + /// backoff.spin(); + /// } + /// } + /// + /// let a = AtomicUsize::new(7); + /// assert_eq!(fetch_mul(&a, 8), 7); + /// assert_eq!(a.load(SeqCst), 56); + /// ``` + #[inline] + pub fn spin(&self) { + for _ in 0..1 << self.step.get().min(SPIN_LIMIT) { + hint::spin_loop(); + } + + if self.step.get() <= SPIN_LIMIT { + self.step.set(self.step.get() + 1); + } + } + + /// Backs off in a blocking loop. + /// + /// This method should be used when we need to wait for another thread to make progress. + /// + /// The processor may yield using the *YIELD* or *PAUSE* instruction and the current thread + /// may yield by giving up a timeslice to the OS scheduler. + /// + /// In `#[no_std]` environments, this method is equivalent to [`spin`]. + /// + /// If possible, use [`is_completed`] to check when it is advised to stop using backoff and + /// block the current thread using a different synchronization mechanism instead. 
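+    ///
+    /// With the constants used in this file, that means spinning with `2^step`
+    /// processor hints while `step` is at most `SPIN_LIMIT` (6), then yielding
+    /// the thread to the OS on each further call; [`is_completed`] starts
+    /// returning `true` once `step` exceeds `YIELD_LIMIT` (10).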
+ /// + /// [`spin`]: Backoff::spin + /// [`is_completed`]: Backoff::is_completed + /// + /// # Examples + /// + /// Waiting for an [`AtomicBool`] to become `true`: + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// use std::sync::Arc; + /// use std::sync::atomic::AtomicBool; + /// use std::sync::atomic::Ordering::SeqCst; + /// use std::thread; + /// use std::time::Duration; + /// + /// fn spin_wait(ready: &AtomicBool) { + /// let backoff = Backoff::new(); + /// while !ready.load(SeqCst) { + /// backoff.snooze(); + /// } + /// } + /// + /// let ready = Arc::new(AtomicBool::new(false)); + /// let ready2 = ready.clone(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_millis(100)); + /// ready2.store(true, SeqCst); + /// }); + /// + /// assert_eq!(ready.load(SeqCst), false); + /// spin_wait(&ready); + /// assert_eq!(ready.load(SeqCst), true); + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + /// + /// [`AtomicBool`]: std::sync::atomic::AtomicBool + #[inline] + pub fn snooze(&self) { + if self.step.get() <= SPIN_LIMIT { + for _ in 0..1 << self.step.get() { + hint::spin_loop(); + } + } else { + #[cfg(not(feature = "std"))] + for _ in 0..1 << self.step.get() { + hint::spin_loop(); + } + + #[cfg(feature = "std")] + ::std::thread::yield_now(); + } + + if self.step.get() <= YIELD_LIMIT { + self.step.set(self.step.get() + 1); + } + } + + /// Returns `true` if exponential backoff has completed and blocking the thread is advised. + /// + /// # Examples + /// + /// Waiting for an [`AtomicBool`] to become `true` and parking the thread after a long wait: + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// use std::sync::Arc; + /// use std::sync::atomic::AtomicBool; + /// use std::sync::atomic::Ordering::SeqCst; + /// use std::thread; + /// use std::time::Duration; + /// + /// fn blocking_wait(ready: &AtomicBool) { + /// let backoff = Backoff::new(); + /// while !ready.load(SeqCst) { + /// if backoff.is_completed() { + /// thread::park(); + /// } else { + /// backoff.snooze(); + /// } + /// } + /// } + /// + /// let ready = Arc::new(AtomicBool::new(false)); + /// let ready2 = ready.clone(); + /// let waiter = thread::current(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_millis(100)); + /// ready2.store(true, SeqCst); + /// waiter.unpark(); + /// }); + /// + /// assert_eq!(ready.load(SeqCst), false); + /// blocking_wait(&ready); + /// assert_eq!(ready.load(SeqCst), true); + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + /// + /// [`AtomicBool`]: std::sync::atomic::AtomicBool + #[inline] + pub fn is_completed(&self) -> bool { + self.step.get() > YIELD_LIMIT + } +} + +impl fmt::Debug for Backoff { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Backoff") + .field("step", &self.step) + .field("is_completed", &self.is_completed()) + .finish() + } +} + +impl Default for Backoff { + fn default() -> Backoff { + Backoff::new() + } +} diff --git a/external/vendor/crossbeam-utils/src/cache_padded.rs b/external/vendor/crossbeam-utils/src/cache_padded.rs new file mode 100644 index 0000000000..6c930c6f3f --- /dev/null +++ b/external/vendor/crossbeam-utils/src/cache_padded.rs @@ -0,0 +1,217 @@ +use core::fmt; +use core::ops::{Deref, DerefMut}; + +/// Pads and aligns a value to the length of a cache 
line.
+///
+/// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of
+/// data are not placed into the same cache line. Updating an atomic value invalidates the whole
+/// cache line it belongs to, which makes the next access to the same cache line slower for other
+/// CPU cores. Use `CachePadded<T>` to ensure updating one piece of data doesn't invalidate other
+/// cached data.
+///
+/// # Size and alignment
+///
+/// Cache lines are assumed to be N bytes long, depending on the architecture:
+///
+/// * On x86-64, aarch64, and powerpc64, N = 128.
+/// * On arm, mips, mips64, sparc, and hexagon, N = 32.
+/// * On m68k, N = 16.
+/// * On s390x, N = 256.
+/// * On all others, N = 64.
+///
+/// Note that N is just a reasonable guess and is not guaranteed to match the actual cache line
+/// length of the machine the program is running on. On modern Intel architectures, spatial
+/// prefetcher is pulling pairs of 64-byte cache lines at a time, so we pessimistically assume that
+/// cache lines are 128 bytes long.
+///
+/// The size of `CachePadded<T>` is the smallest multiple of N bytes large enough to accommodate
+/// a value of type `T`.
+///
+/// The alignment of `CachePadded<T>` is the maximum of N bytes and the alignment of `T`.
+///
+/// # Examples
+///
+/// Alignment and padding:
+///
+/// ```
+/// use crossbeam_utils::CachePadded;
+///
+/// let array = [CachePadded::new(1i8), CachePadded::new(2i8)];
+/// let addr1 = &*array[0] as *const i8 as usize;
+/// let addr2 = &*array[1] as *const i8 as usize;
+///
+/// assert!(addr2 - addr1 >= 32);
+/// assert_eq!(addr1 % 32, 0);
+/// assert_eq!(addr2 % 32, 0);
+/// ```
+///
+/// When building a concurrent queue with a head and a tail index, it is wise to place them in
+/// different cache lines so that concurrent threads pushing and popping elements don't invalidate
+/// each other's cache lines:
+///
+/// ```
+/// use crossbeam_utils::CachePadded;
+/// use std::sync::atomic::AtomicUsize;
+///
+/// struct Queue<T> {
+///     head: CachePadded<AtomicUsize>,
+///     tail: CachePadded<AtomicUsize>,
+///     buffer: *mut T,
+/// }
+/// ```
+#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
+// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
+// lines at a time, so we have to align to 128 bytes rather than 64.
+//
+// Sources:
+// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
+// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
+//
+// aarch64/arm64ec's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
+//
+// Sources:
+// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
+//
+// powerpc64 has 128-byte cache line size.
+//
+// Sources:
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/powerpc/include/asm/cache.h#L26
+#[cfg_attr(
+    any(
+        target_arch = "x86_64",
+        target_arch = "aarch64",
+        target_arch = "arm64ec",
+        target_arch = "powerpc64",
+    ),
+    repr(align(128))
+)]
+// arm, mips, mips64, sparc, and hexagon have 32-byte cache line size.
+//
+// Sources:
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12
+#[cfg_attr(
+    any(
+        target_arch = "arm",
+        target_arch = "mips",
+        target_arch = "mips32r6",
+        target_arch = "mips64",
+        target_arch = "mips64r6",
+        target_arch = "sparc",
+        target_arch = "hexagon",
+    ),
+    repr(align(32))
+)]
+// m68k has 16-byte cache line size.
+//
+// Sources:
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9
+#[cfg_attr(target_arch = "m68k", repr(align(16)))]
+// s390x has 256-byte cache line size.
+//
+// Sources:
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13
+#[cfg_attr(target_arch = "s390x", repr(align(256)))]
+// x86, wasm, riscv, and sparc64 have 64-byte cache line size.
+//
+// Sources:
+// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19
+//
+// All others are assumed to have 64-byte cache line size.
+#[cfg_attr(
+    not(any(
+        target_arch = "x86_64",
+        target_arch = "aarch64",
+        target_arch = "arm64ec",
+        target_arch = "powerpc64",
+        target_arch = "arm",
+        target_arch = "mips",
+        target_arch = "mips32r6",
+        target_arch = "mips64",
+        target_arch = "mips64r6",
+        target_arch = "sparc",
+        target_arch = "hexagon",
+        target_arch = "m68k",
+        target_arch = "s390x",
+    )),
+    repr(align(64))
+)]
+pub struct CachePadded<T> {
+    value: T,
+}
+
+unsafe impl<T: Send> Send for CachePadded<T> {}
+unsafe impl<T: Sync> Sync for CachePadded<T> {}
+
+impl<T> CachePadded<T> {
+    /// Pads and aligns a value to the length of a cache line.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::CachePadded;
+    ///
+    /// let padded_value = CachePadded::new(1);
+    /// ```
+    pub const fn new(t: T) -> CachePadded<T> {
+        CachePadded::<T> { value: t }
+    }
+
+    /// Returns the inner value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::CachePadded;
+    ///
+    /// let padded_value = CachePadded::new(7);
+    /// let value = padded_value.into_inner();
+    /// assert_eq!(value, 7);
+    /// ```
+    pub fn into_inner(self) -> T {
+        self.value
+    }
+}
+
+impl<T> Deref for CachePadded<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.value
+    }
+}
+
+impl<T> DerefMut for CachePadded<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.value
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("CachePadded")
+            .field("value", &self.value)
+            .finish()
+    }
+}
+
+impl<T> From<T> for CachePadded<T> {
+    fn from(t: T) -> Self {
+        CachePadded::new(t)
+    }
+}
+
+impl<T: fmt::Display> fmt::Display for CachePadded<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&self.value, f)
+    }
+}
diff --git a/external/vendor/crossbeam-utils/src/lib.rs b/external/vendor/crossbeam-utils/src/lib.rs
new file mode 100644
index 0000000000..6f124f9700
--- /dev/null
+++ b/external/vendor/crossbeam-utils/src/lib.rs
@@ -0,0 +1,110 @@
+//! Miscellaneous tools for concurrent programming.
+//!
+//! ## Atomics
+//!
+//! * [`AtomicCell`], a thread-safe mutable memory location.
+//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.
+//!
+//! ## Thread synchronization
+//!
+//! * [`Parker`], a thread parking primitive.
+//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads.
+//! * [`WaitGroup`], for synchronizing the beginning or end of some computation.
+//!
+//! ## Utilities
+//!
+//! * [`Backoff`], for exponential backoff in spin loops.
+//! * [`CachePadded`], for padding and aligning a value to the length of a cache line.
+//! * [`scope`], for spawning threads that borrow local variables from the stack.
+//!
+//! [`AtomicCell`]: atomic::AtomicCell
+//! [`AtomicConsume`]: atomic::AtomicConsume
+//! [`Parker`]: sync::Parker
+//! [`ShardedLock`]: sync::ShardedLock
+//! [`WaitGroup`]: sync::WaitGroup
+//! [`scope`]: thread::scope

+#![no_std]
+#![doc(test(
+    no_crate_inject,
+    attr(
+        deny(warnings, rust_2018_idioms),
+        allow(dead_code, unused_assignments, unused_variables)
+    )
+))]
+#![warn(
+    missing_docs,
+    missing_debug_implementations,
+    rust_2018_idioms,
+    unreachable_pub
+)]
+
+#[cfg(feature = "std")]
+extern crate std;
+
+#[cfg(crossbeam_loom)]
+#[allow(unused_imports)]
+mod primitive {
+    pub(crate) mod hint {
+        pub(crate) use loom::hint::spin_loop;
+    }
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use loom::sync::atomic::{
+                AtomicBool, AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16,
+                AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering,
+            };
+
+            // FIXME: loom does not support compiler_fence at the moment.
+            // https://github.com/tokio-rs/loom/issues/117
+            // we use fence as a stand-in for compiler_fence for the time being.
+            // this may miss some races since fence is stronger than compiler_fence,
+            // but it's the best we can do for the time being.
+ pub(crate) use loom::sync::atomic::fence as compiler_fence; + } + pub(crate) use loom::sync::{Arc, Condvar, Mutex}; + } +} +#[cfg(not(crossbeam_loom))] +#[allow(unused_imports)] +mod primitive { + pub(crate) mod hint { + pub(crate) use core::hint::spin_loop; + } + pub(crate) mod sync { + pub(crate) mod atomic { + pub(crate) use core::sync::atomic::{compiler_fence, Ordering}; + #[cfg(not(crossbeam_no_atomic))] + pub(crate) use core::sync::atomic::{ + AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicU16, AtomicU8, AtomicUsize, + }; + #[cfg(not(crossbeam_no_atomic))] + #[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))] + pub(crate) use core::sync::atomic::{AtomicI32, AtomicU32}; + #[cfg(not(crossbeam_no_atomic))] + #[cfg(any( + target_has_atomic = "64", + not(any(target_pointer_width = "16", target_pointer_width = "32")), + ))] + pub(crate) use core::sync::atomic::{AtomicI64, AtomicU64}; + } + + #[cfg(feature = "std")] + pub(crate) use std::sync::{Arc, Condvar, Mutex}; + } +} + +pub mod atomic; + +mod cache_padded; +pub use crate::cache_padded::CachePadded; + +mod backoff; +pub use crate::backoff::Backoff; + +#[cfg(feature = "std")] +pub mod sync; + +#[cfg(feature = "std")] +#[cfg(not(crossbeam_loom))] +pub mod thread; diff --git a/external/vendor/crossbeam-utils/src/sync/mod.rs b/external/vendor/crossbeam-utils/src/sync/mod.rs new file mode 100644 index 0000000000..f9eec71fb3 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/mod.rs @@ -0,0 +1,17 @@ +//! Thread synchronization primitives. +//! +//! * [`Parker`], a thread parking primitive. +//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. +//! * [`WaitGroup`], for synchronizing the beginning or end of some computation. + +#[cfg(not(crossbeam_loom))] +mod once_lock; +mod parker; +#[cfg(not(crossbeam_loom))] +mod sharded_lock; +mod wait_group; + +pub use self::parker::{Parker, Unparker}; +#[cfg(not(crossbeam_loom))] +pub use self::sharded_lock::{ShardedLock, ShardedLockReadGuard, ShardedLockWriteGuard}; +pub use self::wait_group::WaitGroup; diff --git a/external/vendor/crossbeam-utils/src/sync/once_lock.rs b/external/vendor/crossbeam-utils/src/sync/once_lock.rs new file mode 100644 index 0000000000..e057aca7d5 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/once_lock.rs @@ -0,0 +1,88 @@ +// Based on unstable std::sync::OnceLock. +// +// Source: https://github.com/rust-lang/rust/blob/8e9c93df464b7ada3fc7a1c8ccddd9dcb24ee0a0/library/std/src/sync/once_lock.rs + +use core::cell::UnsafeCell; +use core::mem::MaybeUninit; +use std::sync::Once; + +pub(crate) struct OnceLock { + once: Once, + value: UnsafeCell>, + // Unlike std::sync::OnceLock, we don't need PhantomData here because + // we don't use #[may_dangle]. +} + +unsafe impl Sync for OnceLock {} +unsafe impl Send for OnceLock {} + +impl OnceLock { + /// Creates a new empty cell. + #[must_use] + pub(crate) const fn new() -> Self { + Self { + once: Once::new(), + value: UnsafeCell::new(MaybeUninit::uninit()), + } + } + + /// Gets the contents of the cell, initializing it with `f` if the cell + /// was empty. + /// + /// Many threads may call `get_or_init` concurrently with different + /// initializing functions, but it is guaranteed that only one function + /// will be executed. + /// + /// # Panics + /// + /// If `f` panics, the panic is propagated to the caller, and the cell + /// remains uninitialized. + /// + /// It is an error to reentrantly initialize the cell from `f`. The + /// exact outcome is unspecified. 
Current implementation deadlocks, but + /// this may be changed to a panic in the future. + pub(crate) fn get_or_init(&self, f: F) -> &T + where + F: FnOnce() -> T, + { + // Fast path check + if self.once.is_completed() { + // SAFETY: The inner value has been initialized + return unsafe { self.get_unchecked() }; + } + self.initialize(f); + + // SAFETY: The inner value has been initialized + unsafe { self.get_unchecked() } + } + + #[cold] + fn initialize(&self, f: F) + where + F: FnOnce() -> T, + { + let slot = self.value.get(); + + self.once.call_once(|| { + let value = f(); + unsafe { slot.write(MaybeUninit::new(value)) } + }); + } + + /// # Safety + /// + /// The value must be initialized + unsafe fn get_unchecked(&self) -> &T { + debug_assert!(self.once.is_completed()); + &*self.value.get().cast::() + } +} + +impl Drop for OnceLock { + fn drop(&mut self) { + if self.once.is_completed() { + // SAFETY: The inner value has been initialized + unsafe { (*self.value.get()).assume_init_drop() }; + } + } +} diff --git a/external/vendor/crossbeam-utils/src/sync/parker.rs b/external/vendor/crossbeam-utils/src/sync/parker.rs new file mode 100644 index 0000000000..971981d2b7 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/parker.rs @@ -0,0 +1,415 @@ +use crate::primitive::sync::atomic::{AtomicUsize, Ordering::SeqCst}; +use crate::primitive::sync::{Arc, Condvar, Mutex}; +use std::fmt; +use std::marker::PhantomData; +use std::time::{Duration, Instant}; + +/// A thread parking primitive. +/// +/// Conceptually, each `Parker` has an associated token which is initially not present: +/// +/// * The [`park`] method blocks the current thread unless or until the token is available, at +/// which point it automatically consumes the token. +/// +/// * The [`park_timeout`] and [`park_deadline`] methods work the same as [`park`], but block for +/// a specified maximum time. +/// +/// * The [`unpark`] method atomically makes the token available if it wasn't already. Because the +/// token is initially absent, [`unpark`] followed by [`park`] will result in the second call +/// returning immediately. +/// +/// In other words, each `Parker` acts a bit like a spinlock that can be locked and unlocked using +/// [`park`] and [`unpark`]. +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use std::time::Duration; +/// use crossbeam_utils::sync::Parker; +/// +/// let p = Parker::new(); +/// let u = p.unparker().clone(); +/// +/// // Make the token available. +/// u.unpark(); +/// // Wakes up immediately and consumes the token. +/// p.park(); +/// +/// thread::spawn(move || { +/// thread::sleep(Duration::from_millis(500)); +/// u.unpark(); +/// }); +/// +/// // Wakes up when `u.unpark()` provides the token. +/// p.park(); +/// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +/// ``` +/// +/// [`park`]: Parker::park +/// [`park_timeout`]: Parker::park_timeout +/// [`park_deadline`]: Parker::park_deadline +/// [`unpark`]: Unparker::unpark +pub struct Parker { + unparker: Unparker, + _marker: PhantomData<*const ()>, +} + +unsafe impl Send for Parker {} + +impl Default for Parker { + fn default() -> Self { + Self { + unparker: Unparker { + inner: Arc::new(Inner { + state: AtomicUsize::new(EMPTY), + lock: Mutex::new(()), + cvar: Condvar::new(), + }), + }, + _marker: PhantomData, + } + } +} + +impl Parker { + /// Creates a new `Parker`. 
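+    ///
+    /// The token starts out absent, so a freshly created `Parker` will block
+    /// in [`park`](Parker::park) until some thread calls
+    /// [`unpark`](Unparker::unpark).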
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// ``` + /// + pub fn new() -> Parker { + Self::default() + } + + /// Blocks the current thread until the token is made available. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// + /// // Make the token available. + /// u.unpark(); + /// + /// // Wakes up immediately and consumes the token. + /// p.park(); + /// ``` + pub fn park(&self) { + self.unparker.inner.park(None); + } + + /// Blocks the current thread until the token is made available, but only for a limited time. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// + /// // Waits for the token to become available, but will not wait longer than 500 ms. + /// p.park_timeout(Duration::from_millis(500)); + /// ``` + pub fn park_timeout(&self, timeout: Duration) { + match Instant::now().checked_add(timeout) { + Some(deadline) => self.park_deadline(deadline), + None => self.park(), + } + } + + /// Blocks the current thread until the token is made available, or until a certain deadline. + /// + /// # Examples + /// + /// ``` + /// use std::time::{Duration, Instant}; + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let deadline = Instant::now() + Duration::from_millis(500); + /// + /// // Waits for the token to become available, but will not wait longer than 500 ms. + /// p.park_deadline(deadline); + /// ``` + pub fn park_deadline(&self, deadline: Instant) { + self.unparker.inner.park(Some(deadline)) + } + + /// Returns a reference to an associated [`Unparker`]. + /// + /// The returned [`Unparker`] doesn't have to be used by reference - it can also be cloned. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// + /// // Make the token available. + /// u.unpark(); + /// // Wakes up immediately and consumes the token. + /// p.park(); + /// ``` + /// + /// [`park`]: Parker::park + /// [`park_timeout`]: Parker::park_timeout + pub fn unparker(&self) -> &Unparker { + &self.unparker + } + + /// Converts a `Parker` into a raw pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let raw = Parker::into_raw(p); + /// # let _ = unsafe { Parker::from_raw(raw) }; + /// ``` + pub fn into_raw(this: Parker) -> *const () { + Unparker::into_raw(this.unparker) + } + + /// Converts a raw pointer into a `Parker`. + /// + /// # Safety + /// + /// This method is safe to use only with pointers returned by [`Parker::into_raw`]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let raw = Parker::into_raw(p); + /// let p = unsafe { Parker::from_raw(raw) }; + /// ``` + pub unsafe fn from_raw(ptr: *const ()) -> Parker { + Parker { + unparker: Unparker::from_raw(ptr), + _marker: PhantomData, + } + } +} + +impl fmt::Debug for Parker { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Parker { .. }") + } +} + +/// Unparks a thread parked by the associated [`Parker`]. 
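+///
+/// Calls to [`unpark`](Unparker::unpark) coalesce: the token is either absent or
+/// present, so unparking twice in a row still produces a single token. A short
+/// sketch of that behavior:
+///
+/// ```
+/// use crossbeam_utils::sync::Parker;
+///
+/// let p = Parker::new();
+/// let u = p.unparker().clone();
+///
+/// u.unpark();
+/// u.unpark(); // coalesces with the previous call; there is still one token
+/// p.park(); // consumes the token and returns immediately
+/// ```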
+pub struct Unparker { + inner: Arc, +} + +unsafe impl Send for Unparker {} +unsafe impl Sync for Unparker {} + +impl Unparker { + /// Atomically makes the token available if it is not already. + /// + /// This method will wake up the thread blocked on [`park`] or [`park_timeout`], if there is + /// any. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_millis(500)); + /// u.unpark(); + /// }); + /// + /// // Wakes up when `u.unpark()` provides the token. + /// p.park(); + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + /// + /// [`park`]: Parker::park + /// [`park_timeout`]: Parker::park_timeout + pub fn unpark(&self) { + self.inner.unpark() + } + + /// Converts an `Unparker` into a raw pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::{Parker, Unparker}; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// let raw = Unparker::into_raw(u); + /// # let _ = unsafe { Unparker::from_raw(raw) }; + /// ``` + pub fn into_raw(this: Unparker) -> *const () { + Arc::into_raw(this.inner).cast::<()>() + } + + /// Converts a raw pointer into an `Unparker`. + /// + /// # Safety + /// + /// This method is safe to use only with pointers returned by [`Unparker::into_raw`]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::{Parker, Unparker}; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// + /// let raw = Unparker::into_raw(u); + /// let u = unsafe { Unparker::from_raw(raw) }; + /// ``` + pub unsafe fn from_raw(ptr: *const ()) -> Unparker { + Unparker { + inner: Arc::from_raw(ptr.cast::()), + } + } +} + +impl fmt::Debug for Unparker { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Unparker { .. }") + } +} + +impl Clone for Unparker { + fn clone(&self) -> Unparker { + Unparker { + inner: self.inner.clone(), + } + } +} + +const EMPTY: usize = 0; +const PARKED: usize = 1; +const NOTIFIED: usize = 2; + +struct Inner { + state: AtomicUsize, + lock: Mutex<()>, + cvar: Condvar, +} + +impl Inner { + fn park(&self, deadline: Option) { + // If we were previously notified then we consume this notification and return quickly. + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + return; + } + + // If the timeout is zero, then there is no need to actually block. + if let Some(deadline) = deadline { + if deadline <= Instant::now() { + return; + } + } + + // Otherwise we need to coordinate going to sleep. + let mut m = self.lock.lock().unwrap(); + + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + // Consume this notification to avoid spurious wakeups in the next park. + Err(NOTIFIED) => { + // We must read `state` here, even though we know it will be `NOTIFIED`. This is + // because `unpark` may have been called again since we read `NOTIFIED` in the + // `compare_exchange` above. We must perform an acquire operation that synchronizes + // with that `unpark` to observe any writes it made before the call to `unpark`. To + // do that we must read from the write it made to `state`. 
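+                // The swap below is that read: at this point only `unpark` can
+                // write to `state` concurrently, and it only ever writes
+                // `NOTIFIED`, which is why the assertion on `old` cannot fail.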
+ let old = self.state.swap(EMPTY, SeqCst); + assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + return; + } + Err(n) => panic!("inconsistent park_timeout state: {}", n), + } + + loop { + // Block the current thread on the conditional variable. + m = match deadline { + None => self.cvar.wait(m).unwrap(), + Some(deadline) => { + let now = Instant::now(); + if now < deadline { + // We could check for a timeout here, in the return value of wait_timeout, + // but in the case that a timeout and an unpark arrive simultaneously, we + // prefer to report the former. + self.cvar.wait_timeout(m, deadline - now).unwrap().0 + } else { + // We've timed out; swap out the state back to empty on our way out + match self.state.swap(EMPTY, SeqCst) { + NOTIFIED | PARKED => return, + n => panic!("inconsistent park_timeout state: {}", n), + }; + } + } + }; + + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + // got a notification + return; + } + + // Spurious wakeup, go back to sleep. Alternatively, if we timed out, it will be caught + // in the branch above, when we discover the deadline is in the past + } + } + + pub(crate) fn unpark(&self) { + // To ensure the unparked thread will observe any writes we made before this call, we must + // perform a release operation that `park` can synchronize with. To do that we must write + // `NOTIFIED` even if `state` is already `NOTIFIED`. That is why this must be a swap rather + // than a compare-and-swap that returns if it reads `NOTIFIED` on failure. + match self.state.swap(NOTIFIED, SeqCst) { + EMPTY => return, // no one was waiting + NOTIFIED => return, // already unparked + PARKED => {} // gotta go wake someone up + _ => panic!("inconsistent state in unpark"), + } + + // There is a period between when the parked thread sets `state` to `PARKED` (or last + // checked `state` in the case of a spurious wakeup) and when it actually waits on `cvar`. + // If we were to notify during this period it would be ignored and then when the parked + // thread went to sleep it would never wake up. Fortunately, it has `lock` locked at this + // stage so we can acquire `lock` to wait until it is ready to receive the notification. + // + // Releasing `lock` before the call to `notify_one` means that when the parked thread wakes + // it doesn't get woken only to have to wait for us to release `lock`. + drop(self.lock.lock().unwrap()); + self.cvar.notify_one(); + } +} diff --git a/external/vendor/crossbeam-utils/src/sync/sharded_lock.rs b/external/vendor/crossbeam-utils/src/sync/sharded_lock.rs new file mode 100644 index 0000000000..629b97598e --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/sharded_lock.rs @@ -0,0 +1,638 @@ +use std::boxed::Box; +use std::cell::UnsafeCell; +use std::collections::HashMap; +use std::fmt; +use std::marker::PhantomData; +use std::mem; +use std::ops::{Deref, DerefMut}; +use std::panic::{RefUnwindSafe, UnwindSafe}; +use std::sync::{LockResult, PoisonError, TryLockError, TryLockResult}; +use std::sync::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use std::thread::{self, ThreadId}; +use std::vec::Vec; + +use crate::sync::once_lock::OnceLock; +use crate::CachePadded; + +/// The number of shards per sharded lock. Must be a power of two. +const NUM_SHARDS: usize = 8; + +/// A shard containing a single reader-writer lock. +struct Shard { + /// The inner reader-writer lock. + lock: RwLock<()>, + + /// The write-guard keeping this shard locked. 
+    ///
+    /// Write operations will lock each shard and store the guard here. These guards get dropped at
+    /// the same time the big guard is dropped.
+    write_guard: UnsafeCell<Option<RwLockWriteGuard<'static, ()>>>,
+}
+
+/// A sharded reader-writer lock.
+///
+/// This lock is equivalent to [`RwLock`], except read operations are faster and write operations
+/// are slower.
+///
+/// A `ShardedLock` is internally made of a list of *shards*, each being a [`RwLock`] occupying a
+/// single cache line. Read operations will pick one of the shards depending on the current thread
+/// and lock it. Write operations need to lock all shards in succession.
+///
+/// By splitting the lock into shards, concurrent read operations will in most cases choose
+/// different shards and thus update different cache lines, which is good for scalability. However,
+/// write operations need to do more work and are therefore slower than usual.
+///
+/// The priority policy of the lock is dependent on the underlying operating system's
+/// implementation, and this type does not guarantee that any particular policy will be used.
+///
+/// # Poisoning
+///
+/// A `ShardedLock`, like [`RwLock`], will become poisoned on a panic. Note that it may only be
+/// poisoned if a panic occurs while a write operation is in progress. If a panic occurs in any
+/// read operation, the lock will not be poisoned.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_utils::sync::ShardedLock;
+///
+/// let lock = ShardedLock::new(5);
+///
+/// // Any number of read locks can be held at once.
+/// {
+///     let r1 = lock.read().unwrap();
+///     let r2 = lock.read().unwrap();
+///     assert_eq!(*r1, 5);
+///     assert_eq!(*r2, 5);
+/// } // Read locks are dropped at this point.
+///
+/// // However, only one write lock may be held.
+/// {
+///     let mut w = lock.write().unwrap();
+///     *w += 1;
+///     assert_eq!(*w, 6);
+/// } // Write lock is dropped here.
+/// ```
+///
+/// [`RwLock`]: std::sync::RwLock
+pub struct ShardedLock<T: ?Sized> {
+    /// A list of locks protecting the internal data.
+    shards: Box<[CachePadded<Shard>]>,
+
+    /// The internal data.
+    value: UnsafeCell<T>,
+}
+
+unsafe impl<T: ?Sized + Send> Send for ShardedLock<T> {}
+unsafe impl<T: ?Sized + Send + Sync> Sync for ShardedLock<T> {}
+
+impl<T: ?Sized> UnwindSafe for ShardedLock<T> {}
+impl<T: ?Sized> RefUnwindSafe for ShardedLock<T> {}
+
+impl<T> ShardedLock<T> {
+    /// Creates a new sharded reader-writer lock.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::sync::ShardedLock;
+    ///
+    /// let lock = ShardedLock::new(5);
+    /// ```
+    pub fn new(value: T) -> ShardedLock<T> {
+        ShardedLock {
+            shards: (0..NUM_SHARDS)
+                .map(|_| {
+                    CachePadded::new(Shard {
+                        lock: RwLock::new(()),
+                        write_guard: UnsafeCell::new(None),
+                    })
+                })
+                .collect::<Box<[_]>>(),
+            value: UnsafeCell::new(value),
+        }
+    }
+
+    /// Consumes this lock, returning the underlying data.
+    ///
+    /// # Errors
+    ///
+    /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
+    /// operation panics.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::sync::ShardedLock;
+    ///
+    /// let lock = ShardedLock::new(String::new());
+    /// {
+    ///     let mut s = lock.write().unwrap();
+    ///     *s = "modified".to_owned();
+    /// }
+    /// assert_eq!(lock.into_inner().unwrap(), "modified");
+    /// ```
+    pub fn into_inner(self) -> LockResult<T> {
+        let is_poisoned = self.is_poisoned();
+        let inner = self.value.into_inner();
+
+        if is_poisoned {
+            Err(PoisonError::new(inner))
+        } else {
+            Ok(inner)
+        }
+    }
+}
+
+impl<T: ?Sized> ShardedLock<T> {
+    /// Returns `true` if the lock is poisoned.
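+    ///
+    /// (Poisoning is tracked through the first shard's lock, which every write
+    /// operation acquires first.)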
+ /// + /// If another thread can still access the lock, it may become poisoned at any time. A `false` + /// result should not be trusted without additional synchronization. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// use std::sync::Arc; + /// use std::thread; + /// + /// let lock = Arc::new(ShardedLock::new(0)); + /// let c_lock = lock.clone(); + /// + /// let _ = thread::spawn(move || { + /// let _lock = c_lock.write().unwrap(); + /// panic!(); // the lock gets poisoned + /// }).join(); + /// assert_eq!(lock.is_poisoned(), true); + /// ``` + pub fn is_poisoned(&self) -> bool { + self.shards[0].lock.is_poisoned() + } + + /// Returns a mutable reference to the underlying data. + /// + /// Since this call borrows the lock mutably, no actual locking needs to take place. + /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let mut lock = ShardedLock::new(0); + /// *lock.get_mut().unwrap() = 10; + /// assert_eq!(*lock.read().unwrap(), 10); + /// ``` + pub fn get_mut(&mut self) -> LockResult<&mut T> { + let is_poisoned = self.is_poisoned(); + let inner = unsafe { &mut *self.value.get() }; + + if is_poisoned { + Err(PoisonError::new(inner)) + } else { + Ok(inner) + } + } + + /// Attempts to acquire this lock with shared read access. + /// + /// If the access could not be granted at this time, an error is returned. Otherwise, a guard + /// is returned which will release the shared access when it is dropped. This method does not + /// provide any guarantees with respect to the ordering of whether contentious readers or + /// writers will acquire the lock first. + /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let lock = ShardedLock::new(1); + /// + /// match lock.try_read() { + /// Ok(n) => assert_eq!(*n, 1), + /// Err(_) => unreachable!(), + /// }; + /// ``` + pub fn try_read(&self) -> TryLockResult> { + // Take the current thread index and map it to a shard index. Thread indices will tend to + // distribute shards among threads equally, thus reducing contention due to read-locking. + let current_index = current_index().unwrap_or(0); + let shard_index = current_index & (self.shards.len() - 1); + + match self.shards[shard_index].lock.try_read() { + Ok(guard) => Ok(ShardedLockReadGuard { + lock: self, + _guard: guard, + _marker: PhantomData, + }), + Err(TryLockError::Poisoned(err)) => { + let guard = ShardedLockReadGuard { + lock: self, + _guard: err.into_inner(), + _marker: PhantomData, + }; + Err(TryLockError::Poisoned(PoisonError::new(guard))) + } + Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock), + } + } + + /// Locks with shared read access, blocking the current thread until it can be acquired. + /// + /// The calling thread will be blocked until there are no more writers which hold the lock. + /// There may be other readers currently inside the lock when this method returns. This method + /// does not provide any guarantees with respect to the ordering of whether contentious readers + /// or writers will acquire the lock first. + /// + /// Returns a guard which will release the shared access when dropped. 
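+    ///
+    /// Internally, only one of the shards is read-locked, chosen from a
+    /// per-thread index so that unrelated readers usually touch different
+    /// cache lines.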
+ /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Panics + /// + /// This method might panic when called if the lock is already held by the current thread. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// use std::sync::Arc; + /// use std::thread; + /// + /// let lock = Arc::new(ShardedLock::new(1)); + /// let c_lock = lock.clone(); + /// + /// let n = lock.read().unwrap(); + /// assert_eq!(*n, 1); + /// + /// thread::spawn(move || { + /// let r = c_lock.read(); + /// assert!(r.is_ok()); + /// }).join().unwrap(); + /// ``` + pub fn read(&self) -> LockResult> { + // Take the current thread index and map it to a shard index. Thread indices will tend to + // distribute shards among threads equally, thus reducing contention due to read-locking. + let current_index = current_index().unwrap_or(0); + let shard_index = current_index & (self.shards.len() - 1); + + match self.shards[shard_index].lock.read() { + Ok(guard) => Ok(ShardedLockReadGuard { + lock: self, + _guard: guard, + _marker: PhantomData, + }), + Err(err) => Err(PoisonError::new(ShardedLockReadGuard { + lock: self, + _guard: err.into_inner(), + _marker: PhantomData, + })), + } + } + + /// Attempts to acquire this lock with exclusive write access. + /// + /// If the access could not be granted at this time, an error is returned. Otherwise, a guard + /// is returned which will release the exclusive access when it is dropped. This method does + /// not provide any guarantees with respect to the ordering of whether contentious readers or + /// writers will acquire the lock first. + /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let lock = ShardedLock::new(1); + /// + /// let n = lock.read().unwrap(); + /// assert_eq!(*n, 1); + /// + /// assert!(lock.try_write().is_err()); + /// ``` + pub fn try_write(&self) -> TryLockResult> { + let mut poisoned = false; + let mut blocked = None; + + // Write-lock each shard in succession. + for (i, shard) in self.shards.iter().enumerate() { + let guard = match shard.lock.try_write() { + Ok(guard) => guard, + Err(TryLockError::Poisoned(err)) => { + poisoned = true; + err.into_inner() + } + Err(TryLockError::WouldBlock) => { + blocked = Some(i); + break; + } + }; + + // Store the guard into the shard. + unsafe { + let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard); + let dest: *mut _ = shard.write_guard.get(); + *dest = Some(guard); + } + } + + if let Some(i) = blocked { + // Unlock the shards in reverse order of locking. + for shard in self.shards[0..i].iter().rev() { + unsafe { + let dest: *mut _ = shard.write_guard.get(); + let guard = (*dest).take(); + drop(guard); + } + } + Err(TryLockError::WouldBlock) + } else if poisoned { + let guard = ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + }; + Err(TryLockError::Poisoned(PoisonError::new(guard))) + } else { + Ok(ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + }) + } + } + + /// Locks with exclusive write access, blocking the current thread until it can be acquired. + /// + /// The calling thread will be blocked until there are no more writers which hold the lock. + /// There may be other readers currently inside the lock when this method returns. 
This method
+    /// does not provide any guarantees with respect to the ordering of whether contentious readers
+    /// or writers will acquire the lock first.
+    ///
+    /// Returns a guard which will release the exclusive access when dropped.
+    ///
+    /// # Errors
+    ///
+    /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
+    /// operation panics.
+    ///
+    /// # Panics
+    ///
+    /// This method might panic when called if the lock is already held by the current thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::sync::ShardedLock;
+    ///
+    /// let lock = ShardedLock::new(1);
+    ///
+    /// let mut n = lock.write().unwrap();
+    /// *n = 2;
+    ///
+    /// assert!(lock.try_read().is_err());
+    /// ```
+    pub fn write(&self) -> LockResult<ShardedLockWriteGuard<'_, T>> {
+        let mut poisoned = false;
+
+        // Write-lock each shard in succession.
+        for shard in self.shards.iter() {
+            let guard = match shard.lock.write() {
+                Ok(guard) => guard,
+                Err(err) => {
+                    poisoned = true;
+                    err.into_inner()
+                }
+            };
+
+            // Store the guard into the shard.
+            unsafe {
+                let guard: RwLockWriteGuard<'_, ()> = guard;
+                let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard);
+                let dest: *mut _ = shard.write_guard.get();
+                *dest = Some(guard);
+            }
+        }
+
+        if poisoned {
+            Err(PoisonError::new(ShardedLockWriteGuard {
+                lock: self,
+                _marker: PhantomData,
+            }))
+        } else {
+            Ok(ShardedLockWriteGuard {
+                lock: self,
+                _marker: PhantomData,
+            })
+        }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for ShardedLock<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_read() {
+            Ok(guard) => f
+                .debug_struct("ShardedLock")
+                .field("data", &&*guard)
+                .finish(),
+            Err(TryLockError::Poisoned(err)) => f
+                .debug_struct("ShardedLock")
+                .field("data", &&**err.get_ref())
+                .finish(),
+            Err(TryLockError::WouldBlock) => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+                f.debug_struct("ShardedLock")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
+        }
+    }
+}
+
+impl<T: Default> Default for ShardedLock<T> {
+    fn default() -> ShardedLock<T> {
+        ShardedLock::new(Default::default())
+    }
+}
+
+impl<T> From<T> for ShardedLock<T> {
+    fn from(t: T) -> Self {
+        ShardedLock::new(t)
+    }
+}
+
+/// A guard used to release the shared read access of a [`ShardedLock`] when dropped.
+#[clippy::has_significant_drop]
+pub struct ShardedLockReadGuard<'a, T: ?Sized> {
+    lock: &'a ShardedLock<T>,
+    _guard: RwLockReadGuard<'a, ()>,
+    _marker: PhantomData<RwLockReadGuard<'a, T>>,
+}
+
+unsafe impl<T: ?Sized + Sync> Sync for ShardedLockReadGuard<'_, T> {}
+
+impl<T: ?Sized> Deref for ShardedLockReadGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { &*self.lock.value.get() }
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ShardedLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ShardedLockReadGuard")
+            .field("lock", &self.lock)
+            .finish()
+    }
+}
+
+impl<T: ?Sized + fmt::Display> fmt::Display for ShardedLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+/// A guard used to release the exclusive write access of a [`ShardedLock`] when dropped.
+#[clippy::has_significant_drop]
+pub struct ShardedLockWriteGuard<'a, T: ?Sized> {
+    lock: &'a ShardedLock<T>,
+    _marker: PhantomData<RwLockWriteGuard<'a, T>>,
+}
+
+unsafe impl<T: ?Sized + Sync> Sync for ShardedLockWriteGuard<'_, T> {}
+
+impl<T: ?Sized> Drop for ShardedLockWriteGuard<'_, T> {
+    fn drop(&mut self) {
+        // Unlock the shards in reverse order of locking.
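+        // (Releasing in reverse is not required for soundness; it simply
+        // mirrors the locking order used by `write` and `try_write`.)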
+        for shard in self.lock.shards.iter().rev() {
+            unsafe {
+                let dest: *mut _ = shard.write_guard.get();
+                let guard = (*dest).take();
+                drop(guard);
+            }
+        }
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ShardedLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ShardedLockWriteGuard")
+            .field("lock", &self.lock)
+            .finish()
+    }
+}
+
+impl<T: ?Sized + fmt::Display> fmt::Display for ShardedLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+impl<T: ?Sized> Deref for ShardedLockWriteGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { &*self.lock.value.get() }
+    }
+}
+
+impl<T: ?Sized> DerefMut for ShardedLockWriteGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.lock.value.get() }
+    }
+}
+
+/// Returns a `usize` that identifies the current thread.
+///
+/// Each thread is associated with an 'index'. While there are no particular guarantees, indices
+/// usually tend to be consecutive numbers between 0 and the number of running threads.
+///
+/// Since this function accesses TLS, `None` might be returned if the current thread's TLS is
+/// tearing down.
+#[inline]
+fn current_index() -> Option<usize> {
+    REGISTRATION.try_with(|reg| reg.index).ok()
+}
+
+/// The global registry keeping track of registered threads and indices.
+struct ThreadIndices {
+    /// Mapping from `ThreadId` to thread index.
+    mapping: HashMap<ThreadId, usize>,
+
+    /// A list of free indices.
+    free_list: Vec<usize>,
+
+    /// The next index to allocate if the free list is empty.
+    next_index: usize,
+}
+
+fn thread_indices() -> &'static Mutex<ThreadIndices> {
+    static THREAD_INDICES: OnceLock<Mutex<ThreadIndices>> = OnceLock::new();
+    fn init() -> Mutex<ThreadIndices> {
+        Mutex::new(ThreadIndices {
+            mapping: HashMap::new(),
+            free_list: Vec::new(),
+            next_index: 0,
+        })
+    }
+    THREAD_INDICES.get_or_init(init)
+}
+
+/// A registration of a thread with an index.
+///
+/// When dropped, unregisters the thread and frees the reserved index.
+struct Registration {
+    index: usize,
+    thread_id: ThreadId,
+}
+
+impl Drop for Registration {
+    fn drop(&mut self) {
+        let mut indices = thread_indices().lock().unwrap();
+        indices.mapping.remove(&self.thread_id);
+        indices.free_list.push(self.index);
+    }
+}
+
+std::thread_local! {
+    static REGISTRATION: Registration = {
+        let thread_id = thread::current().id();
+        let mut indices = thread_indices().lock().unwrap();
+
+        let index = match indices.free_list.pop() {
+            Some(i) => i,
+            None => {
+                let i = indices.next_index;
+                indices.next_index += 1;
+                i
+            }
+        };
+        indices.mapping.insert(thread_id, index);
+
+        Registration {
+            index,
+            thread_id,
+        }
+    };
+}
diff --git a/external/vendor/crossbeam-utils/src/sync/wait_group.rs b/external/vendor/crossbeam-utils/src/sync/wait_group.rs
new file mode 100644
index 0000000000..19d6074157
--- /dev/null
+++ b/external/vendor/crossbeam-utils/src/sync/wait_group.rs
@@ -0,0 +1,145 @@
+use crate::primitive::sync::{Arc, Condvar, Mutex};
+use std::fmt;
+
+/// Enables threads to synchronize the beginning or end of some computation.
+///
+/// # Wait groups vs barriers
+///
+/// `WaitGroup` is very similar to [`Barrier`], but there are a few differences:
+///
+/// * [`Barrier`] needs to know the number of threads at construction, while `WaitGroup` is cloned to
+///   register more threads.
+///
+/// * A [`Barrier`] can be reused even after all threads have synchronized, while a `WaitGroup`
+///   synchronizes threads only once.
+///
+/// * All threads wait for others to reach the [`Barrier`].
With `WaitGroup`, each thread can choose +/// to either wait for other threads or to continue without blocking. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_utils::sync::WaitGroup; +/// use std::thread; +/// +/// // Create a new wait group. +/// let wg = WaitGroup::new(); +/// +/// for _ in 0..4 { +/// // Create another reference to the wait group. +/// let wg = wg.clone(); +/// +/// thread::spawn(move || { +/// // Do some work. +/// +/// // Drop the reference to the wait group. +/// drop(wg); +/// }); +/// } +/// +/// // Block until all threads have finished their work. +/// wg.wait(); +/// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +/// ``` +/// +/// [`Barrier`]: std::sync::Barrier +pub struct WaitGroup { + inner: Arc, +} + +/// Inner state of a `WaitGroup`. +struct Inner { + cvar: Condvar, + count: Mutex, +} + +impl Default for WaitGroup { + fn default() -> Self { + Self { + inner: Arc::new(Inner { + cvar: Condvar::new(), + count: Mutex::new(1), + }), + } + } +} + +impl WaitGroup { + /// Creates a new wait group and returns the single reference to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::WaitGroup; + /// + /// let wg = WaitGroup::new(); + /// ``` + pub fn new() -> Self { + Self::default() + } + + /// Drops this reference and waits until all other references are dropped. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::WaitGroup; + /// use std::thread; + /// + /// let wg = WaitGroup::new(); + /// + /// thread::spawn({ + /// let wg = wg.clone(); + /// move || { + /// // Block until both threads have reached `wait()`. + /// wg.wait(); + /// } + /// }); + /// + /// // Block until both threads have reached `wait()`. + /// wg.wait(); + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + pub fn wait(self) { + if *self.inner.count.lock().unwrap() == 1 { + return; + } + + let inner = self.inner.clone(); + drop(self); + + let mut count = inner.count.lock().unwrap(); + while *count > 0 { + count = inner.cvar.wait(count).unwrap(); + } + } +} + +impl Drop for WaitGroup { + fn drop(&mut self) { + let mut count = self.inner.count.lock().unwrap(); + *count -= 1; + + if *count == 0 { + self.inner.cvar.notify_all(); + } + } +} + +impl Clone for WaitGroup { + fn clone(&self) -> WaitGroup { + let mut count = self.inner.count.lock().unwrap(); + *count += 1; + + WaitGroup { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for WaitGroup { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let count: &usize = &*self.inner.count.lock().unwrap(); + f.debug_struct("WaitGroup").field("count", count).finish() + } +} diff --git a/external/vendor/crossbeam-utils/src/thread.rs b/external/vendor/crossbeam-utils/src/thread.rs new file mode 100644 index 0000000000..847f4cf112 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/thread.rs @@ -0,0 +1,611 @@ +//! Threads that can borrow variables from the stack. +//! +//! Create a scope when spawned threads need to access variables on the stack: +//! +//! ``` +//! use crossbeam_utils::thread; +//! +//! let people = vec![ +//! "Alice".to_string(), +//! "Bob".to_string(), +//! "Carol".to_string(), +//! ]; +//! +//! thread::scope(|s| { +//! for person in &people { +//! s.spawn(move |_| { +//! println!("Hello, {}!", person); +//! }); +//! } +//! }).unwrap(); +//! ``` +//! +//! 
diff --git a/external/vendor/crossbeam-utils/src/thread.rs b/external/vendor/crossbeam-utils/src/thread.rs
new file mode 100644
index 0000000000..847f4cf112
--- /dev/null
+++ b/external/vendor/crossbeam-utils/src/thread.rs
@@ -0,0 +1,611 @@
+//! Threads that can borrow variables from the stack.
+//!
+//! Create a scope when spawned threads need to access variables on the stack:
+//!
+//! ```
+//! use crossbeam_utils::thread;
+//!
+//! let people = vec![
+//!     "Alice".to_string(),
+//!     "Bob".to_string(),
+//!     "Carol".to_string(),
+//! ];
+//!
+//! thread::scope(|s| {
+//!     for person in &people {
+//!         s.spawn(move |_| {
+//!             println!("Hello, {}!", person);
+//!         });
+//!     }
+//! }).unwrap();
+//! ```
+//!
+//! # Why scoped threads?
+//!
+//! Suppose we wanted to re-write the previous example using plain threads:
+//!
+//! ```compile_fail,E0597
+//! use std::thread;
+//!
+//! let people = vec![
+//!     "Alice".to_string(),
+//!     "Bob".to_string(),
+//!     "Carol".to_string(),
+//! ];
+//!
+//! let mut threads = Vec::new();
+//!
+//! for person in &people {
+//!     threads.push(thread::spawn(move || {
+//!         println!("Hello, {}!", person);
+//!     }));
+//! }
+//!
+//! for thread in threads {
+//!     thread.join().unwrap();
+//! }
+//! ```
+//!
+//! This doesn't work because the borrow checker complains about `people` not living long enough:
+//!
+//! ```text
+//! error[E0597]: `people` does not live long enough
+//!   --> src/main.rs:12:20
+//!    |
+//! 12 |     for person in &people {
+//!    |                    ^^^^^^ borrowed value does not live long enough
+//! ...
+//! 21 | }
+//!    | - borrowed value only lives until here
+//!    |
+//!    = note: borrowed value must be valid for the static lifetime...
+//! ```
+//!
+//! The problem here is that spawned threads are not allowed to borrow variables on the stack
+//! because the compiler cannot prove they will be joined before `people` is destroyed.
+//!
+//! Scoped threads are a mechanism to guarantee to the compiler that spawned threads will be joined
+//! before the scope ends.
+//!
+//! # How scoped threads work
+//!
+//! If a variable is borrowed by a thread, the thread must complete before the variable is
+//! destroyed. Threads spawned using [`std::thread::spawn`] can only borrow variables with the
+//! `'static` lifetime because the borrow checker cannot be sure when the thread will complete.
+//!
+//! A scope creates a clear boundary between variables outside the scope and threads inside the
+//! scope. Whenever a scope spawns a thread, it promises to join the thread before the scope ends.
+//! This way we guarantee to the borrow checker that scoped threads only live within the scope and
+//! can safely access variables outside it.
+//!
+//! # Nesting scoped threads
+//!
+//! Sometimes scoped threads need to spawn more threads within the same scope. This is a little
+//! tricky because the argument `s` lives *inside* the invocation of `thread::scope()` and as such
+//! cannot be borrowed by scoped threads:
+//!
+//! ```compile_fail,E0521
+//! use crossbeam_utils::thread;
+//!
+//! thread::scope(|s| {
+//!     s.spawn(|_| {
+//!         // Not going to compile because we're trying to borrow `s`,
+//!         // which lives *inside* the scope! :(
+//!         s.spawn(|_| println!("nested thread"));
+//!     });
+//! });
+//! ```
+//!
+//! Fortunately, there is a solution. Every scoped thread is passed a reference to its scope as an
+//! argument, which can be used for spawning nested threads:
+//!
+//! ```
+//! use crossbeam_utils::thread;
+//!
+//! thread::scope(|s| {
+//!     // Note the `|s|` here.
+//!     s.spawn(|s| {
+//!         // Yay, this works because we're using a fresh argument `s`! :)
+//!         s.spawn(|_| println!("nested thread"));
+//!     });
+//! }).unwrap();
+//! ```
+
+use std::boxed::Box;
+use std::fmt;
+use std::io;
+use std::marker::PhantomData;
+use std::mem;
+use std::panic;
+use std::string::String;
+use std::sync::{Arc, Mutex};
+use std::thread;
+use std::vec::Vec;
+
+use crate::sync::WaitGroup;
+
+type SharedVec<T> = Arc<Mutex<Vec<T>>>;
+type SharedOption<T> = Arc<Mutex<Option<T>>>;
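As a point of comparison for the API defined below: since Rust 1.63 the standard library ships scoped threads natively, so the module's opening example can be written without this crate. A sketch of the `std` equivalent (note that `std::thread::scope` returns the closure's value directly rather than a `Result`, and the spawned closures take no scope argument):

```rust
use std::thread;

fn main() {
    let people = vec![
        "Alice".to_string(),
        "Bob".to_string(),
        "Carol".to_string(),
    ];

    // All threads spawned on `s` are joined before `scope` returns,
    // so borrowing `people` is fine.
    thread::scope(|s| {
        for person in &people {
            s.spawn(move || {
                println!("Hello, {}!", person);
            });
        }
    });
}
```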
+
+/// Creates a new scope for spawning threads.
+///
+/// All child threads that haven't been manually joined will be automatically joined just before
+/// this function invocation ends. If all joined threads have successfully completed, `Ok` is
+/// returned with the return value of `f`. If any of the joined threads has panicked, an `Err` is
+/// returned containing errors from panicked threads. Note that if panics are implemented by
+/// aborting the process, no error is returned; see the notes of [std::panic::catch_unwind].
+///
+/// **Note:** Since Rust 1.63, this function is soft-deprecated in favor of the more efficient [`std::thread::scope`].
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_utils::thread;
+///
+/// let var = vec![1, 2, 3];
+///
+/// thread::scope(|s| {
+///     s.spawn(|_| {
+///         println!("A child thread borrowing `var`: {:?}", var);
+///     });
+/// }).unwrap();
+/// ```
+pub fn scope<'env, F, R>(f: F) -> thread::Result<R>
+where
+    F: FnOnce(&Scope<'env>) -> R,
+{
+    struct AbortOnPanic;
+    impl Drop for AbortOnPanic {
+        fn drop(&mut self) {
+            if thread::panicking() {
+                std::process::abort();
+            }
+        }
+    }
+
+    let wg = WaitGroup::new();
+    let scope = Scope::<'env> {
+        handles: SharedVec::default(),
+        wait_group: wg.clone(),
+        _marker: PhantomData,
+    };
+
+    // Execute the scoped function, but catch any panics.
+    let result = panic::catch_unwind(panic::AssertUnwindSafe(|| f(&scope)));
+
+    // If an unwinding panic occurs before all threads are joined,
+    // promote it to an aborting panic to prevent any threads from escaping the scope.
+    let guard = AbortOnPanic;
+
+    // Wait until all nested scopes are dropped.
+    drop(scope.wait_group);
+    wg.wait();
+
+    // Join all remaining spawned threads.
+    let panics: Vec<_> = scope
+        .handles
+        .lock()
+        .unwrap()
+        // Filter handles that haven't been joined, join them, and collect errors.
+        .drain(..)
+        .filter_map(|handle| handle.lock().unwrap().take())
+        .filter_map(|handle| handle.join().err())
+        .collect();
+
+    mem::forget(guard);
+
+    // If `f` has panicked, resume unwinding.
+    // If any of the child threads have panicked, return the panic errors.
+    // Otherwise, everything is OK and return the result of `f`.
+    match result {
+        Err(err) => panic::resume_unwind(err),
+        Ok(res) => {
+            if panics.is_empty() {
+                Ok(res)
+            } else {
+                Err(Box::new(panics))
+            }
+        }
+    }
+}
+
+/// A scope for spawning threads.
+pub struct Scope<'env> {
+    /// The list of the thread join handles.
+    handles: SharedVec<SharedOption<thread::JoinHandle<()>>>,
+
+    /// Used to wait until all subscopes are dropped.
+    wait_group: WaitGroup,
+
+    /// Borrows data with invariant lifetime `'env`.
+    _marker: PhantomData<&'env mut &'env ()>,
+}
+
+unsafe impl Sync for Scope<'_> {}
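The `AbortOnPanic` guard plus the trailing `mem::forget` in `scope()` is a reusable idiom: arm an abort guard before a region that unwinding must not be allowed to skip, and defuse it only on the path where the joins actually happened. A stripped-down sketch of the idiom on its own, with illustrative names:

```rust
struct AbortOnUnwind;

impl Drop for AbortOnUnwind {
    fn drop(&mut self) {
        // Only reached if unwinding drops the guard while it is still armed.
        if std::thread::panicking() {
            std::process::abort();
        }
    }
}

fn must_not_be_skipped(f: impl FnOnce()) {
    let guard = AbortOnUnwind;
    f(); // if `f` unwinds here, `guard` drops while panicking -> abort
    std::mem::forget(guard); // success path: defuse without running `Drop`
}
```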
+
+impl<'env> Scope<'env> {
+    /// Spawns a scoped thread.
+    ///
+    /// This method is similar to the [`spawn`] function in Rust's standard library. The difference
+    /// is that this thread is scoped, meaning it's guaranteed to terminate before the scope exits,
+    /// allowing it to reference variables outside the scope.
+    ///
+    /// The scoped thread is passed a reference to this scope as an argument, which can be used for
+    /// spawning nested threads.
+    ///
+    /// The returned [handle](ScopedJoinHandle) can be used to manually
+    /// [join](ScopedJoinHandle::join) the thread before the scope exits.
+    ///
+    /// This will create a thread using the default parameters of [`ScopedThreadBuilder`]; if you
+    /// want to specify the stack size or the name of the thread, use [`Scope::builder`] instead.
+    ///
+    /// [`spawn`]: std::thread::spawn
+    ///
+    /// # Panics
+    ///
+    /// Panics if the OS fails to create a thread; use [`ScopedThreadBuilder::spawn`]
+    /// to recover from such errors.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::thread;
+    ///
+    /// thread::scope(|s| {
+    ///     let handle = s.spawn(|_| {
+    ///         println!("A child thread is running");
+    ///         42
+    ///     });
+    ///
+    ///     // Join the thread and retrieve its result.
+    ///     let res = handle.join().unwrap();
+    ///     assert_eq!(res, 42);
+    /// }).unwrap();
+    /// ```
+    pub fn spawn<'scope, F, T>(&'scope self, f: F) -> ScopedJoinHandle<'scope, T>
+    where
+        F: FnOnce(&Scope<'env>) -> T,
+        F: Send + 'env,
+        T: Send + 'env,
+    {
+        self.builder()
+            .spawn(f)
+            .expect("failed to spawn scoped thread")
+    }
+
+    /// Creates a builder that can configure a thread before spawning.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::thread;
+    ///
+    /// thread::scope(|s| {
+    ///     s.builder()
+    ///         .spawn(|_| println!("A child thread is running"))
+    ///         .unwrap();
+    /// }).unwrap();
+    /// ```
+    pub fn builder<'scope>(&'scope self) -> ScopedThreadBuilder<'scope, 'env> {
+        ScopedThreadBuilder {
+            scope: self,
+            builder: thread::Builder::new(),
+        }
+    }
+}
+
+impl fmt::Debug for Scope<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.pad("Scope { .. }")
+    }
+}
+
+/// Configures the properties of a new thread.
+///
+/// The two configurable properties are:
+///
+/// - [`name`]: Specifies an [associated name for the thread][naming-threads].
+/// - [`stack_size`]: Specifies the [desired stack size for the thread][stack-size].
+///
+/// The [`spawn`] method will take ownership of the builder and return an [`io::Result`] of the
+/// thread handle with the given configuration.
+///
+/// The [`Scope::spawn`] method uses a builder with default configuration and unwraps its return
+/// value. You may want to use this builder when you want to recover from a failure to launch a
+/// thread.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_utils::thread;
+///
+/// thread::scope(|s| {
+///     s.builder()
+///         .spawn(|_| println!("Running a child thread"))
+///         .unwrap();
+/// }).unwrap();
+/// ```
+///
+/// [`name`]: ScopedThreadBuilder::name
+/// [`stack_size`]: ScopedThreadBuilder::stack_size
+/// [`spawn`]: ScopedThreadBuilder::spawn
+/// [`io::Result`]: std::io::Result
+/// [naming-threads]: std::thread#naming-threads
+/// [stack-size]: std::thread#stack-size
+#[derive(Debug)]
+pub struct ScopedThreadBuilder<'scope, 'env> {
+    scope: &'scope Scope<'env>,
+    builder: thread::Builder,
+}
+
+impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> {
+    /// Sets the name for the new thread.
+    ///
+    /// The name must not contain null bytes (`\0`).
+    ///
+    /// For more information about named threads, see [here][naming-threads].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::thread;
+    /// use std::thread::current;
+    ///
+    /// thread::scope(|s| {
+    ///     s.builder()
+    ///         .name("my thread".to_string())
+    ///         .spawn(|_| assert_eq!(current().name(), Some("my thread")))
+    ///         .unwrap();
+    /// }).unwrap();
+    /// ```
+    ///
+    /// [naming-threads]: std::thread#naming-threads
+    pub fn name(mut self, name: String) -> ScopedThreadBuilder<'scope, 'env> {
+        self.builder = self.builder.name(name);
+        self
+    }
+
+    /// Sets the size of the stack for the new thread.
+    ///
+    /// The stack size is measured in bytes.
+    ///
+    /// For more information about the stack size for threads, see [here][stack-size].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::thread;
+    ///
+    /// thread::scope(|s| {
+    ///     s.builder()
+    ///         .stack_size(32 * 1024)
+    ///         .spawn(|_| println!("Running a child thread"))
+    ///         .unwrap();
+    /// }).unwrap();
+    /// ```
+    ///
+    /// [stack-size]: std::thread#stack-size
+    pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'scope, 'env> {
+        self.builder = self.builder.stack_size(size);
+        self
+    }
+
+    /// Spawns a scoped thread with this configuration.
+    ///
+    /// The scoped thread is passed a reference to this scope as an argument, which can be used for
+    /// spawning nested threads.
+    ///
+    /// The returned handle can be used to manually join the thread before the scope exits.
+    ///
+    /// # Errors
+    ///
+    /// Unlike the [`Scope::spawn`] method, this method yields an
+    /// [`io::Result`] to capture any failure to create the thread at
+    /// the OS level.
+    ///
+    /// [`io::Result`]: std::io::Result
+    ///
+    /// # Panics
+    ///
+    /// Panics if a thread name was set and it contained null bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::thread;
+    ///
+    /// thread::scope(|s| {
+    ///     let handle = s.builder()
+    ///         .spawn(|_| {
+    ///             println!("A child thread is running");
+    ///             42
+    ///         })
+    ///         .unwrap();
+    ///
+    ///     // Join the thread and retrieve its result.
+    ///     let res = handle.join().unwrap();
+    ///     assert_eq!(res, 42);
+    /// }).unwrap();
+    /// ```
+    pub fn spawn<F, T>(self, f: F) -> io::Result<ScopedJoinHandle<'scope, T>>
+    where
+        F: FnOnce(&Scope<'env>) -> T,
+        F: Send + 'env,
+        T: Send + 'env,
+    {
+        // The result of `f` will be stored here.
+        let result = SharedOption::default();
+
+        // Spawn the thread and grab its join handle and thread handle.
+        let (handle, thread) = {
+            let result = Arc::clone(&result);
+
+            // A clone of the scope that will be moved into the new thread.
+            let scope = Scope::<'env> {
+                handles: Arc::clone(&self.scope.handles),
+                wait_group: self.scope.wait_group.clone(),
+                _marker: PhantomData,
+            };
+
+            // Spawn the thread.
+            let handle = {
+                let closure = move || {
+                    // Make sure the scope is inside the closure with the proper `'env` lifetime.
+                    let scope: Scope<'env> = scope;
+
+                    // Run the closure.
+                    let res = f(&scope);
+
+                    // Store the result if the closure didn't panic.
+                    *result.lock().unwrap() = Some(res);
+                };
+
+                // Allocate `closure` on the heap and erase the `'env` bound.
+                let closure: Box<dyn FnOnce() + Send + 'env> = Box::new(closure);
+                let closure: Box<dyn FnOnce() + Send + 'static> =
+                    unsafe { mem::transmute(closure) };
+
+                // Finally, spawn the closure.
+                self.builder.spawn(closure)?
+            };
+
+            let thread = handle.thread().clone();
+            let handle = Arc::new(Mutex::new(Some(handle)));
+            (handle, thread)
+        };
+
+        // Add the handle to the shared list of join handles.
+        self.scope.handles.lock().unwrap().push(Arc::clone(&handle));
+
+        Ok(ScopedJoinHandle {
+            handle,
+            result,
+            thread,
+            _marker: PhantomData,
+        })
+    }
+}
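The two-step `Box` coercion and `mem::transmute` at the heart of `spawn` deserve a note: `thread::Builder::spawn` demands a `'static` closure, so the `'env` bound is erased, and soundness then rests entirely on `scope()` joining every thread before `'env` ends. A stripped-down sketch of the same lifetime-erasure trick outside this crate (illustrative names; sound only because the closure runs before the borrow expires):

```rust
use std::mem;

// Stand-in for an API that insists on `'static`, like `thread::Builder::spawn`.
fn run_static(f: Box<dyn FnOnce() + Send + 'static>) {
    f();
}

fn erase_and_run<'env>(data: &'env str) {
    let closure: Box<dyn FnOnce() + Send + 'env> = Box::new(move || println!("{}", data));
    // SAFETY (by analogy with `spawn` above): sound only because we invoke
    // the closure immediately, strictly inside `'env`.
    let closure: Box<dyn FnOnce() + Send + 'static> = unsafe { mem::transmute(closure) };
    run_static(closure);
}

fn main() {
    let s = String::from("borrowed from the stack");
    erase_and_run(&s);
}
```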
+
+unsafe impl<T> Send for ScopedJoinHandle<'_, T> {}
+unsafe impl<T> Sync for ScopedJoinHandle<'_, T> {}
+
+/// A handle that can be used to join its scoped thread.
+///
+/// This struct is created by the [`Scope::spawn`] method and the
+/// [`ScopedThreadBuilder::spawn`] method.
+pub struct ScopedJoinHandle<'scope, T> {
+    /// A join handle to the spawned thread.
+    handle: SharedOption<thread::JoinHandle<()>>,
+
+    /// Holds the result of the inner closure.
+    result: SharedOption<T>,
+
+    /// A handle to the spawned thread.
+    thread: thread::Thread,
+
+    /// Borrows the parent scope with lifetime `'scope`.
+    _marker: PhantomData<&'scope ()>,
+}
+
+impl<T> ScopedJoinHandle<'_, T> {
+    /// Waits for the thread to finish and returns its result.
+    ///
+    /// If the child thread panics, an error is returned. Note that if panics are implemented by
+    /// aborting the process, no error is returned; see the notes of [std::panic::catch_unwind].
+    ///
+    /// # Panics
+    ///
+    /// This function may panic on some platforms if a thread attempts to join itself or otherwise
+    /// may create a deadlock with joining threads.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::thread;
+    ///
+    /// thread::scope(|s| {
+    ///     let handle1 = s.spawn(|_| println!("I'm a happy thread :)"));
+    ///     let handle2 = s.spawn(|_| panic!("I'm a sad thread :("));
+    ///
+    ///     // Join the first thread and verify that it succeeded.
+    ///     let res = handle1.join();
+    ///     assert!(res.is_ok());
+    ///
+    ///     // Join the second thread and verify that it panicked.
+    ///     let res = handle2.join();
+    ///     assert!(res.is_err());
+    /// }).unwrap();
+    /// ```
+    pub fn join(self) -> thread::Result<T> {
+        // Take out the handle. The handle will surely be available because the root scope waits
+        // for nested scopes before joining remaining threads.
+        let handle = self.handle.lock().unwrap().take().unwrap();
+
+        // Join the thread and then take the result out of its inner closure.
+        handle
+            .join()
+            .map(|()| self.result.lock().unwrap().take().unwrap())
+    }
+
+    /// Returns a handle to the underlying thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_utils::thread;
+    ///
+    /// thread::scope(|s| {
+    ///     let handle = s.spawn(|_| println!("A child thread is running"));
+    ///     println!("The child thread ID: {:?}", handle.thread().id());
+    /// }).unwrap();
+    /// ```
+    pub fn thread(&self) -> &thread::Thread {
+        &self.thread
+    }
+}
+
+/// Unix-specific extensions.
+#[cfg(unix)]
+mod unix {
+    use super::ScopedJoinHandle;
+    use std::os::unix::thread::{JoinHandleExt, RawPthread};
+
+    impl<T> JoinHandleExt for ScopedJoinHandle<'_, T> {
+        fn as_pthread_t(&self) -> RawPthread {
+            // Borrow the handle. The handle will surely be available because the root scope waits
+            // for nested scopes before joining remaining threads.
+            let handle = self.handle.lock().unwrap();
+            handle.as_ref().unwrap().as_pthread_t()
+        }
+        fn into_pthread_t(self) -> RawPthread {
+            self.as_pthread_t()
+        }
+    }
+}
+
+/// Windows-specific extensions.
+#[cfg(windows)]
+mod windows {
+    use super::ScopedJoinHandle;
+    use std::os::windows::io::{AsRawHandle, IntoRawHandle, RawHandle};
+
+    impl<T> AsRawHandle for ScopedJoinHandle<'_, T> {
+        fn as_raw_handle(&self) -> RawHandle {
+            // Borrow the handle. The handle will surely be available because the root scope waits
+            // for nested scopes before joining remaining threads.
+            let handle = self.handle.lock().unwrap();
+            handle.as_ref().unwrap().as_raw_handle()
+        }
+    }
+
+    impl<T> IntoRawHandle for ScopedJoinHandle<'_, T> {
+        fn into_raw_handle(self) -> RawHandle {
+            self.as_raw_handle()
+        }
+    }
+}
+
+impl<T> fmt::Debug for ScopedJoinHandle<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.pad("ScopedJoinHandle { .. }")
+    }
+}
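To see the error shape these handles produce end to end: `scope` collects each child's panic payload, and a `panic!` with a string-literal payload can be recovered by downcasting, exactly as the vendored test suite further down does. A small self-contained sketch using only the public API above:

```rust
use std::any::Any;

use crossbeam_utils::thread;

fn main() {
    let result = thread::scope(|s| {
        s.spawn(|_| panic!("boom"));
    });

    // `scope` returns `Err` because a child panicked; the error is the
    // vector of individual panic payloads.
    let err = result.unwrap_err();
    let panics = err
        .downcast_ref::<Vec<Box<dyn Any + Send + 'static>>>()
        .unwrap();

    assert_eq!(panics.len(), 1);
    assert_eq!(*panics[0].downcast_ref::<&str>().unwrap(), "boom");
}
```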
}") + } +} diff --git a/external/vendor/crossbeam-utils/tests/atomic_cell.rs b/external/vendor/crossbeam-utils/tests/atomic_cell.rs new file mode 100644 index 0000000000..9fe69328df --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/atomic_cell.rs @@ -0,0 +1,374 @@ +use std::mem; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +use crossbeam_utils::atomic::AtomicCell; + +#[test] +fn is_lock_free() { + struct UsizeWrap(#[allow(dead_code)] usize); + struct U8Wrap(#[allow(dead_code)] bool); + struct I16Wrap(#[allow(dead_code)] i16); + #[repr(align(8))] + struct U64Align8(#[allow(dead_code)] u64); + + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + + assert!(AtomicCell::<()>::is_lock_free()); + + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + + // Sizes of both types must be equal, and the alignment of `u64` must be greater or equal than + // that of `AtomicU64`. In i686-unknown-linux-gnu, the alignment of `u64` is `4` and alignment + // of `AtomicU64` is `8`, so `AtomicCell` is not lock-free. + assert_eq!( + AtomicCell::::is_lock_free(), + cfg!(target_has_atomic = "64") && std::mem::align_of::() == 8 + ); + assert_eq!(mem::size_of::(), 8); + assert_eq!(mem::align_of::(), 8); + assert_eq!( + AtomicCell::::is_lock_free(), + cfg!(target_has_atomic = "64") + ); + + // AtomicU128 is unstable + assert!(!AtomicCell::::is_lock_free()); +} + +#[test] +fn const_is_lock_free() { + const _U: bool = AtomicCell::::is_lock_free(); + const _I: bool = AtomicCell::::is_lock_free(); +} + +#[test] +fn drops_unit() { + static CNT: AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(); + + impl Foo { + fn new() -> Foo { + CNT.fetch_add(1, SeqCst); + Foo() + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new() + } + } + + let a = AtomicCell::new(Foo::new()); + + assert_eq!(a.swap(Foo::new()), Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn drops_u8() { + static CNT: AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(u8); + + impl Foo { + fn new(val: u8) -> Foo { + CNT.fetch_add(1, SeqCst); + Foo(val) + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new(0) + } + } + + let a = AtomicCell::new(Foo::new(5)); + + assert_eq!(a.swap(Foo::new(6)), Foo::new(5)); + assert_eq!(a.swap(Foo::new(1)), Foo::new(6)); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(0)); + assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn drops_usize() { + static CNT: 
AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(usize); + + impl Foo { + fn new(val: usize) -> Foo { + CNT.fetch_add(1, SeqCst); + Foo(val) + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new(0) + } + } + + let a = AtomicCell::new(Foo::new(5)); + + assert_eq!(a.swap(Foo::new(6)), Foo::new(5)); + assert_eq!(a.swap(Foo::new(1)), Foo::new(6)); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(0)); + assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn modular_u8() { + #[derive(Clone, Copy, Eq, Debug, Default)] + struct Foo(u8); + + impl PartialEq for Foo { + fn eq(&self, other: &Foo) -> bool { + self.0 % 5 == other.0 % 5 + } + } + + let a = AtomicCell::new(Foo(1)); + + assert_eq!(a.load(), Foo(1)); + assert_eq!(a.swap(Foo(2)), Foo(11)); + assert_eq!(a.load(), Foo(52)); + + a.store(Foo(0)); + assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100))); + assert_eq!(a.load().0, 5); + assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); + assert_eq!(a.load().0, 15); +} + +#[test] +fn modular_usize() { + #[derive(Clone, Copy, Eq, Debug, Default)] + struct Foo(usize); + + impl PartialEq for Foo { + fn eq(&self, other: &Foo) -> bool { + self.0 % 5 == other.0 % 5 + } + } + + let a = AtomicCell::new(Foo(1)); + + assert_eq!(a.load(), Foo(1)); + assert_eq!(a.swap(Foo(2)), Foo(11)); + assert_eq!(a.load(), Foo(52)); + + a.store(Foo(0)); + assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100))); + assert_eq!(a.load().0, 5); + assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); + assert_eq!(a.load().0, 15); +} + +#[test] +fn garbage_padding() { + #[derive(Copy, Clone, Eq, PartialEq)] + struct Object { + a: i64, + b: i32, + } + + let cell = AtomicCell::new(Object { a: 0, b: 0 }); + let _garbage = [0xfe, 0xfe, 0xfe, 0xfe, 0xfe]; // Needed + let next = Object { a: 0, b: 0 }; + + let prev = cell.load(); + assert!(cell.compare_exchange(prev, next).is_ok()); + println!(); +} + +#[test] +fn const_atomic_cell_new() { + static CELL: AtomicCell = AtomicCell::new(0); + + CELL.store(1); + assert_eq!(CELL.load(), 1); +} + +// https://github.com/crossbeam-rs/crossbeam/pull/767 +macro_rules! 
test_arithmetic { + ($test_name:ident, $ty:ident) => { + #[test] + fn $test_name() { + let a: AtomicCell<$ty> = AtomicCell::new(7); + + assert_eq!(a.fetch_add(3), 7); + assert_eq!(a.load(), 10); + + assert_eq!(a.fetch_sub(3), 10); + assert_eq!(a.load(), 7); + + assert_eq!(a.fetch_and(3), 7); + assert_eq!(a.load(), 3); + + assert_eq!(a.fetch_or(16), 3); + assert_eq!(a.load(), 19); + + assert_eq!(a.fetch_xor(2), 19); + assert_eq!(a.load(), 17); + + assert_eq!(a.fetch_max(18), 17); + assert_eq!(a.load(), 18); + + assert_eq!(a.fetch_min(17), 18); + assert_eq!(a.load(), 17); + + assert_eq!(a.fetch_nand(7), 17); + assert_eq!(a.load(), !(17 & 7)); + } + }; +} +test_arithmetic!(arithmetic_u8, u8); +test_arithmetic!(arithmetic_i8, i8); +test_arithmetic!(arithmetic_u16, u16); +test_arithmetic!(arithmetic_i16, i16); +test_arithmetic!(arithmetic_u32, u32); +test_arithmetic!(arithmetic_i32, i32); +test_arithmetic!(arithmetic_u64, u64); +test_arithmetic!(arithmetic_i64, i64); +test_arithmetic!(arithmetic_u128, u128); +test_arithmetic!(arithmetic_i128, i128); + +// https://github.com/crossbeam-rs/crossbeam/issues/748 +#[cfg_attr(miri, ignore)] // TODO +#[test] +fn issue_748() { + #[allow(dead_code)] + #[repr(align(8))] + #[derive(Debug, Clone, Copy, PartialEq, Eq)] + enum Test { + Field(u32), + FieldLess, + } + + assert_eq!(mem::size_of::(), 8); + assert_eq!( + AtomicCell::::is_lock_free(), + cfg!(target_has_atomic = "64") + ); + let x = AtomicCell::new(Test::FieldLess); + assert_eq!(x.load(), Test::FieldLess); +} + +// https://github.com/crossbeam-rs/crossbeam/issues/833 +#[test] +fn issue_833() { + use std::num::NonZeroU128; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::thread; + + #[cfg(miri)] + const N: usize = 10_000; + #[cfg(not(miri))] + const N: usize = 1_000_000; + + #[allow(dead_code)] + enum Enum { + NeverConstructed, + Cell(AtomicCell), + } + + static STATIC: Enum = Enum::Cell(AtomicCell::new(match NonZeroU128::new(1) { + Some(nonzero) => nonzero, + None => unreachable!(), + })); + static FINISHED: AtomicBool = AtomicBool::new(false); + + let handle = thread::spawn(|| { + let cell = match &STATIC { + Enum::NeverConstructed => unreachable!(), + Enum::Cell(cell) => cell, + }; + let x = NonZeroU128::new(0xFFFF_FFFF_FFFF_FFFF_0000_0000_0000_0000).unwrap(); + let y = NonZeroU128::new(0x0000_0000_0000_0000_FFFF_FFFF_FFFF_FFFF).unwrap(); + while !FINISHED.load(Ordering::Relaxed) { + cell.store(x); + cell.store(y); + } + }); + + for _ in 0..N { + if let Enum::NeverConstructed = STATIC { + unreachable!(":("); + } + } + + FINISHED.store(true, Ordering::Relaxed); + handle.join().unwrap(); +} diff --git a/external/vendor/crossbeam-utils/tests/cache_padded.rs b/external/vendor/crossbeam-utils/tests/cache_padded.rs new file mode 100644 index 0000000000..86e9a7709c --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/cache_padded.rs @@ -0,0 +1,113 @@ +use std::cell::Cell; +use std::mem; + +use crossbeam_utils::CachePadded; + +#[test] +fn default() { + let x: CachePadded = Default::default(); + assert_eq!(*x, 0); +} + +#[test] +fn store_u64() { + let x: CachePadded = CachePadded::new(17); + assert_eq!(*x, 17); +} + +#[test] +fn store_pair() { + let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37)); + assert_eq!(x.0, 17); + assert_eq!(x.1, 37); +} + +#[test] +fn distance() { + let arr = [CachePadded::new(17u8), CachePadded::new(37u8)]; + let a = &*arr[0] as *const u8; + let b = &*arr[1] as *const u8; + let align = mem::align_of::>(); + assert!(align >= 32); + assert_eq!(unsafe { 
a.add(align) }, b); +} + +#[test] +fn different_sizes() { + CachePadded::new(17u8); + CachePadded::new(17u16); + CachePadded::new(17u32); + CachePadded::new([17u64; 0]); + CachePadded::new([17u64; 1]); + CachePadded::new([17u64; 2]); + CachePadded::new([17u64; 3]); + CachePadded::new([17u64; 4]); + CachePadded::new([17u64; 5]); + CachePadded::new([17u64; 6]); + CachePadded::new([17u64; 7]); + CachePadded::new([17u64; 8]); +} + +#[test] +fn large() { + let a = [17u64; 9]; + let b = CachePadded::new(a); + assert!(mem::size_of_val(&a) <= mem::size_of_val(&b)); +} + +#[test] +fn debug() { + assert_eq!( + format!("{:?}", CachePadded::new(17u64)), + "CachePadded { value: 17 }" + ); +} + +#[test] +fn drops() { + let count = Cell::new(0); + + struct Foo<'a>(&'a Cell); + + impl<'a> Drop for Foo<'a> { + fn drop(&mut self) { + self.0.set(self.0.get() + 1); + } + } + + let a = CachePadded::new(Foo(&count)); + let b = CachePadded::new(Foo(&count)); + + assert_eq!(count.get(), 0); + drop(a); + assert_eq!(count.get(), 1); + drop(b); + assert_eq!(count.get(), 2); +} + +#[allow(clippy::clone_on_copy)] // This is intentional. +#[test] +fn clone() { + let a = CachePadded::new(17); + let b = a.clone(); + assert_eq!(*a, *b); +} + +#[test] +fn runs_custom_clone() { + let count = Cell::new(0); + + struct Foo<'a>(&'a Cell); + + impl<'a> Clone for Foo<'a> { + fn clone(&self) -> Foo<'a> { + self.0.set(self.0.get() + 1); + Foo::<'a>(self.0) + } + } + + let a = CachePadded::new(Foo(&count)); + let _ = a.clone(); + + assert_eq!(count.get(), 1); +} diff --git a/external/vendor/crossbeam-utils/tests/parker.rs b/external/vendor/crossbeam-utils/tests/parker.rs new file mode 100644 index 0000000000..2bf9c37d49 --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/parker.rs @@ -0,0 +1,41 @@ +use std::thread::sleep; +use std::time::Duration; +use std::u32; + +use crossbeam_utils::sync::Parker; +use crossbeam_utils::thread; + +#[test] +fn park_timeout_unpark_before() { + let p = Parker::new(); + for _ in 0..10 { + p.unparker().unpark(); + p.park_timeout(Duration::from_millis(u32::MAX as u64)); + } +} + +#[test] +fn park_timeout_unpark_not_called() { + let p = Parker::new(); + for _ in 0..10 { + p.park_timeout(Duration::from_millis(10)) + } +} + +#[test] +fn park_timeout_unpark_called_other_thread() { + for _ in 0..10 { + let p = Parker::new(); + let u = p.unparker().clone(); + + thread::scope(|scope| { + scope.spawn(move |_| { + sleep(Duration::from_millis(50)); + u.unpark(); + }); + + p.park_timeout(Duration::from_millis(u32::MAX as u64)) + }) + .unwrap(); + } +} diff --git a/external/vendor/crossbeam-utils/tests/sharded_lock.rs b/external/vendor/crossbeam-utils/tests/sharded_lock.rs new file mode 100644 index 0000000000..002f7f5e19 --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/sharded_lock.rs @@ -0,0 +1,252 @@ +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::mpsc::channel; +use std::sync::{Arc, TryLockError}; +use std::thread; + +use crossbeam_utils::sync::ShardedLock; +use rand::Rng; + +#[derive(Eq, PartialEq, Debug)] +struct NonCopy(i32); + +#[test] +fn smoke() { + let l = ShardedLock::new(()); + drop(l.read().unwrap()); + drop(l.write().unwrap()); + drop((l.read().unwrap(), l.read().unwrap())); + drop(l.write().unwrap()); +} + +#[test] +fn frob() { + const N: u32 = 10; + #[cfg(miri)] + const M: usize = 50; + #[cfg(not(miri))] + const M: usize = 1000; + + let r = Arc::new(ShardedLock::new(())); + + let (tx, rx) = channel::<()>(); + for _ in 0..N { + let tx = tx.clone(); + let r = r.clone(); 
+ thread::spawn(move || { + let mut rng = rand::thread_rng(); + for _ in 0..M { + if rng.gen_bool(1.0 / (N as f64)) { + drop(r.write().unwrap()); + } else { + drop(r.read().unwrap()); + } + } + drop(tx); + }); + } + drop(tx); + let _ = rx.recv(); +} + +#[test] +fn arc_poison_wr() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.write().unwrap(); + panic!(); + }) + .join(); + assert!(arc.read().is_err()); +} + +#[test] +fn arc_poison_ww() { + let arc = Arc::new(ShardedLock::new(1)); + assert!(!arc.is_poisoned()); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.write().unwrap(); + panic!(); + }) + .join(); + assert!(arc.write().is_err()); + assert!(arc.is_poisoned()); +} + +#[test] +fn arc_no_poison_rr() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.read().unwrap(); + panic!(); + }) + .join(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 1); +} +#[test] +fn arc_no_poison_sl() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.read().unwrap(); + panic!() + }) + .join(); + let lock = arc.write().unwrap(); + assert_eq!(*lock, 1); +} + +#[test] +fn arc() { + let arc = Arc::new(ShardedLock::new(0)); + let arc2 = arc.clone(); + let (tx, rx) = channel(); + + thread::spawn(move || { + let mut lock = arc2.write().unwrap(); + for _ in 0..10 { + let tmp = *lock; + *lock = -1; + thread::yield_now(); + *lock = tmp + 1; + } + tx.send(()).unwrap(); + }); + + // Readers try to catch the writer in the act + let mut children = Vec::new(); + for _ in 0..5 { + let arc3 = arc.clone(); + children.push(thread::spawn(move || { + let lock = arc3.read().unwrap(); + assert!(*lock >= 0); + })); + } + + // Wait for children to pass their asserts + for r in children { + assert!(r.join().is_ok()); + } + + // Wait for writer to finish + rx.recv().unwrap(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 10); +} + +#[test] +fn arc_access_in_unwind() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + let mut lock = self.i.write().unwrap(); + *lock += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }) + .join(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 2); +} + +#[test] +fn unsized_type() { + let sl: &ShardedLock<[i32]> = &ShardedLock::new([1, 2, 3]); + { + let b = &mut *sl.write().unwrap(); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*sl.read().unwrap(), comp); +} + +#[test] +fn try_write() { + let lock = ShardedLock::new(0isize); + let read_guard = lock.read().unwrap(); + + let write_result = lock.try_write(); + match write_result { + Err(TryLockError::WouldBlock) => (), + Ok(_) => panic!("try_write should not succeed while read_guard is in scope"), + Err(_) => panic!("unexpected error"), + } + + drop(read_guard); +} + +#[test] +fn test_into_inner() { + let m = ShardedLock::new(NonCopy(10)); + assert_eq!(m.into_inner().unwrap(), NonCopy(10)); +} + +#[test] +fn test_into_inner_drop() { + struct Foo(Arc); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = ShardedLock::new(Foo(num_drops.clone())); + 
assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = m.into_inner().unwrap(); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + } + assert_eq!(num_drops.load(Ordering::SeqCst), 1); +} + +#[test] +fn test_into_inner_poison() { + let m = Arc::new(ShardedLock::new(NonCopy(10))); + let m2 = m.clone(); + let _ = thread::spawn(move || { + let _lock = m2.write().unwrap(); + panic!("test panic in inner thread to poison ShardedLock"); + }) + .join(); + + assert!(m.is_poisoned()); + match Arc::try_unwrap(m).unwrap().into_inner() { + Err(e) => assert_eq!(e.into_inner(), NonCopy(10)), + Ok(x) => panic!("into_inner of poisoned ShardedLock is Ok: {:?}", x), + } +} + +#[test] +fn test_get_mut() { + let mut m = ShardedLock::new(NonCopy(10)); + *m.get_mut().unwrap() = NonCopy(20); + assert_eq!(m.into_inner().unwrap(), NonCopy(20)); +} + +#[test] +fn test_get_mut_poison() { + let m = Arc::new(ShardedLock::new(NonCopy(10))); + let m2 = m.clone(); + let _ = thread::spawn(move || { + let _lock = m2.write().unwrap(); + panic!("test panic in inner thread to poison ShardedLock"); + }) + .join(); + + assert!(m.is_poisoned()); + match Arc::try_unwrap(m).unwrap().get_mut() { + Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)), + Ok(x) => panic!("get_mut of poisoned ShardedLock is Ok: {:?}", x), + } +} diff --git a/external/vendor/crossbeam-utils/tests/thread.rs b/external/vendor/crossbeam-utils/tests/thread.rs new file mode 100644 index 0000000000..0dfad90bd6 --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/thread.rs @@ -0,0 +1,215 @@ +use std::any::Any; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; + +use crossbeam_utils::thread; + +const THREADS: usize = 10; +const SMALL_STACK_SIZE: usize = 20; + +#[test] +fn join() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + let handle = scope.spawn(|_| { + counter.store(1, Ordering::Relaxed); + }); + assert!(handle.join().is_ok()); + + let panic_handle = scope.spawn(|_| { + panic!("\"My honey is running out!\", said Pooh."); + }); + assert!(panic_handle.join().is_err()); + }) + .unwrap(); + + // There should be sufficient synchronization. 
+ assert_eq!(1, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }); + } + }) + .unwrap(); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter_builder() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + for i in 0..THREADS { + scope + .builder() + .name(format!("child-{}", i)) + .stack_size(SMALL_STACK_SIZE) + .spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }) + .unwrap(); + } + }) + .unwrap(); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter_panic() { + let counter = AtomicUsize::new(0); + let result = thread::scope(|scope| { + scope.spawn(|_| { + panic!("\"My honey is running out!\", said Pooh."); + }); + sleep(Duration::from_millis(100)); + + for _ in 0..THREADS { + scope.spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }); + } + }); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); + assert!(result.is_err()); +} + +#[test] +fn panic_twice() { + let result = thread::scope(|scope| { + scope.spawn(|_| { + sleep(Duration::from_millis(500)); + panic!("thread #1"); + }); + scope.spawn(|_| { + panic!("thread #2"); + }); + }); + + let err = result.unwrap_err(); + let vec = err + .downcast_ref::>>() + .unwrap(); + assert_eq!(2, vec.len()); + + let first = vec[0].downcast_ref::<&str>().unwrap(); + let second = vec[1].downcast_ref::<&str>().unwrap(); + assert_eq!("thread #1", *first); + assert_eq!("thread #2", *second) +} + +#[test] +fn panic_many() { + let result = thread::scope(|scope| { + scope.spawn(|_| panic!("deliberate panic #1")); + scope.spawn(|_| panic!("deliberate panic #2")); + scope.spawn(|_| panic!("deliberate panic #3")); + }); + + let err = result.unwrap_err(); + let vec = err + .downcast_ref::>>() + .unwrap(); + assert_eq!(3, vec.len()); + + for panic in vec.iter() { + let panic = panic.downcast_ref::<&str>().unwrap(); + assert!( + *panic == "deliberate panic #1" + || *panic == "deliberate panic #2" + || *panic == "deliberate panic #3" + ); + } +} + +#[test] +fn nesting() { + let var = "foo".to_string(); + + struct Wrapper<'a> { + var: &'a String, + } + + impl<'a> Wrapper<'a> { + fn recurse(&'a self, scope: &thread::Scope<'a>, depth: usize) { + assert_eq!(self.var, "foo"); + + if depth > 0 { + scope.spawn(move |scope| { + self.recurse(scope, depth - 1); + }); + } + } + } + + let wrapper = Wrapper { var: &var }; + + thread::scope(|scope| { + scope.spawn(|scope| { + scope.spawn(|scope| { + wrapper.recurse(scope, 5); + }); + }); + }) + .unwrap(); +} + +#[test] +fn join_nested() { + thread::scope(|scope| { + scope.spawn(|scope| { + let handle = scope.spawn(|_| 7); + + sleep(Duration::from_millis(200)); + handle.join().unwrap(); + }); + + sleep(Duration::from_millis(100)); + }) + .unwrap(); +} + +#[test] +fn scope_returns_ok() { + let result = thread::scope(|scope| scope.spawn(|_| 1234).join().unwrap()).unwrap(); + assert_eq!(result, 1234); +} + +#[cfg(unix)] +#[test] +fn as_pthread_t() { + use std::os::unix::thread::JoinHandleExt; + thread::scope(|scope| { + let handle = scope.spawn(|_scope| { + sleep(Duration::from_millis(100)); + 42 + }); + let _pthread_t = handle.as_pthread_t(); + handle.join().unwrap(); + }) + .unwrap(); +} + +#[cfg(windows)] +#[test] +fn as_raw_handle() { + use std::os::windows::io::AsRawHandle; + thread::scope(|scope| { + let handle = scope.spawn(|_scope| { + 
sleep(Duration::from_millis(100)); + 42 + }); + let _raw_handle = handle.as_raw_handle(); + handle.join().unwrap(); + }) + .unwrap(); +} diff --git a/external/vendor/crossbeam-utils/tests/wait_group.rs b/external/vendor/crossbeam-utils/tests/wait_group.rs new file mode 100644 index 0000000000..5b549b849c --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/wait_group.rs @@ -0,0 +1,67 @@ +use std::sync::mpsc; +use std::thread; +use std::time::Duration; + +use crossbeam_utils::sync::WaitGroup; + +const THREADS: usize = 10; + +#[test] +fn wait() { + let wg = WaitGroup::new(); + let (tx, rx) = mpsc::channel(); + + for _ in 0..THREADS { + let wg = wg.clone(); + let tx = tx.clone(); + + thread::spawn(move || { + wg.wait(); + tx.send(()).unwrap(); + }); + } + + thread::sleep(Duration::from_millis(100)); + + // At this point, all spawned threads should be blocked, so we shouldn't get anything from the + // channel. + assert!(rx.try_recv().is_err()); + + wg.wait(); + + // Now, the wait group is cleared and we should receive messages. + for _ in 0..THREADS { + rx.recv().unwrap(); + } +} + +#[test] +fn wait_and_drop() { + let wg = WaitGroup::new(); + let wg2 = WaitGroup::new(); + let (tx, rx) = mpsc::channel(); + + for _ in 0..THREADS { + let wg = wg.clone(); + let wg2 = wg2.clone(); + let tx = tx.clone(); + + thread::spawn(move || { + wg2.wait(); + tx.send(()).unwrap(); + drop(wg); + }); + } + + // At this point, no thread has gotten past `wg2.wait()`, so we shouldn't get anything from the + // channel. + assert!(rx.try_recv().is_err()); + drop(wg2); + + wg.wait(); + + // Now, the wait group is cleared and we should receive messages. + for _ in 0..THREADS { + rx.try_recv().unwrap(); + } +} diff --git a/external/vendor/event-listener-strategy/.cargo-checksum.json b/external/vendor/event-listener-strategy/.cargo-checksum.json new file mode 100644 index 0000000000..bb79fbf06d --- /dev/null +++ b/external/vendor/event-listener-strategy/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"7757162cf528299d660c378795d9f1d8430ac3ee03e67af5a0bb9b441aefda0a","CHANGELOG.md":"3fbfc89860f733a54c5ccec504e45baebdf6d89b289780bccebda2e28c09c616","Cargo.lock":"f9fa4dde21b5e9318922a9ff6ec01869cc259a468e788ab41723c1664197d754","Cargo.toml":"0a810743893bcd8bfd16e5abf152e4a48da8b6a270bd2aaca43f37a98cb77eb7","Cargo.toml.orig":"28ff199d93a889c2eb45dc54a9958b01df712286c289f385bddaddd0b7de847b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"df6555ffb5946bbaf931b71be241ec638b54a614ffd808185407b5071f348b28","src/lib.rs":"da209dd2d22c18580a9345dbb3b02a68b48bbf4019c54024b61e97640a461399","tests/easy_wrapper.rs":"b6b15a1cd6090ed5c59135c830aeca8a11046dbbbf428f2f7ef5cbe2fe28147d"},"package":"8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93"} \ No newline at end of file diff --git a/external/vendor/event-listener-strategy/.cargo_vcs_info.json b/external/vendor/event-listener-strategy/.cargo_vcs_info.json new file mode 100644 index 0000000000..2d4a7ee169 --- /dev/null +++ b/external/vendor/event-listener-strategy/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "f533c437e8a6561e9b625c3ee036a8ea7d19375c" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/external/vendor/event-listener-strategy/CHANGELOG.md b/external/vendor/event-listener-strategy/CHANGELOG.md new file mode 100644 index 0000000000..2abe1e456a --- /dev/null +++ 
b/external/vendor/event-listener-strategy/CHANGELOG.md @@ -0,0 +1,37 @@ +# Version 0.5.4 + +- Add `portable-atomic` feature that exposes `event-listener`'s underlying `portable-atomic` feature. (#27) + +# Version 0.5.3 + +- Add `loom` feature that exposes `event-listener`'s underlying `loom` feature. (#24) + +# Version 0.5.2 + +- Re-export the `event-listener` crate. (#20) + +# Version 0.5.1 + +- Fix the `repository` field in `Cargo.toml` to point to the correct repository. (#17) + +# Version 0.5.0 + +- **Breaking:** Bump `event-listener` to v5.0.0. (#12) +- Bump MSRV to 1.60. (#14) +- Make `NonBlocking` `Send` and `Sync`. (#15) + +# Version 0.4.0 + +- **Breaking:** Bump `event-listener` to v4.0.0. (#10) + +# Version 0.3.0 + +- **Breaking:** Remove an unneeded lifetime from the public API. (#6) + +# Version 0.2.0 + +- **Breaking:** Add support for WASM targets by disabling `wait()` on them. (#3) + +# Version 0.1.0 + +- Initial version diff --git a/external/vendor/event-listener-strategy/Cargo.lock b/external/vendor/event-listener-strategy/Cargo.lock new file mode 100644 index 0000000000..3235537c96 --- /dev/null +++ b/external/vendor/event-listener-strategy/Cargo.lock @@ -0,0 +1,704 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "bumpalo" +version = "3.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" + +[[package]] +name = "cc" +version = "1.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", + "loom", + "portable-atomic", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "event-listener" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +dependencies = [ + "concurrent-queue", + "loom", + "parking", + "pin-project-lite", + "portable-atomic", + "portable-atomic-util", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.4" +dependencies = [ + "event-listener", + "futures-lite", + "pin-project-lite", + "wasm-bindgen-test", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-lite" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "minicov" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b" +dependencies = [ + "cc", + "walkdir", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "once_cell" +version = "1.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2806eaa3524762875e21c3dcd057bc4b7bfa01ce4da8d46be1cd43649e1cc6b" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" +dependencies = [ + "loom", +] + 
+[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "proc-macro2" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustversion" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "smallvec" +version = "1.14.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" + +[[package]] +name = "syn" +version = "2.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-bindgen-test" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3" +dependencies = [ + "js-sys", + "minicov", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core", + "windows-targets", +] + +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/external/vendor/event-listener-strategy/Cargo.toml b/external/vendor/event-listener-strategy/Cargo.toml new file mode 100644 index 0000000000..bfbd0b581d --- /dev/null +++ b/external/vendor/event-listener-strategy/Cargo.toml @@ -0,0 +1,78 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60" +name = "event-listener-strategy" +version = "0.5.4" +authors = ["John Nunley "] +build = false +exclude = ["/.*"] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Block or poll on event_listener easily" +readme = "README.md" +keywords = [ + "condvar", + "eventcount", + "wake", + "blocking", + "park", +] +categories = [ + "asynchronous", + "concurrency", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/event-listener-strategy" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[features] +default = ["std"] +loom = ["event-listener/loom"] +portable-atomic = ["event-listener/portable-atomic"] +std = ["event-listener/std"] + +[lib] +name = "event_listener_strategy" +path = "src/lib.rs" + +[[test]] +name = "easy_wrapper" +path = "tests/easy_wrapper.rs" + +[dependencies.event-listener] +version = "5.0.0" +default-features = false + +[dependencies.pin-project-lite] +version = "0.2.12" + +[dev-dependencies.futures-lite] +version = "2.0.0" + +[target.'cfg(target_family = "wasm")'.dev-dependencies.wasm-bindgen-test] +version = "0.3.37" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ["cfg(wasm_bindgen_unstable_test_coverage)"] diff --git a/external/vendor/event-listener-strategy/Cargo.toml.orig b/external/vendor/event-listener-strategy/Cargo.toml.orig new file mode 100644 index 0000000000..d5172c3c9f --- /dev/null +++ b/external/vendor/event-listener-strategy/Cargo.toml.orig @@ -0,0 +1,36 @@ +[package] +name = "event-listener-strategy" +# Make sure to update CHANGELOG.md when the version is bumped here. +version = "0.5.4" +edition = "2021" +authors = ["John Nunley "] +rust-version = "1.60" +description = "Block or poll on event_listener easily" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/event-listener-strategy" +keywords = ["condvar", "eventcount", "wake", "blocking", "park"] +categories = ["asynchronous", "concurrency"] +exclude = ["/.*"] + +[dependencies] +event-listener = { version = "5.0.0", default-features = false } +pin-project-lite = "0.2.12" + +[features] +default = ["std"] +loom = ["event-listener/loom"] +std = ["event-listener/std"] +portable-atomic = ["event-listener/portable-atomic"] + +[dev-dependencies] +futures-lite = "2.0.0" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(wasm_bindgen_unstable_test_coverage)'] } + +[target.'cfg(target_family = "wasm")'.dev-dependencies] +wasm-bindgen-test = "0.3.37" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/external/vendor/event-listener-strategy/LICENSE-APACHE b/external/vendor/event-listener-strategy/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/external/vendor/event-listener-strategy/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/external/vendor/event-listener-strategy/LICENSE-MIT b/external/vendor/event-listener-strategy/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/external/vendor/event-listener-strategy/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/external/vendor/event-listener-strategy/README.md b/external/vendor/event-listener-strategy/README.md new file mode 100644 index 0000000000..ab0ae05042 --- /dev/null +++ b/external/vendor/event-listener-strategy/README.md @@ -0,0 +1,84 @@ +# event-listener-strategy + +[![Build](https://github.com/smol-rs/event-listener-strategy/workflows/CI/badge.svg)]( +https://github.com/smol-rs/event-listener-strategy/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/event-listener-strategy) +[![Cargo](https://img.shields.io/crates/v/event-listener-strategy.svg)]( +https://crates.io/crates/event-listener-strategy) +[![Documentation](https://docs.rs/event-listener-strategy/badge.svg)]( +https://docs.rs/event-listener-strategy) + +A strategy for using the [`event-listener`] crate in both blocking and non-blocking contexts. + +One of the stand-out features of the [`event-listener`] crate is the ability to use it in both +asynchronous and synchronous contexts. However, sometimes using it like this causes a lot of +boilerplate to be duplicated. This crate aims to reduce that boilerplate by providing an `EventListenerFuture` trait that implements both blocking and non-blocking functionality. + +[`event-listener`]: https://docs.rs/event-listener + +# Examples + +``` +use event_listener::{Event, EventListener}; +use event_listener_strategy::{EventListenerFuture, FutureWrapper, Strategy}; + +use std::pin::Pin; +use std::task::Poll; +use std::thread; +use std::sync::Arc; + +// A future that waits three seconds for an event to be fired. +fn wait_three_seconds() -> WaitThreeSeconds { + let event = Event::new(); + let listener = event.listen(); + + thread::spawn(move || { + thread::sleep(std::time::Duration::from_secs(3)); + event.notify(1); + }); + + WaitThreeSeconds { listener: Some(listener) } +} + +struct WaitThreeSeconds { + listener: Option<EventListener>, +} + +impl EventListenerFuture for WaitThreeSeconds { + type Output = (); + + fn poll_with_strategy<'a, S: Strategy<'a>>( + mut self: Pin<&mut Self>, + strategy: &mut S, + context: &mut S::Context, + ) -> Poll<Self::Output> { + strategy.poll(&mut self.listener, context) + } +} + +// Use the future in a blocking context.
+let future = wait_three_seconds(); +future.wait(); + +// Use the future in a non-blocking context. +futures_lite::future::block_on(async { + let future = FutureWrapper::new(wait_three_seconds()); + future.await; +}); +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/external/vendor/event-listener-strategy/src/lib.rs b/external/vendor/event-listener-strategy/src/lib.rs new file mode 100644 index 0000000000..f680076193 --- /dev/null +++ b/external/vendor/event-listener-strategy/src/lib.rs @@ -0,0 +1,549 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +//! A strategy for using the [`event-listener`] crate in both blocking and non-blocking contexts. +//! +//! One of the stand-out features of the [`event-listener`] crate is the ability to use it in both +//! asynchronous and synchronous contexts. However, sometimes using it like this causes a lot of +//! boilerplate to be duplicated. This crate aims to reduce that boilerplate by providing an +//! [`EventListenerFuture`] trait that implements both blocking and non-blocking functionality. +//! +//! # Examples +//! +//! ``` +//! use event_listener_strategy::{ +//! event_listener::{Event, EventListener}, +//! EventListenerFuture, FutureWrapper, Strategy +//! }; +//! +//! use std::pin::Pin; +//! use std::task::Poll; +//! use std::thread; +//! use std::sync::Arc; +//! +//! // A future that waits three seconds for an event to be fired. +//! fn wait_three_seconds() -> WaitThreeSeconds { +//! let event = Event::new(); +//! let listener = event.listen(); +//! +//! thread::spawn(move || { +//! thread::sleep(std::time::Duration::from_secs(3)); +//! event.notify(1); +//! }); +//! +//! WaitThreeSeconds { listener: Some(listener) } +//! } +//! +//! struct WaitThreeSeconds { +//! listener: Option<EventListener>, +//! } +//! +//! impl EventListenerFuture for WaitThreeSeconds { +//! type Output = (); +//! +//! fn poll_with_strategy<'a, S: Strategy<'a>>( +//! mut self: Pin<&mut Self>, +//! strategy: &mut S, +//! context: &mut S::Context, +//! ) -> Poll<Self::Output> { +//! strategy.poll(&mut self.listener, context) +//! } +//! } +//! +//! // Use the future in a blocking context. +//! let future = wait_three_seconds(); +//! future.wait(); +//! +//! // Use the future in a non-blocking context. +//! futures_lite::future::block_on(async { +//! let future = FutureWrapper::new(wait_three_seconds()); +//! future.await; +//! }); +//!
``` + +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![forbid(future_incompatible, missing_docs)] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] + +use core::future::Future; +use core::marker::PhantomData; +use core::pin::Pin; +use core::task::{Context, Poll}; + +use event_listener::{EventListener, Listener}; + +#[doc(hidden)] +pub use pin_project_lite::pin_project; + +#[doc(no_inline)] +pub use event_listener; + +/// A wrapper around an [`EventListenerFuture`] that can be easily exported for use. +/// +/// This type implements [`Future`], has a `_new()` constructor, and a `wait()` method +/// that uses the [`Blocking`] strategy to poll the future until it is ready. +/// +/// # Examples +/// +/// ``` +/// mod my_future { +/// use event_listener_strategy::{easy_wrapper, EventListenerFuture, Strategy}; +/// use std::pin::Pin; +/// use std::task::Poll; +/// +/// struct MyFuture; +/// +/// impl EventListenerFuture for MyFuture { +/// type Output = (); +/// +/// fn poll_with_strategy<'a, S: Strategy<'a>>( +/// self: Pin<&mut Self>, +/// strategy: &mut S, +/// context: &mut S::Context, +/// ) -> Poll<Self::Output> { +/// /* ... */ +/// # Poll::Ready(()) +/// } +/// } +/// +/// easy_wrapper! { +/// /// A future that does something. +/// pub struct MyFutureWrapper(MyFuture => ()); +/// /// Wait for it. +/// pub wait(); +/// } +/// +/// impl MyFutureWrapper { +/// /// Create a new instance of the future. +/// pub fn new() -> Self { +/// Self::_new(MyFuture) +/// } +/// } +/// } +/// +/// use my_future::MyFutureWrapper; +/// +/// // Use the future in a blocking context. +/// let future = MyFutureWrapper::new(); +/// future.wait(); +/// +/// // Use the future in a non-blocking context. +/// futures_lite::future::block_on(async { +/// let future = MyFutureWrapper::new(); +/// future.await; +/// }); +/// ``` +#[macro_export] +macro_rules! easy_wrapper { + ( + $(#[$meta:meta])* + $vis:vis struct $name:ident + + $(< + $( $lifetime:lifetime $(: $lifetime_bound:lifetime)? ),* $(,)? + $( $generics:ident + $(: $generics_bound:path)? + $(: ?$generics_unsized_bound:path)? + $(: $generics_lifetime_bound:lifetime)? + $(= $generics_default:ty)? + ),* $(,)? + >)? + + ($inner:ty => $output:ty) + + $(where + $( $where_clause_ty:ty + $(: $where_clause_bound:path)? + $(: ?$where_clause_unsized_bound:path)? + $(: $where_clause_lifetime_bound:lifetime)? + ),* $(,)? + )? + + ; + + $(#[$wait_meta:meta])* + $wait_vis: vis wait(); + ) => { + $crate::pin_project! { + $(#[$meta])* + $vis struct $name $(< + $( $lifetime $(: $lifetime_bound)? ),* + $( $generics + $(: $generics_bound)? + $(: ?$generics_unsized_bound)? + $(: $generics_lifetime_bound)? + $(= $generics_default)? + ),* + >)? $( + where + $( $where_clause_ty + $(: $where_clause_bound)? + $(: ?$where_clause_unsized_bound)? + $(: $where_clause_lifetime_bound)? + ),* + )? { + #[pin] + _inner: $crate::FutureWrapper<$inner> + } + } + + impl $(< + $( $lifetime $(: $lifetime_bound)? ,)* + $( $generics + $(: $generics_bound)? + $(: ?$generics_unsized_bound)? + $(: $generics_lifetime_bound)? + $(= $generics_default)? + ),* + >)? $name $(< + $( $lifetime ,)* + $( $generics ),* + >)? $( + where + $( $where_clause_ty + $(: $where_clause_bound)? + $(: ?$where_clause_unsized_bound)? + $(: $where_clause_lifetime_bound)? + ),* + )?
{ + #[inline] + fn _new(inner: $inner) -> Self { + Self { + _inner: $crate::FutureWrapper::new(inner) + } + } + + $(#[$wait_meta])* + #[inline] + $wait_vis fn wait(self) -> $output { + use $crate::EventListenerFuture; + self._inner.into_inner().wait() + } + + pub(crate) fn poll_with_strategy<'__strategy, __S: $crate::Strategy<'__strategy>>( + self: ::core::pin::Pin<&mut Self>, + strategy: &mut __S, + context: &mut __S::Context, + ) -> ::core::task::Poll<$output> { + self.project()._inner.get_pin_mut().poll_with_strategy(strategy, context) + } + } + + impl $(< + $( $lifetime $(: $lifetime_bound)? ,)* + $( $generics + $(: $generics_bound)? + $(: ?$generics_unsized_bound)? + $(: $generics_lifetime_bound)? + $(= $generics_default)? + ),* + >)? ::core::future::Future for $name $( + < + $( $lifetime ,)* + $( $generics ),* + > + )? $( + where + $( $where_clause_ty + $(: $where_clause_bound)? + $(: ?$where_clause_unsized_bound)? + $(: $where_clause_lifetime_bound)? + ),* + )? { + type Output = $output; + + #[inline] + fn poll( + self: ::core::pin::Pin<&mut Self>, + context: &mut ::core::task::Context<'_> + ) -> ::core::task::Poll<Self::Output> { + self.project()._inner.poll(context) + } + } + }; +} + +/// A future that runs using the [`event-listener`] crate. +/// +/// This is similar to the [`Future`] trait from libstd, with one notable difference: it takes +/// a strategy that tells it whether to operate in a blocking or non-blocking context. The +/// `poll_with_strategy` method is the equivalent of the `poll` method in this regard; it uses +/// the [`Strategy`] trait to determine how to poll the future. +/// +/// From here, there are two additional things one can do with this trait: +/// +/// - The `wait` method, which uses the [`Blocking`] strategy to poll the future until it is +/// ready, blocking the current thread until it is. +/// - The [`FutureWrapper`] type, which implements [`Future`] and uses the [`NonBlocking`] +/// strategy to poll the future. +pub trait EventListenerFuture { + /// The type of value produced on completion. + type Output; + + /// Poll the future using the provided strategy. + /// + /// This function should use the `Strategy::poll` method to poll the future, and proceed + /// based on the result. + fn poll_with_strategy<'a, S: Strategy<'a>>( + self: Pin<&mut Self>, + strategy: &mut S, + context: &mut S::Context, + ) -> Poll<Self::Output>; + + /// Wait for the future to complete, blocking the current thread. + /// + /// This function uses the [`Blocking`] strategy to poll the future until it is ready. + /// + /// The future should only return `Pending` if `Strategy::poll` returns an error. Otherwise, + /// this function polls the future in a hot loop. + #[cfg(all(feature = "std", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "std", not(target_family = "wasm")))))] + fn wait(mut self) -> Self::Output + where + Self: Sized, + { + // SAFETY: `self`/`this` is not moved out after this. + let mut this = unsafe { Pin::new_unchecked(&mut self) }; + + loop { + if let Poll::Ready(res) = this + .as_mut() + .poll_with_strategy(&mut Blocking::default(), &mut ()) + { + return res; + } + } + } +} + +pin_project_lite::pin_project! { + /// A wrapper around an [`EventListenerFuture`] that implements [`Future`]. + /// + /// [`Future`]: core::future::Future + #[derive(Debug, Clone)] + pub struct FutureWrapper<F> { + #[pin] + inner: F, + } +} + +impl<F> FutureWrapper<F> { + /// Create a new `FutureWrapper` from the provided future.
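+ /// + /// A minimal usage sketch; the `AlwaysReady` future below is illustrative only and not + /// part of this crate's API: + /// + /// ``` + /// use event_listener_strategy::{EventListenerFuture, FutureWrapper, Strategy}; + /// use std::pin::Pin; + /// use std::task::Poll; + /// + /// // Illustrative future that completes immediately. + /// struct AlwaysReady; + /// + /// impl EventListenerFuture for AlwaysReady { + /// type Output = (); + /// + /// fn poll_with_strategy<'a, S: Strategy<'a>>( + /// self: Pin<&mut Self>, + /// _strategy: &mut S, + /// _context: &mut S::Context, + /// ) -> Poll<Self::Output> { + /// Poll::Ready(()) + /// } + /// } + /// + /// futures_lite::future::block_on(async { + /// FutureWrapper::new(AlwaysReady).await; + /// }); + /// ```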
+ #[inline] + pub fn new(inner: F) -> Self { + Self { inner } + } + + /// Consume the `FutureWrapper`, returning the inner future. + #[inline] + pub fn into_inner(self) -> F { + self.inner + } +} + +impl<F> FutureWrapper<F> { + /// Get a reference to the inner future. + #[inline] + pub fn get_ref(&self) -> &F { + &self.inner + } + + /// Get a mutable reference to the inner future. + #[inline] + pub fn get_mut(&mut self) -> &mut F { + &mut self.inner + } + + /// Get a pinned mutable reference to the inner future. + #[inline] + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut F> { + self.project().inner + } + + /// Get a pinned reference to the inner future. + #[inline] + pub fn get_pin_ref(self: Pin<&Self>) -> Pin<&F> { + self.project_ref().inner + } +} + +impl<F> From<F> for FutureWrapper<F> { + #[inline] + fn from(inner: F) -> Self { + Self { inner } + } +} + +impl<F: EventListenerFuture> Future for FutureWrapper<F> { + type Output = F::Output; + + #[inline] + fn poll(self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Self::Output> { + self.project() + .inner + .poll_with_strategy(&mut NonBlocking::default(), context) + } +} + +/// A strategy for polling an [`EventListenerFuture`] or an [`EventListener`]. +/// +/// This trait is used by the [`EventListenerFuture::poll_with_strategy`] method to determine +/// how to poll the future. It can also be used standalone, by calling the [`Strategy::wait`] +/// method. +/// +/// [`EventListenerFuture::poll_with_strategy`]: EventListenerFuture::poll_with_strategy +/// [`EventListener`]: event_listener::EventListener +/// +/// # Examples +/// +/// ``` +/// use event_listener_strategy::{ +/// event_listener::{Event, EventListener}, +/// EventListenerFuture, Strategy, Blocking, NonBlocking +/// }; +/// use std::pin::Pin; +/// +/// async fn wait_on<'a, S: Strategy<'a>>(evl: EventListener, strategy: &mut S) { +/// strategy.wait(evl).await; +/// } +/// +/// # futures_lite::future::block_on(async { +/// // Block on the future. +/// let ev = Event::new(); +/// let listener = ev.listen(); +/// ev.notify(1); +/// +/// wait_on(listener, &mut Blocking::default()).await; +/// +/// // Poll the future. +/// let listener = ev.listen(); +/// ev.notify(1); +/// +/// wait_on(listener, &mut NonBlocking::default()).await; +/// # }); +/// ``` +pub trait Strategy<'a> { + /// The context needed to poll the future. + type Context: ?Sized; + + /// The future returned by the [`Strategy::wait`] method. + type Future: Future + 'a; + + /// Poll the event listener until it is ready. + fn poll<T, L: Listener<T> + Unpin>( + &mut self, + event_listener: &mut Option<L>, + context: &mut Self::Context, + ) -> Poll<T>; + + /// Wait for the event listener to become ready. + fn wait(&mut self, evl: EventListener) -> Self::Future; +} + +/// A strategy that uses polling to efficiently wait for an event. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +pub struct NonBlocking<'a> { + /// The type `&'a mut &'a T` is invariant over `'a`, like `Context` is. + /// + /// We used to just use `Context` here, but then `Context` became `!Send` + /// and `!Sync`, making all of the futures that use this type `!Send` and + /// `!Sync` as well. So we just take the lifetime invariance and none of + /// the downsides.
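+ /// + /// As a small illustrative sketch of what that invariance rules out (the `shorten` + /// function below is hypothetical, not crate API), a `NonBlocking` cannot be coerced + /// to a shorter lifetime the way a covariant type could: + /// + /// ```compile_fail + /// use event_listener_strategy::NonBlocking; + /// + /// fn shorten<'short, 'long: 'short>(nb: NonBlocking<'long>) -> NonBlocking<'short> { + /// nb // rejected: `NonBlocking<'a>` is invariant in `'a` + /// } + /// ```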
+ _marker: PhantomData<&'a mut &'a ()>, +} + +impl<'a> Strategy<'a> for NonBlocking<'a> { + type Context = Context<'a>; + type Future = EventListener; + + #[inline] + fn wait(&mut self, evl: EventListener) -> Self::Future { + evl + } + + #[inline] + fn poll<T, L: Listener<T> + Unpin>( + &mut self, + event_listener: &mut Option<L>, + context: &mut Self::Context, + ) -> Poll<T> { + let poll = Pin::new( + event_listener + .as_mut() + .expect("`event_listener` should never be `None`"), + ) + .poll(context); + if poll.is_ready() { + *event_listener = None; + } + poll + } +} + +/// A strategy that blocks the current thread until the event is signalled. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[cfg(all(feature = "std", not(target_family = "wasm")))] +pub struct Blocking { + _private: (), +} + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +impl Strategy<'_> for Blocking { + type Context = (); + type Future = Ready; + + #[inline] + fn wait(&mut self, evl: EventListener) -> Self::Future { + evl.wait(); + Ready { _private: () } + } + + #[inline] + fn poll<T, L: Listener<T> + Unpin>( + &mut self, + event_listener: &mut Option<L>, + _context: &mut Self::Context, + ) -> Poll<T> { + let result = event_listener + .take() + .expect("`event_listener` should never be `None`") + .wait(); + Poll::Ready(result) + } +} + +/// A future that is always ready. +#[cfg(feature = "std")] +#[doc(hidden)] +#[derive(Debug, Clone)] +pub struct Ready { + _private: (), +} + +#[cfg(feature = "std")] +impl Future for Ready { + type Output = (); + + #[inline] + fn poll(self: Pin<&mut Self>, _context: &mut Context<'_>) -> Poll<Self::Output> { + Poll::Ready(()) + } +} + +#[test] +fn send_and_sync() { + fn assert_send_and_sync<T: Send + Sync>() {} + + #[cfg(all(feature = "std", not(target_family = "wasm")))] + { + assert_send_and_sync::<Blocking>(); + assert_send_and_sync::<Ready>(); + } + + assert_send_and_sync::<NonBlocking<'_>>(); + assert_send_and_sync::<FutureWrapper<EventListener>>(); +} diff --git a/external/vendor/event-listener-strategy/tests/easy_wrapper.rs b/external/vendor/event-listener-strategy/tests/easy_wrapper.rs new file mode 100644 index 0000000000..a62ee0231a --- /dev/null +++ b/external/vendor/event-listener-strategy/tests/easy_wrapper.rs @@ -0,0 +1,111 @@ +//! Testing of the `easy_wrapper!` macro. + +#![allow(clippy::multiple_bound_locations)] + +use event_listener_strategy::{easy_wrapper, EventListenerFuture, Strategy}; +use std::{marker::PhantomData, pin::Pin, task::Poll}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn easy_wrapper_generics() { + // Easy case. + struct MyStrategy; + + impl EventListenerFuture for MyStrategy { + type Output = (); + + fn poll_with_strategy<'a, S: Strategy<'a>>( + self: Pin<&mut Self>, + _strategy: &mut S, + _context: &mut S::Context, + ) -> Poll<Self::Output> { + Poll::Ready(()) + } + } + + easy_wrapper! { + struct MyEasyWrapper(MyStrategy => ()); + #[cfg(all(feature = "std", not(target_family = "wasm")))] + wait(); + } + + #[cfg(all(feature = "std", not(target_family = "wasm")))] + MyEasyWrapper::_new(MyStrategy).wait(); + + // Medium case with generics. + struct MyStrategy2<T> { + _marker: PhantomData<T>, + } + + impl<T> EventListenerFuture for MyStrategy2<T> { + type Output = T; + + fn poll_with_strategy<'a, S: Strategy<'a>>( + self: Pin<&mut Self>, + _strategy: &mut S, + _context: &mut S::Context, + ) -> Poll<Self::Output> { + unreachable!() + } + } + + easy_wrapper! { + struct MyEasyWrapper2<T>(MyStrategy2<T> => T); + #[cfg(all(feature = "std", not(target_family = "wasm")))] + wait(); + } + + // Medium mode with lifetime.
+ struct MyStrategylt<'a> { + _marker: PhantomData<&'a ()>, + } + + impl<'a> EventListenerFuture for MyStrategylt<'a> { + type Output = &'a (); + + fn poll_with_strategy<'b, S: Strategy<'b>>( + self: Pin<&mut Self>, + _strategy: &mut S, + _context: &mut S::Context, + ) -> Poll<Self::Output> { + unreachable!() + } + } + + easy_wrapper! { + struct MyEasyWrapperlt<'a>(MyStrategylt<'a> => &'a ()); + #[cfg(all(feature = "std", not(target_family = "wasm")))] + wait(); + } + + // Hard mode with generic bounds. + struct MyStrategy3<'a, T: ?Sized> + where + T: 'a, + { + _marker: PhantomData<&'a T>, + } + + impl<'a, T: ?Sized> EventListenerFuture for MyStrategy3<'a, T> + where + T: 'a, + { + type Output = &'a T; + + fn poll_with_strategy<'b, S: Strategy<'b>>( + self: Pin<&mut Self>, + _strategy: &mut S, + _context: &mut S::Context, + ) -> Poll<Self::Output> { + unreachable!() + } + } + + easy_wrapper! { + struct MyEasyWrapper3<'a, T: ?Sized>(MyStrategy3<'a, T> => &'a T) where T: 'a; + #[cfg(all(feature = "std", not(target_family = "wasm")))] + wait(); + } +} diff --git a/external/vendor/event-listener/.cargo-checksum.json b/external/vendor/event-listener/.cargo-checksum.json new file mode 100644 index 0000000000..94a44514f6 --- /dev/null +++ b/external/vendor/event-listener/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"198c7028f6dd675877499866319cb6bdcb7855e0fae970bdc5971542c428a805","CHANGELOG.md":"1c16454159ace4300e4c7ec655720b58cca02e210b182ae77b5d4972245ffc13","Cargo.lock":"c161de60c2d1a7e3f70921a2e42c25706e04005b56f30415418a7bc399baa025","Cargo.toml":"d43c210c87d3e94b514f1e662d0c3f4ca7a8f4d55a3eaaae5a1f69aabc2d6ad1","Cargo.toml.orig":"75d37ea1cd7304e8ff0181cc71a9d57fcbb853c4eed8aba9c5eac4cbcf64c03d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"b914d686589b9a168e199e2b44c545fffbb29c686bac5104c1e3ff306f60009b","benches/bench.rs":"a534ba022682b1979d00c6a9b815bd6433a6c950027f17f4d68733f893f7ecff","examples/mutex.rs":"04a3b314a878751e10a4e9c6b8a003b11428daedf603aa05a0dd32fc3828db75","src/intrusive.rs":"2bee912ec71515f8027698c59de0081d89d98bbb29607eb3d1f35f8ec7c5208b","src/lib.rs":"59782e750aab37ec5394a0410d4c2e79429061c3be289deb22b9901a52aefd2e","src/notify.rs":"bb5b9d495941f5513621df5f4d0264e838d0d5e97f75398d9aaabb31a946c43b","src/slab.rs":"b6df60454896805fea4bd4e18c753e6d8278d1f861182296ef5f88d074feb50d","src/slab/node.rs":"7e60763ac1b06601706f59765ca2a02303c14e7a54030bd2ee9c1a7a828d937c","tests/loom.rs":"774b4a8935bd5aebc7b294e5260da5ca543871af3b32e297ea4e73520e2b515a","tests/notify.rs":"5782d6732d7abd7035f93ea9c9f877fc884269052df1845621cdbead37511b7d"},"package":"e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab"} \ No newline at end of file diff --git a/external/vendor/event-listener/.cargo_vcs_info.json b/external/vendor/event-listener/.cargo_vcs_info.json new file mode 100644 index 0000000000..e55e704ff5 --- /dev/null +++ b/external/vendor/event-listener/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "0c18ca2308f0dea9147b94141cc2796e85cb42ae" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/external/vendor/event-listener/CHANGELOG.md b/external/vendor/event-listener/CHANGELOG.md new file mode 100644 index 0000000000..33805f58e7 --- /dev/null +++ b/external/vendor/event-listener/CHANGELOG.md @@ -0,0 +1,161 @@ +# Version 5.4.1 + +- Fix a copy-paste error in `wait_timeout` docs (#152) + +# Version 5.4.0 + +- Add a
`no_std` implementation based on the `critical-section` crate, enabled + via the feature of the same name. (#148) + +# Version 5.3.1 + +- Disable some optimizations that, in rare conditions, can cause race conditions + causing notifications to be dropped. (#139) +- Ensure the portable-atomic feature is set properly. (#134) +- Update `portable-atomic-util` to v0.2.0. (#132) +- Document the std feature. (#134) + +# Version 5.3.0 + +- Add a `loom` implementation. This feature is unstable and is not semver-supported. (#126) +- Make the panic message for polling the `EventListener` after it has completed more clear. (#125) + +# Version 5.2.0 + +- Make `StackSlot` `Sync`. (#121) + +# Version 5.1.0 + +- Make `StackSlot` `Send`. (#119) + +# Version 5.0.0 + +- **Breaking:** Rework the API to afford better usage. (#105) + - The heap-based API of the v2.x line is back. + - However, there is a stack-based API as an alternative. +- Add a way to get the total number of listeners. (#114) + +# Version 4.0.3 + +- Relax MSRV to 1.60. (#110) + +# Version 4.0.2 + +- Avoid spinning in `wait_deadline`. (#107) + +# Version 4.0.1 + +- Fix a use-after-move error after an `EventListener` is assigned to listen to + another `Event`. (#101) + +# Version 4.0.0 + +- **Breaking:** Fix a footgun in the `EventListener` type. `EventListener::new()` + now no longer takes an `&Event` as an argument, and `EventListener::listen()` + takes the `&Event` as an argument. Hopefully this should prevent `.await`ing + on a listener without making sure it's listening first. (#94) + +# Version 3.1.0 + +- Implement `UnwindSafe` and `RefUnwindSafe` for `EventListener`. This was unintentionally removed in version 3 (#96). + +# Version 3.0.1 + +- Emphasize that `listen()` must be called on `EventListener` in documentation. (#90) +- Write useful output in `fmt::Debug` implementations. (#86) + +# Version 3.0.0 + +- Use the `parking` crate instead of threading APIs (#27) +- Bump MSRV to 1.59 (#71) +- **Breaking:** Make this crate `no_std`-compatible on `default-features = false`. (#34) +- Create a new `event-listener-strategy` crate for abstracting over blocking/non-blocking operations. (#49) +- **Breaking:** Change the `EventListener` API to be `!Unpin`. (#51) +- Enable a feature for the `portable-atomic` crate. (#53) +- **Breaking:** Add a `Notification` trait which is used to enable tagged events. (#52) +- Add an `is_notified()` method to `Event`. (#48) +- **Breaking:** Make it so `notify()` returns the number of listeners notified. (#57) + +# Version 2.5.3 + +- Fix fence on x86 and miri. + +# Version 2.5.2 + +- Fix stacked borrows violation when `-Zmiri-tag-raw-pointers` is enabled. (#24) + +# Version 2.5.1 + +- Replace spinlock with a mutex. + +# Version 2.5.0 + +- Add `EventListener::discard()`. + +# Version 2.4.0 + +- `Event::new()` is now a const fn. + +# Version 2.3.3 + +- Fix a bug in `List::insert()` that was causing deadlocks. + +# Version 2.3.2 + +- Optimization: use a simple spinlock and cache an `Entry` for less allocation. + +# Version 2.3.1 + +- Optimization: don't initialize `Inner` when notifying `Event`. + +# Version 2.3.0 + +- Implement `UnwindSafe`/`RefUnwindSafe` for `Event`/`EventListener`. + +# Version 2.2.1 + +- Always keep the last waker in `EventListener::poll()`. + +# Version 2.2.0 + +- Add `EventListener::same_event()`. + +# Version 2.1.0 + +- Add `EventListener::listens_to()`. + +# Version 2.0.1 + +- Replace `usize::MAX` with `std::usize::MAX`. 
+ +# Version 2.0.0 + +- Remove `Event::notify_one()` and `Event::notify_all()`. +- Add `Event::notify_relaxed()` and `Event::notify_additional_relaxed()`. +- Dropped notified `EventListener` now notifies one *or* one additional listener. + +# Version 1.2.0 + +- Add `Event::notify_additional()`. + +# Version 1.1.2 + +- Change a `Relaxed` load to `Acquire` load. + +# Version 1.1.1 + +- Fix a bug in `EventListener::wait_timeout()`. + +# Version 1.1.0 + +- Add `EventListener::notify()`. + +# Version 1.0.1 + +- Reduce the complexity of `notify_all()` from O(n) to amortized O(1). +- Fix a bug where entries were notified in wrong order. +- Add tests. + +# Version 1.0.0 + +- Initial version. diff --git a/external/vendor/event-listener/Cargo.lock b/external/vendor/event-listener/Cargo.lock new file mode 100644 index 0000000000..aae970ee87 --- /dev/null +++ b/external/vendor/event-listener/Cargo.lock @@ -0,0 +1,970 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.2.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "4.5.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", + "loom", + "portable-atomic", +] + +[[package]] +name = "criterion" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1c047a62b0cc3e145fa84415a3191f628e980b194c2755aa12300a4e6cbd928" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools", + "num-traits", + "oorandom", + "regex", + "serde", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "event-listener" +version = "5.4.1" +dependencies = [ + "concurrent-queue", + "criterion", + "critical-section", + "futures-lite", + "loom", + "parking", + "pin-project-lite", + "portable-atomic", + "portable-atomic-util", + "try-lock", + "waker-fn", + "wasm-bindgen-test", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-lite" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "generator" +version = "0.8.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d18470a76cb7f8ff746cf1f7470914f900252ec36bbc40b569d74b1258446827" +dependencies = [ + "cc", + "cfg-if", + "libc", + "log", + "rustversion", + "windows", +] + +[[package]] +name = "half" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "minicov" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b" +dependencies = [ + "cc", + "walkdir", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" +dependencies = [ + "loom", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.142" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "waker-fn" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-bindgen-test" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3" +dependencies = [ + "js-sys", + "minicov", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + 
"js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +dependencies = [ + "windows-collections", + "windows-core", + "windows-future", + "windows-link", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +dependencies = [ + "windows-core", +] + +[[package]] +name = "windows-core" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-future" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" +dependencies = [ + "windows-core", + "windows-link", + "windows-threading", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-numerics" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" +dependencies = [ + "windows-core", + "windows-link", +] + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/external/vendor/event-listener/Cargo.toml b/external/vendor/event-listener/Cargo.toml new file mode 100644 index 0000000000..8a23d6b36e --- /dev/null +++ b/external/vendor/event-listener/Cargo.toml @@ -0,0 +1,139 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.60" +name = "event-listener" +version = "5.4.1" +authors = [ + "Stjepan Glavina <stjepang@gmail.com>", + "John Nunley <dev@notgull.net>", +] +build = false +exclude = ["/.*"] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Notify async tasks or threads" +readme = "README.md" +keywords = [ + "condvar", + "eventcount", + "wake", + "blocking", + "park", +] +categories = [ + "asynchronous", + "concurrency", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/event-listener" + +[features] +default = ["std"] +loom = [ + "concurrent-queue/loom", + "parking?/loom", + "dep:loom", +] +portable-atomic = [ + "portable-atomic-util", + "portable_atomic_crate", + "concurrent-queue/portable-atomic", +] +std = [ + "concurrent-queue/std", + "parking", +] + +[lib] +name = "event_listener" +path = "src/lib.rs" +bench = false + +[[example]] +name = "mutex" +path = "examples/mutex.rs" + +[[test]] +name = "loom" +path = "tests/loom.rs" + +[[test]] +name = "notify" +path = "tests/notify.rs" + +[[bench]] +name = "bench" +path = "benches/bench.rs" +harness = false + +[dependencies.concurrent-queue] +version = "2.4.0" +default-features = false + +[dependencies.critical-section] +version = "1.2.0" +optional = true +default-features = false + +[dependencies.pin-project-lite] +version = "0.2.12" + +[dependencies.portable-atomic-util] +version = "0.2.0" +features = ["alloc"] +optional = true +default-features = false + +[dependencies.portable_atomic_crate] +version = "1.2.0" +optional = true +default-features = false +package = "portable-atomic" + +[dev-dependencies.criterion] +version = "0.7" +features = ["cargo_bench_support"] +default-features = false + +[dev-dependencies.critical-section] +version = "1.2.0" +features = ["std"] + +[dev-dependencies.futures-lite] +version = "2.0.0" + +[dev-dependencies.try-lock] +version = "0.2.5" + +[dev-dependencies.waker-fn] +version = "1" + +[target."cfg(loom)".dependencies.loom] +version = "0.7" +optional = true + +[target.'cfg(not(target_family = "wasm"))'.dependencies.parking] +version = "2.0.0" +optional = true + +[target.'cfg(target_family = "wasm")'.dev-dependencies.wasm-bindgen-test] +version = "0.3" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ["cfg(loom)"] diff --git a/external/vendor/event-listener/Cargo.toml.orig b/external/vendor/event-listener/Cargo.toml.orig new file mode 100644 index 0000000000..0cb5cf3413 --- /dev/null +++ b/external/vendor/event-listener/Cargo.toml.orig @@ -0,0 +1,67 @@ +[package] +name = "event-listener" +# When publishing a new version: +# - Update CHANGELOG.md +# - Create "v5.x.y" git tag +version = "5.4.1" +authors = ["Stjepan Glavina <stjepang@gmail.com>", "John Nunley <dev@notgull.net>"] +edition = "2021" +rust-version = "1.60" +description = "Notify async tasks or threads" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/event-listener" +keywords = ["condvar", "eventcount", "wake", "blocking", "park"] +categories = ["asynchronous", "concurrency"] +exclude = ["/.*"] + +[features] +default = ["std"] +std = ["concurrent-queue/std", "parking"] +portable-atomic = [ + "portable-atomic-util", + "portable_atomic_crate", + "concurrent-queue/portable-atomic", +] +loom = ["concurrent-queue/loom", "parking?/loom", "dep:loom"] + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(loom)'] } + +[dependencies] +concurrent-queue = { version = "2.4.0", default-features = false } +critical-section = { version = "1.2.0",
default-features = false, optional = true } +pin-project-lite = "0.2.12" +portable-atomic-util = { version = "0.2.0", default-features = false, optional = true, features = ["alloc"] } + +[target.'cfg(not(target_family = "wasm"))'.dependencies] +parking = { version = "2.0.0", optional = true } + +[target.'cfg(loom)'.dependencies] +loom = { version = "0.7", optional = true } + +[dependencies.portable_atomic_crate] +package = "portable-atomic" +version = "1.2.0" +default-features = false +optional = true + +[dev-dependencies] +critical-section = { version = "1.2.0", features = ["std"] } +futures-lite = "2.0.0" +try-lock = "0.2.5" +waker-fn = "1" + +[dev-dependencies.criterion] +version = "0.7" +default-features = false +features = ["cargo_bench_support"] + +[target.'cfg(target_family = "wasm")'.dev-dependencies] +wasm-bindgen-test = "0.3" + +[[bench]] +name = "bench" +harness = false + +[lib] +bench = false diff --git a/external/vendor/event-listener/LICENSE-APACHE b/external/vendor/event-listener/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/external/vendor/event-listener/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/external/vendor/event-listener/LICENSE-MIT b/external/vendor/event-listener/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/external/vendor/event-listener/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/external/vendor/event-listener/README.md b/external/vendor/event-listener/README.md new file mode 100644 index 0000000000..80aaef98dc --- /dev/null +++ b/external/vendor/event-listener/README.md @@ -0,0 +1,86 @@ +# event-listener + +[![Build](https://github.com/smol-rs/event-listener/workflows/CI/badge.svg)]( +https://github.com/smol-rs/event-listener/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/event-listener) +[![Cargo](https://img.shields.io/crates/v/event-listener.svg)]( +https://crates.io/crates/event-listener) +[![Documentation](https://docs.rs/event-listener/badge.svg)]( +https://docs.rs/event-listener) + +Notify async tasks or threads. + +This is a synchronization primitive similar to [eventcounts] invented by Dmitry Vyukov. + +You can use this crate to turn non-blocking data structures into async or blocking data +structures. See a [simple mutex] implementation that exposes an async and a blocking interface +for acquiring locks. + +[eventcounts]: https://www.1024cores.net/home/lock-free-algorithms/eventcounts +[simple mutex]: ./examples/mutex.rs + +## Examples + +Wait until another thread sets a boolean flag: + +```rust +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::thread; +use std::time::Duration; +use event_listener::Event; + +let flag = Arc::new(AtomicBool::new(false)); +let event = Arc::new(Event::new()); + +// Spawn a thread that will set the flag after 1 second. +thread::spawn({ + let flag = flag.clone(); + let event = event.clone(); + move || { + // Wait for a second. + thread::sleep(Duration::from_secs(1)); + + // Set the flag. + flag.store(true, Ordering::SeqCst); + + // Notify all listeners that the flag has been set. + event.notify(usize::MAX); + } +}); + +// Wait until the flag is set. +loop { + // Check the flag. + if flag.load(Ordering::SeqCst) { + break; + } + + // Start listening for events. + let listener = event.listen(); + + // Check the flag again after creating the listener. + if flag.load(Ordering::SeqCst) { + break; + } + + // Wait for a notification and continue the loop. + listener.wait(); +} +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/external/vendor/event-listener/benches/bench.rs b/external/vendor/event-listener/benches/bench.rs new file mode 100644 index 0000000000..d9e0db16d1 --- /dev/null +++ b/external/vendor/event-listener/benches/bench.rs @@ -0,0 +1,26 @@ +use std::iter; + +use criterion::{criterion_group, criterion_main, Criterion}; +use event_listener::{Event, Listener}; + +const COUNT: usize = 8000; + +fn bench_events(c: &mut Criterion) { + c.bench_function("notify_and_wait", |b| { + let ev = Event::new(); + let mut handles = Vec::with_capacity(COUNT); + + b.iter(|| { + handles.extend(iter::repeat_with(|| ev.listen()).take(COUNT)); + + ev.notify(COUNT); + + for handle in handles.drain(..) { + handle.wait(); + } + }); + }); +} + +criterion_group!(benches, bench_events); +criterion_main!(benches); diff --git a/external/vendor/event-listener/examples/mutex.rs b/external/vendor/event-listener/examples/mutex.rs new file mode 100644 index 0000000000..59b03f792c --- /dev/null +++ b/external/vendor/event-listener/examples/mutex.rs @@ -0,0 +1,183 @@ +//! A simple mutex implementation. +//! +//! This mutex exposes both blocking and async methods for acquiring a lock. + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +mod example { + #![allow(dead_code)] + + use std::ops::{Deref, DerefMut}; + use std::sync::{mpsc, Arc}; + use std::thread; + use std::time::{Duration, Instant}; + + use event_listener::{listener, Event, Listener}; + use try_lock::{Locked, TryLock}; + + /// A simple mutex. + struct Mutex<T> { + /// Blocked lock operations. + lock_ops: Event, + + /// The inner non-blocking mutex. + data: TryLock<T>, + } + + unsafe impl<T: Send> Send for Mutex<T> {} + unsafe impl<T: Send> Sync for Mutex<T> {} + + impl<T> Mutex<T> { + /// Creates a mutex. + fn new(t: T) -> Mutex<T> { + Mutex { + lock_ops: Event::new(), + data: TryLock::new(t), + } + } + + /// Attempts to acquire a lock. + fn try_lock(&self) -> Option<MutexGuard<'_, T>> { + self.data.try_lock().map(|l| MutexGuard { + lock_ops: &self.lock_ops, + locked: Some(l), + }) + } + + /// Blocks until a lock is acquired. + fn lock(&self) -> MutexGuard<'_, T> { + loop { + // Attempt grabbing a lock. + if let Some(guard) = self.try_lock() { + return guard; + } + + // Set up an event listener. + listener!(self.lock_ops => listener); + + // Try again. + if let Some(guard) = self.try_lock() { + return guard; + } + + // Wait for a notification. + listener.wait(); + } + } + + /// Blocks until a lock is acquired or the timeout is reached. + fn lock_timeout(&self, timeout: Duration) -> Option<MutexGuard<'_, T>> { + let deadline = Instant::now() + timeout; + + loop { + // Attempt grabbing a lock. + if let Some(guard) = self.try_lock() { + return Some(guard); + } + + // Set up an event listener. + listener!(self.lock_ops => listener); + + // Try again. + if let Some(guard) = self.try_lock() { + return Some(guard); + } + + // Wait until a notification is received. + listener.wait_deadline(deadline)?; + } + } + + /// Acquires a lock asynchronously. + async fn lock_async(&self) -> MutexGuard<'_, T> { + loop { + // Attempt grabbing a lock. + if let Some(guard) = self.try_lock() { + return guard; + } + + // Set up an event listener. + listener!(self.lock_ops => listener); + + // Try again. + if let Some(guard) = self.try_lock() { + return guard; + } + + // Wait until a notification is received. + listener.await; + } + } + } + + /// A guard holding a lock.
+ struct MutexGuard<'a, T> { + lock_ops: &'a Event, + locked: Option<Locked<'a, T>>, + } + + impl<T> Deref for MutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + self.locked.as_deref().unwrap() + } + } + + impl<T> DerefMut for MutexGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + self.locked.as_deref_mut().unwrap() + } + } + + impl<T> Drop for MutexGuard<'_, T> { + fn drop(&mut self) { + self.locked = None; + self.lock_ops.notify(1); + } + } + + pub(super) fn entry() { + const N: usize = 10; + + // A shared counter. + let counter = Arc::new(Mutex::new(0)); + + // A channel that signals when all threads are done. + let (tx, rx) = mpsc::channel(); + + // Spawn a bunch of threads incrementing the counter. + for _ in 0..N { + let counter = counter.clone(); + let tx = tx.clone(); + + thread::spawn(move || { + let mut counter = counter.lock(); + *counter += 1; + + // If this is the last increment, signal that we're done. + if *counter == N { + tx.send(()).unwrap(); + } + }); + } + + // Wait until the last thread increments the counter. + rx.recv().unwrap(); + + // The counter must equal the number of threads. + assert_eq!(*counter.lock(), N); + + println!("Done!"); + } +} + +#[cfg(any(target_family = "wasm", not(feature = "std")))] +mod example { + pub(super) fn entry() { + println!("This example is not supported on wasm yet."); + } +} + +fn main() { + example::entry(); +} diff --git a/external/vendor/event-listener/src/intrusive.rs b/external/vendor/event-listener/src/intrusive.rs new file mode 100644 index 0000000000..71502370f7 --- /dev/null +++ b/external/vendor/event-listener/src/intrusive.rs @@ -0,0 +1,455 @@ +//! Intrusive linked list-based implementation of `event-listener`. +//! +//! This implementation creates an intrusive linked list of listeners. This list +//! is secured using either a libstd mutex or a critical section. + +use crate::notify::{GenericNotify, Internal, Notification}; +use crate::sync::atomic::Ordering; +use crate::sync::cell::{Cell, UnsafeCell}; +use crate::{RegisterResult, State, TaskRef}; + +#[cfg(feature = "critical-section")] +use core::cell::RefCell; +#[cfg(all(feature = "std", not(feature = "critical-section")))] +use core::ops::{Deref, DerefMut}; + +use core::marker::PhantomPinned; +use core::mem; +use core::pin::Pin; +use core::ptr::NonNull; + +pub(super) struct List<T>( + /// libstd-based implementation uses a normal Mutex to secure the data. + #[cfg(all(feature = "std", not(feature = "critical-section")))] + crate::sync::Mutex<Inner<T>>, + /// Critical-section-based implementation uses a CS cell that wraps a RefCell. + #[cfg(feature = "critical-section")] + critical_section::Mutex<RefCell<Inner<T>>>, +); + +struct Inner<T> { + /// The head of the linked list. + head: Option<NonNull<Link<T>>>, + + /// The tail of the linked list. + tail: Option<NonNull<Link<T>>>, + + /// The first unnotified listener. + next: Option<NonNull<Link<T>>>, + + /// Total number of listeners. + len: usize, + + /// The number of notified listeners. + notified: usize, +} + +impl<T> List<T> { + /// Create a new, empty event listener list. + pub(super) fn new() -> Self { + let inner = Inner { + head: None, + tail: None, + next: None, + len: 0, + notified: 0, + }; + + #[cfg(feature = "critical-section")] + { + Self(critical_section::Mutex::new(RefCell::new(inner))) + } + + #[cfg(not(feature = "critical-section"))] + Self(crate::sync::Mutex::new(inner)) + } + + /// Get the total number of listeners without blocking.
+ #[cfg(all(feature = "std", not(feature = "critical-section")))] + pub(crate) fn try_total_listeners(&self) -> Option<usize> { + self.0.try_lock().ok().map(|list| list.len) + } + + /// Get the total number of listeners without blocking. + #[cfg(feature = "critical-section")] + pub(crate) fn try_total_listeners(&self) -> Option<usize> { + Some(self.total_listeners()) + } + + /// Get the total number of listeners with blocking. + #[cfg(all(feature = "std", not(feature = "critical-section")))] + pub(crate) fn total_listeners(&self) -> usize { + self.0.lock().unwrap_or_else(|e| e.into_inner()).len + } + + /// Get the total number of listeners with blocking. + #[cfg(feature = "critical-section")] + #[allow(unused)] + pub(crate) fn total_listeners(&self) -> usize { + critical_section::with(|cs| self.0.borrow(cs).borrow().len) + } +} + +impl<T> crate::Inner<T> { + #[cfg(all(feature = "std", not(feature = "critical-section")))] + fn with_inner<R>(&self, f: impl FnOnce(&mut Inner<T>) -> R) -> R { + struct ListLock<'a, 'b, T> { + lock: crate::sync::MutexGuard<'a, Inner<T>>, + inner: &'b crate::Inner<T>, + } + + impl<T> Deref for ListLock<'_, '_, T> { + type Target = Inner<T>; + + fn deref(&self) -> &Self::Target { + &self.lock + } + } + + impl<T> DerefMut for ListLock<'_, '_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.lock + } + } + + impl<T> Drop for ListLock<'_, '_, T> { + fn drop(&mut self) { + update_notified(&self.inner.notified, &self.lock); + } + } + + let mut list = ListLock { + inner: self, + lock: self.list.0.lock().unwrap_or_else(|e| e.into_inner()), + }; + f(&mut list) + } + + #[cfg(feature = "critical-section")] + fn with_inner<R>(&self, f: impl FnOnce(&mut Inner<T>) -> R) -> R { + struct ListWrapper<'a, T> { + inner: &'a crate::Inner<T>, + list: &'a mut Inner<T>, + } + + impl<T> Drop for ListWrapper<'_, T> { + fn drop(&mut self) { + update_notified(&self.inner.notified, self.list); + } + } + + critical_section::with(move |cs| { + let mut list = self.list.0.borrow_ref_mut(cs); + let wrapper = ListWrapper { + inner: self, + list: &mut *list, + }; + + f(wrapper.list) + }) + } + + /// Add a new listener to the list. + pub(crate) fn insert(&self, mut listener: Pin<&mut Option<Listener<T>>>) { + self.with_inner(|inner| { + listener.as_mut().set(Some(Listener { + link: UnsafeCell::new(Link { + state: Cell::new(State::Created), + prev: Cell::new(inner.tail), + next: Cell::new(None), + }), + _pin: PhantomPinned, + })); + let listener = listener.as_pin_mut().unwrap(); + + { + let entry_guard = listener.link.get(); + // SAFETY: We are locked, so we can access the inner `link`. + let entry = unsafe { entry_guard.deref() }; + + // Replace the tail with the new entry. + match inner.tail.replace(entry.into()) { + None => inner.head = Some(entry.into()), + Some(t) => unsafe { t.as_ref().next.set(Some(entry.into())) }, + }; + } + + // If there are no unnotified entries, this is the first one. + if inner.next.is_none() { + inner.next = inner.tail; + } + + // Bump the entry count. + inner.len += 1; + }); + } + + /// Remove a listener from the list. + pub(crate) fn remove( + &self, + listener: Pin<&mut Option<Listener<T>>>, + propagate: bool, + ) -> Option<State<T>> { + self.with_inner(|inner| inner.remove(listener, propagate)) + } + + /// Notifies a number of entries. + #[cold] + pub(crate) fn notify(&self, notify: impl Notification<Tag = T>) -> usize { + self.with_inner(|inner| inner.notify(notify)) + } + + /// Register a task to be notified when the event is triggered. + /// + /// Returns `RegisterResult::Notified` if the listener was already notified, and `RegisterResult::Registered` otherwise.
If the listener + /// was never inserted, returns `RegisterResult::NeverInserted`. + pub(crate) fn register( + &self, + mut listener: Pin<&mut Option<Listener<T>>>, + task: TaskRef<'_>, + ) -> RegisterResult<T> { + self.with_inner(|inner| { + let entry_guard = match listener.as_mut().as_pin_mut() { + Some(listener) => listener.link.get(), + None => return RegisterResult::NeverInserted, + }; + // SAFETY: We are locked, so we can access the inner `link`. + let entry = unsafe { entry_guard.deref() }; + + // Take out the state and check it. + match entry.state.replace(State::NotifiedTaken) { + State::Notified { tag, .. } => { + // We have been notified, remove the listener. + inner.remove(listener, false); + RegisterResult::Notified(tag) + } + + State::Task(other_task) => { + // Only replace the task if it's different. + entry.state.set(State::Task({ + if !task.will_wake(other_task.as_task_ref()) { + task.into_task() + } else { + other_task + } + })); + + RegisterResult::Registered + } + + _ => { + // We have not been notified, register the task. + entry.state.set(State::Task(task.into_task())); + RegisterResult::Registered + } + } + }) + } +} + +impl<T> Inner<T> { + fn remove( + &mut self, + mut listener: Pin<&mut Option<Listener<T>>>, + propagate: bool, + ) -> Option<State<T>> { + let entry_guard = listener.as_mut().as_pin_mut()?.link.get(); + let entry = unsafe { entry_guard.deref() }; + + let prev = entry.prev.get(); + let next = entry.next.get(); + + // Unlink from the previous entry. + match prev { + None => self.head = next, + Some(p) => unsafe { + p.as_ref().next.set(next); + }, + } + + // Unlink from the next entry. + match next { + None => self.tail = prev, + Some(n) => unsafe { + n.as_ref().prev.set(prev); + }, + } + + // If this was the first unnotified entry, update the next pointer. + if self.next == Some(entry.into()) { + self.next = next; + } + + // The entry is now fully unlinked, so we can now take it out safely. + let entry = unsafe { + listener + .get_unchecked_mut() + .take() + .unwrap() + .link + .into_inner() + }; + + // This State::Created is immediately dropped and exists as a workaround for the absence of + // loom::cell::Cell::into_inner. The intent is `let mut state = entry.state.into_inner();` + // + // refs: https://github.com/tokio-rs/loom/pull/341 + let mut state = entry.state.replace(State::Created); + + // Update the notified count. + if state.is_notified() { + self.notified -= 1; + + if propagate { + let state = mem::replace(&mut state, State::NotifiedTaken); + if let State::Notified { additional, tag } = state { + let tags = { + let mut tag = Some(tag); + move || tag.take().expect("tag already taken") + }; + self.notify(GenericNotify::new(1, additional, tags)); + } + } + } + self.len -= 1; + + Some(state) + } + + #[cold] + fn notify(&mut self, mut notify: impl Notification<Tag = T>) -> usize { + let mut n = notify.count(Internal::new()); + let is_additional = notify.is_additional(Internal::new()); + + if !is_additional { + if n < self.notified { + return 0; + } + n -= self.notified; + } + + let original_count = n; + while n > 0 { + n -= 1; + + // Notify the next entry. + match self.next { + None => return original_count - n - 1, + + Some(e) => { + // Get the entry and move the pointer forwards. + let entry = unsafe { e.as_ref() }; + self.next = entry.next.get(); + + // Set the state to `Notified` and notify. + let tag = notify.next_tag(Internal::new()); + if let State::Task(task) = entry.state.replace(State::Notified { + additional: is_additional, + tag, + }) { + task.wake(); + } + + // Bump the notified count.
+ self.notified += 1; + } + } + } + + original_count - n + } +} + +fn update_notified<T>(slot: &crate::sync::atomic::AtomicUsize, list: &Inner<T>) { + // Update the notified count. + let notified = if list.notified < list.len { + list.notified + } else { + usize::MAX + }; + + slot.store(notified, Ordering::Release); +} + +pub(crate) struct Listener<T> { + /// The inner link in the linked list. + /// + /// # Safety + /// + /// This can only be accessed while the central mutex is locked. + link: UnsafeCell<Link<T>>, + + /// This listener cannot be moved after being pinned. + _pin: PhantomPinned, +} + +struct Link<T> { + /// The current state of the listener. + state: Cell<State<T>>, + + /// The previous link in the linked list. + prev: Cell<Option<NonNull<Link<T>>>>, + + /// The next link in the linked list. + next: Cell<Option<NonNull<Link<T>>>>, +} + +#[cfg(test)] +mod tests { + use super::*; + use futures_lite::pin; + + #[cfg(target_family = "wasm")] + use wasm_bindgen_test::wasm_bindgen_test as test; + + macro_rules! make_listeners { + ($($id:ident),*) => { + $( + let $id = Option::<Listener<()>>::None; + pin!($id); + )* + }; + } + + #[test] + fn insert() { + let inner = crate::Inner::new(); + make_listeners!(listen1, listen2, listen3); + + // Register the listeners. + inner.insert(listen1.as_mut()); + inner.insert(listen2.as_mut()); + inner.insert(listen3.as_mut()); + + assert_eq!(inner.list.try_total_listeners(), Some(3)); + + // Remove one. + assert_eq!(inner.remove(listen2, false), Some(State::Created)); + assert_eq!(inner.list.try_total_listeners(), Some(2)); + + // Remove another. + assert_eq!(inner.remove(listen1, false), Some(State::Created)); + assert_eq!(inner.list.try_total_listeners(), Some(1)); + } + + #[test] + fn drop_non_notified() { + let inner = crate::Inner::new(); + make_listeners!(listen1, listen2, listen3); + + // Register the listeners. + inner.insert(listen1.as_mut()); + inner.insert(listen2.as_mut()); + inner.insert(listen3.as_mut()); + + // Notify one. + inner.notify(GenericNotify::new(1, false, || ())); + + // Remove one. + inner.remove(listen3, true); + + // Remove the rest. + inner.remove(listen1, true); + inner.remove(listen2, true); + } +} diff --git a/external/vendor/event-listener/src/lib.rs b/external/vendor/event-listener/src/lib.rs new file mode 100644 index 0000000000..cb6cdc0902 --- /dev/null +++ b/external/vendor/event-listener/src/lib.rs @@ -0,0 +1,1559 @@ +//! Notify async tasks or threads. +//! +//! This is a synchronization primitive similar to [eventcounts] invented by Dmitry Vyukov. +//! +//! You can use this crate to turn non-blocking data structures into async or blocking data +//! structures. See a [simple mutex] implementation that exposes an async and a blocking interface +//! for acquiring locks. +//! +//! [eventcounts]: https://www.1024cores.net/home/lock-free-algorithms/eventcounts +//! [simple mutex]: https://github.com/smol-rs/event-listener/blob/master/examples/mutex.rs +//! +//! # Examples +//! +//! Wait until another thread sets a boolean flag: +//! +//! ``` +//! # #[cfg(not(target_family = "wasm"))] { // Listener::wait is unavailable on WASM +//! use std::sync::atomic::{AtomicBool, Ordering}; +//! use std::sync::Arc; +//! use std::thread; +//! use std::time::Duration; +//! use std::usize; +//! use event_listener::{Event, Listener}; +//! +//! let flag = Arc::new(AtomicBool::new(false)); +//! let event = Arc::new(Event::new()); +//! +//! // Spawn a thread that will set the flag after 1 second. +//! thread::spawn({ +//! let flag = flag.clone(); +//! let event = event.clone(); +//! move || { +//! // Wait for a second.
+//! thread::sleep(Duration::from_secs(1)); +//! +//! // Set the flag. +//! flag.store(true, Ordering::SeqCst); +//! +//! // Notify all listeners that the flag has been set. +//! event.notify(usize::MAX); +//! } +//! }); +//! +//! // Wait until the flag is set. +//! loop { +//! // Check the flag. +//! if flag.load(Ordering::SeqCst) { +//! break; +//! } +//! +//! // Start listening for events. +//! let mut listener = event.listen(); +//! +//! // Check the flag again after creating the listener. +//! if flag.load(Ordering::SeqCst) { +//! break; +//! } +//! +//! // Wait for a notification and continue the loop. +//! listener.wait(); +//! } +//! # } +//! ``` +//! +//! # Features +//! +//! - The `std` feature (enabled by default) enables the use of the Rust standard library. Disable it for `no_std` +//! support. +//! +//! - The `critical-section` feature enables usage of the [`critical-section`] crate to enable a +//! more efficient implementation of `event-listener` for `no_std` platforms. +//! +//! - The `portable-atomic` feature enables the use of the [`portable-atomic`] crate to provide +//! atomic operations on platforms that don't support them. +//! +//! [`critical-section`]: https://crates.io/crates/critical-section +//! [`portable-atomic`]: https://crates.io/crates/portable-atomic + +#![cfg_attr(not(feature = "std"), no_std)] +#![allow(clippy::multiple_bound_locations)] // This is a WONTFIX issue with pin-project-lite +#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] + +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(feature = "std")] +extern crate std as alloc; + +#[cfg_attr( + any(feature = "std", feature = "critical-section"), + path = "intrusive.rs" +)] +#[cfg_attr( + not(any(feature = "std", feature = "critical-section")), + path = "slab.rs" +)] +mod sys; + +mod notify; + +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; + +use core::borrow::Borrow; +use core::fmt; +use core::future::Future; +use core::mem::ManuallyDrop; +use core::pin::Pin; +use core::ptr; +use core::task::{Context, Poll, Waker}; + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +use { + parking::{Parker, Unparker}, + std::time::{Duration, Instant}, +}; + +use sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use sync::Arc; + +#[cfg(not(loom))] +use sync::WithMut; + +use notify::NotificationPrivate; +pub use notify::{IntoNotification, Notification}; + +/// Inner state of [`Event`]. +struct Inner<T> { + /// The number of notified entries, or `usize::MAX` if all of them have been notified. + /// + /// If there are no entries, this value is set to `usize::MAX`. + notified: AtomicUsize, + + /// Inner queue of event listeners. + /// + /// On `std` platforms, this is an intrusive linked list. On `no_std` platforms, this is a + /// more traditional `Vec` of listeners, with an atomic queue used as a backup for high + /// contention. + list: sys::List<T>, +} + +impl<T> Inner<T> { + fn new() -> Self { + Self { + notified: AtomicUsize::new(usize::MAX), + list: sys::List::new(), + } + } +} + +/// A synchronization primitive for notifying async tasks and threads. +/// +/// Listeners can be registered using [`Event::listen()`]. There are two ways to notify listeners: +/// +/// 1. [`Event::notify()`] notifies a number of listeners.
+/// 2. [`Event::notify_additional()`] notifies a number of previously unnotified listeners. +/// +/// If there are no active listeners at the time a notification is sent, it simply gets lost. +/// +/// There are two ways for a listener to wait for a notification: +/// +/// 1. In an asynchronous manner using `.await`. +/// 2. In a blocking manner by calling [`EventListener::wait()`] on it. +/// +/// If a notified listener is dropped without receiving a notification, dropping will notify +/// another active listener. Whether one *additional* listener will be notified depends on what +/// kind of notification was delivered. +/// +/// Listeners are registered and notified in the first-in first-out fashion, ensuring fairness. +pub struct Event { + /// A pointer to heap-allocated inner state. + /// + /// This pointer is initially null and gets lazily initialized on first use. Semantically, it + /// is an `Arc` so it's important to keep in mind that it contributes to the [`Arc`]'s + /// reference count. + inner: AtomicPtr>, +} + +unsafe impl Send for Event {} +unsafe impl Sync for Event {} + +impl core::panic::UnwindSafe for Event {} +impl core::panic::RefUnwindSafe for Event {} + +impl fmt::Debug for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.try_inner() { + Some(inner) => { + let notified_count = inner.notified.load(Ordering::Relaxed); + let total_count = match inner.list.try_total_listeners() { + Some(total_count) => total_count, + None => { + return f + .debug_tuple("Event") + .field(&format_args!("")) + .finish() + } + }; + + f.debug_struct("Event") + .field("listeners_notified", ¬ified_count) + .field("listeners_total", &total_count) + .finish() + } + None => f + .debug_tuple("Event") + .field(&format_args!("")) + .finish(), + } + } +} + +impl Default for Event { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl Event { + /// Creates a new `Event` with a tag type. + /// + /// Tagging cannot be implemented efficiently on `no_std`, so this is only available when the + /// `std` feature is enabled. + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::::with_tag(); + /// ``` + #[cfg(all(feature = "std", not(loom)))] + #[inline] + pub const fn with_tag() -> Self { + Self { + inner: AtomicPtr::new(ptr::null_mut()), + } + } + #[cfg(all(feature = "std", loom))] + #[inline] + pub fn with_tag() -> Self { + Self { + inner: AtomicPtr::new(ptr::null_mut()), + } + } + + /// Tell whether any listeners are currently notified. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let listener = event.listen(); + /// assert!(!event.is_notified()); + /// + /// event.notify(1); + /// assert!(event.is_notified()); + /// ``` + #[inline] + pub fn is_notified(&self) -> bool { + self.try_inner() + .map_or(false, |inner| inner.notified.load(Ordering::Acquire) > 0) + } + + /// Returns a guard listening for a notification. + /// + /// This method emits a `SeqCst` fence after registering a listener. For now, this method + /// is an alias for calling [`EventListener::new()`], pinning it to the heap, and then + /// inserting it into a list. 
+ /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// let listener = event.listen(); + /// ``` + /// + /// # Caveats + /// + /// The above example is equivalent to this code: + /// + /// ```no_compile + /// use event_listener::{Event, EventListener}; + /// + /// let event = Event::new(); + /// let mut listener = Box::pin(EventListener::new()); + /// listener.listen(&event); + /// ``` + /// + /// It creates a new listener, pins it to the heap, and inserts it into the linked list + /// of listeners. While this type of usage is simple, it may be desired to eliminate this + /// heap allocation. In this case, consider using the [`EventListener::new`] constructor + /// directly, which allows for greater control over where the [`EventListener`] is + /// allocated. However, users of this `new` method must be careful to ensure that the + /// [`EventListener`] is `listen`ing before waiting on it; panics may occur otherwise. + #[cold] + pub fn listen(&self) -> EventListener { + let inner = ManuallyDrop::new(unsafe { Arc::from_raw(self.inner()) }); + + // Allocate the listener on the heap and insert it. + let mut listener = Box::pin(InnerListener { + event: Arc::clone(&inner), + listener: None, + }); + listener.as_mut().listen(); + + // Return the listener. + EventListener { listener } + } + + /// Notifies a number of active listeners. + /// + /// The number is allowed to be zero or exceed the current number of listeners. + /// + /// The [`Notification`] trait is used to define what kind of notification is delivered. + /// The default implementation (implemented on `usize`) is a notification that only notifies + /// *at least* the specified number of listeners. + /// + /// In certain cases, this function emits a `SeqCst` fence before notifying listeners. + /// + /// This function returns the number of [`EventListener`]s that were notified by this call. + /// + /// # Caveats + /// + /// If the `std` feature is disabled, the notification will be delayed under high contention, + /// such as when another thread is taking a while to `notify` the event. In this circumstance, + /// this function will return `0` instead of the number of listeners actually notified. Therefore + /// if the `std` feature is disabled the return value of this function should not be relied upon + /// for soundness and should be used only as a hint. + /// + /// If the `std` feature is enabled, no spurious returns are possible, since the `std` + /// implementation uses system locking primitives to ensure there is no unavoidable + /// contention. + /// + /// # Examples + /// + /// Use the default notification strategy: + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(2); + /// ``` + /// + /// Notify without emitting a `SeqCst` fence. This uses the [`relaxed`] notification strategy. + /// This is equivalent to calling [`Event::notify_relaxed()`]. 
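+    ///
+    /// Skipping the fence is only appropriate when the caller provides the
+    /// required ordering itself, for example with the manual
+    /// `atomic::fence(Ordering::SeqCst)` shown below.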
+ /// + /// [`relaxed`]: IntoNotification::relaxed + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1.relaxed()); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(2.relaxed()); + /// ``` + /// + /// Notify additional listeners. In contrast to [`Event::notify()`], this method will notify `n` + /// *additional* listeners that were previously unnotified. This uses the [`additional`] + /// notification strategy. This is equivalent to calling [`Event::notify_additional()`]. + /// + /// [`additional`]: IntoNotification::additional + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1.additional()); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(1.additional()); + /// event.notify(1.additional()); + /// ``` + /// + /// Notifies with the [`additional`] and [`relaxed`] strategies at the same time. This is + /// equivalent to calling [`Event::notify_additional_relaxed()`]. + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1.additional().relaxed()); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(1.additional().relaxed()); + /// event.notify(1.additional().relaxed()); + /// ``` + #[inline] + pub fn notify(&self, notify: impl IntoNotification) -> usize { + let notify = notify.into_notification(); + + // Make sure the notification comes after whatever triggered it. + notify.fence(notify::Internal::new()); + + let inner = unsafe { &*self.inner() }; + inner.notify(notify) + } + + /// Return a reference to the inner state if it has been initialized. + #[inline] + fn try_inner(&self) -> Option<&Inner> { + let inner = self.inner.load(Ordering::Acquire); + unsafe { inner.as_ref() } + } + + /// Returns a raw, initialized pointer to the inner state. + /// + /// This returns a raw pointer instead of reference because `from_raw` + /// requires raw/mut provenance: . 
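+    ///
+    /// If two threads race to initialize the state, the loser of the
+    /// `compare_exchange` drops its freshly allocated `Arc` and uses the
+    /// winner's pointer, so the inner state is only ever created once.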
+ fn inner(&self) -> *const Inner { + let mut inner = self.inner.load(Ordering::Acquire); + + // If this is the first use, initialize the state. + if inner.is_null() { + // Allocate the state on the heap. + let new = Arc::new(Inner::::new()); + + // Convert the state to a raw pointer. + let new = Arc::into_raw(new) as *mut Inner; + + // Replace the null pointer with the new state pointer. + inner = self + .inner + .compare_exchange(inner, new, Ordering::AcqRel, Ordering::Acquire) + .unwrap_or_else(|x| x); + + // Check if the old pointer value was indeed null. + if inner.is_null() { + // If yes, then use the new state pointer. + inner = new; + } else { + // If not, that means a concurrent operation has initialized the state. + // In that case, use the old pointer and deallocate the new one. + unsafe { + drop(Arc::from_raw(new)); + } + } + } + + inner + } + + /// Get the number of listeners currently listening to this [`Event`]. + /// + /// This call returns the number of [`EventListener`]s that are currently listening to + /// this event. It does this by acquiring the internal event lock and reading the listener + /// count. Therefore it is only available for `std`-enabled platforms. + /// + /// # Caveats + /// + /// This function returns just a snapshot of the number of listeners at this point in time. + /// Due to the nature of multi-threaded CPUs, it is possible that this number will be + /// inaccurate by the time that this function returns. + /// + /// It is possible for the actual number to change at any point. Therefore, the number should + /// only ever be used as a hint. + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// + /// assert_eq!(event.total_listeners(), 0); + /// + /// let listener1 = event.listen(); + /// assert_eq!(event.total_listeners(), 1); + /// + /// let listener2 = event.listen(); + /// assert_eq!(event.total_listeners(), 2); + /// + /// drop(listener1); + /// drop(listener2); + /// assert_eq!(event.total_listeners(), 0); + /// ``` + #[cfg(feature = "std")] + #[inline] + pub fn total_listeners(&self) -> usize { + if let Some(inner) = self.try_inner() { + inner.list.total_listeners() + } else { + 0 + } + } +} + +impl Event<()> { + /// Creates a new [`Event`]. + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// ``` + #[inline] + #[cfg(not(loom))] + pub const fn new() -> Self { + Self { + inner: AtomicPtr::new(ptr::null_mut()), + } + } + + #[inline] + #[cfg(loom)] + pub fn new() -> Self { + Self { + inner: AtomicPtr::new(ptr::null_mut()), + } + } + + /// Notifies a number of active listeners without emitting a `SeqCst` fence. + /// + /// The number is allowed to be zero or exceed the current number of listeners. + /// + /// In contrast to [`Event::notify_additional()`], this method only makes sure *at least* `n` + /// listeners among the active ones are notified. + /// + /// Unlike [`Event::notify()`], this method does not emit a `SeqCst` fence. + /// + /// This method only works for untagged events. 
In other cases, it is recommended to instead + /// use [`Event::notify()`] like so: + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// let event = Event::new(); + /// + /// // Old way: + /// event.notify_relaxed(1); + /// + /// // New way: + /// event.notify(1.relaxed()); + /// ``` + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, IntoNotification}; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify_relaxed(1); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify_relaxed(2); + /// ``` + #[inline] + pub fn notify_relaxed(&self, n: usize) -> usize { + self.notify(n.relaxed()) + } + + /// Notifies a number of active and still unnotified listeners. + /// + /// The number is allowed to be zero or exceed the current number of listeners. + /// + /// In contrast to [`Event::notify()`], this method will notify `n` *additional* listeners that + /// were previously unnotified. + /// + /// This method emits a `SeqCst` fence before notifying listeners. + /// + /// This method only works for untagged events. In other cases, it is recommended to instead + /// use [`Event::notify()`] like so: + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// let event = Event::new(); + /// + /// // Old way: + /// event.notify_additional(1); + /// + /// // New way: + /// event.notify(1.additional()); + /// ``` + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify_additional(1); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify_additional(1); + /// event.notify_additional(1); + /// ``` + #[inline] + pub fn notify_additional(&self, n: usize) -> usize { + self.notify(n.additional()) + } + + /// Notifies a number of active and still unnotified listeners without emitting a `SeqCst` + /// fence. + /// + /// The number is allowed to be zero or exceed the current number of listeners. + /// + /// In contrast to [`Event::notify()`], this method will notify `n` *additional* listeners that + /// were previously unnotified. + /// + /// Unlike [`Event::notify_additional()`], this method does not emit a `SeqCst` fence. + /// + /// This method only works for untagged events. 
In other cases, it is recommended to instead + /// use [`Event::notify()`] like so: + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// let event = Event::new(); + /// + /// // Old way: + /// event.notify_additional_relaxed(1); + /// + /// // New way: + /// event.notify(1.additional().relaxed()); + /// ``` + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify_additional_relaxed(1); + /// event.notify_additional_relaxed(1); + /// ``` + #[inline] + pub fn notify_additional_relaxed(&self, n: usize) -> usize { + self.notify(n.additional().relaxed()) + } +} + +impl Drop for Event { + #[inline] + fn drop(&mut self) { + self.inner.with_mut(|&mut inner| { + // If the state pointer has been initialized, drop it. + if !inner.is_null() { + unsafe { + drop(Arc::from_raw(inner)); + } + } + }) + } +} + +/// A handle that is listening to an [`Event`]. +/// +/// This trait represents a type waiting for a notification from an [`Event`]. See the +/// [`EventListener`] type for more documentation on this trait's usage. +pub trait Listener: Future + __sealed::Sealed { + /// Blocks until a notification is received. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let mut listener = event.listen(); + /// + /// // Notify `listener`. + /// event.notify(1); + /// + /// // Receive the notification. + /// listener.wait(); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait(self) -> T; + + /// Blocks until a notification is received or a timeout is reached. + /// + /// Returns `Some` if a notification was received. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let mut listener = event.listen(); + /// + /// // There are no notification so this times out. + /// assert!(listener.wait_timeout(Duration::from_secs(1)).is_none()); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_timeout(self, timeout: Duration) -> Option; + + /// Blocks until a notification is received or a deadline is reached. + /// + /// Returns `true` if a notification was received. + /// + /// # Examples + /// + /// ``` + /// use std::time::{Duration, Instant}; + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let mut listener = event.listen(); + /// + /// // There are no notification so this times out. + /// assert!(listener.wait_deadline(Instant::now() + Duration::from_secs(1)).is_none()); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_deadline(self, deadline: Instant) -> Option; + + /// Drops this listener and discards its notification (if any) without notifying another + /// active listener. + /// + /// Returns `true` if a notification was discarded. 
+ /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let mut listener1 = event.listen(); + /// let mut listener2 = event.listen(); + /// + /// event.notify(1); + /// + /// assert!(listener1.discard()); + /// assert!(!listener2.discard()); + /// ``` + fn discard(self) -> bool; + + /// Returns `true` if this listener listens to the given `Event`. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let listener = event.listen(); + /// + /// assert!(listener.listens_to(&event)); + /// ``` + fn listens_to(&self, event: &Event) -> bool; + + /// Returns `true` if both listeners listen to the same `Event`. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// + /// assert!(listener1.same_event(&listener2)); + /// ``` + fn same_event(&self, other: &Self) -> bool; +} + +/// Implement the `Listener` trait using the underlying `InnerListener`. +macro_rules! forward_impl_to_listener { + ($gen:ident => $ty:ty) => { + impl<$gen> crate::Listener<$gen> for $ty { + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait(mut self) -> $gen { + self.listener_mut().wait_internal(None).unwrap() + } + + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_timeout(mut self, timeout: std::time::Duration) -> Option<$gen> { + self.listener_mut() + .wait_internal(std::time::Instant::now().checked_add(timeout)) + } + + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_deadline(mut self, deadline: std::time::Instant) -> Option<$gen> { + self.listener_mut().wait_internal(Some(deadline)) + } + + fn discard(mut self) -> bool { + self.listener_mut().discard() + } + + #[inline] + fn listens_to(&self, event: &Event<$gen>) -> bool { + core::ptr::eq::>( + &*self.listener().event, + event.inner.load(core::sync::atomic::Ordering::Acquire), + ) + } + + #[inline] + fn same_event(&self, other: &$ty) -> bool { + core::ptr::eq::>(&*self.listener().event, &*other.listener().event) + } + } + + impl<$gen> Future for $ty { + type Output = $gen; + + #[inline] + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<$gen> { + self.listener_mut().poll_internal(cx) + } + } + }; +} + +/// A guard waiting for a notification from an [`Event`]. +/// +/// There are two ways for a listener to wait for a notification: +/// +/// 1. In an asynchronous manner using `.await`. +/// 2. In a blocking manner by calling [`EventListener::wait()`] on it. +/// +/// If a notified listener is dropped without receiving a notification, dropping will notify +/// another active listener. Whether one *additional* listener will be notified depends on what +/// kind of notification was delivered. +/// +/// See the [`Listener`] trait for the functionality exposed by this type. +/// +/// This structure allocates the listener on the heap. 
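+///
+/// # Examples
+///
+/// A minimal sketch of both ways to receive a notification; the async variant
+/// assumes `futures_lite` (used elsewhere in this crate's tests) for the
+/// executor:
+///
+/// ```
+/// # #[cfg(not(target_family = "wasm"))] { // Listener::wait is unavailable on WASM
+/// use event_listener::{Event, Listener};
+///
+/// let event = Event::new();
+///
+/// // Blocking: park the current thread until notified.
+/// let listener = event.listen();
+/// event.notify(1);
+/// listener.wait();
+///
+/// // Asynchronous: `EventListener` is a `Future` that resolves to the
+/// // notification's tag (here `()`).
+/// let listener = event.listen();
+/// event.notify(1);
+/// futures_lite::future::block_on(listener); // (assumes the `futures-lite` dev-dependency)
+/// # }
+/// ```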
+pub struct EventListener { + listener: Pin>>>>, +} + +unsafe impl Send for EventListener {} +unsafe impl Sync for EventListener {} + +impl core::panic::UnwindSafe for EventListener {} +impl core::panic::RefUnwindSafe for EventListener {} +impl Unpin for EventListener {} + +impl fmt::Debug for EventListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("EventListener").finish_non_exhaustive() + } +} + +impl EventListener { + #[inline] + fn listener(&self) -> &InnerListener>> { + &self.listener + } + + #[inline] + fn listener_mut(&mut self) -> Pin<&mut InnerListener>>> { + self.listener.as_mut() + } +} + +forward_impl_to_listener! { T => EventListener } + +/// Create a stack-based event listener for an [`Event`]. +/// +/// [`EventListener`] allocates the listener on the heap. While this works for most use cases, in +/// practice this heap allocation can be expensive for repeated uses. This method allows for +/// allocating the listener on the stack instead. +/// +/// There are limitations to using this macro instead of the [`EventListener`] type, however. +/// Firstly, it is significantly less flexible. The listener is locked to the current stack +/// frame, meaning that it can't be returned or put into a place where it would go out of +/// scope. For instance, this will not work: +/// +/// ```compile_fail +/// use event_listener::{Event, Listener, listener}; +/// +/// fn get_listener(event: &Event) -> impl Listener { +/// listener!(event => cant_return_this); +/// cant_return_this +/// } +/// ``` +/// +/// In addition, the types involved in creating this listener are not able to be named. Therefore +/// it cannot be used in hand-rolled futures or similar structures. +/// +/// The type created by this macro implements [`Listener`], allowing it to be used in cases where +/// [`EventListener`] would normally be used. +/// +/// ## Example +/// +/// To use this macro, replace cases where you would normally use this... +/// +/// ```no_compile +/// let listener = event.listen(); +/// ``` +/// +/// ...with this: +/// +/// ```no_compile +/// listener!(event => listener); +/// ``` +/// +/// Here is the top level example from this crate's documentation, but using [`listener`] instead +/// of [`EventListener`]. +/// +/// ``` +/// # #[cfg(not(target_family = "wasm"))] { // Listener::wait is unavailable on WASM +/// use std::sync::atomic::{AtomicBool, Ordering}; +/// use std::sync::Arc; +/// use std::thread; +/// use std::time::Duration; +/// use std::usize; +/// use event_listener::{Event, listener, IntoNotification, Listener}; +/// +/// let flag = Arc::new(AtomicBool::new(false)); +/// let event = Arc::new(Event::new()); +/// +/// // Spawn a thread that will set the flag after 1 second. +/// thread::spawn({ +/// let flag = flag.clone(); +/// let event = event.clone(); +/// move || { +/// // Wait for a second. +/// thread::sleep(Duration::from_secs(1)); +/// +/// // Set the flag. +/// flag.store(true, Ordering::SeqCst); +/// +/// // Notify all listeners that the flag has been set. +/// event.notify(usize::MAX); +/// } +/// }); +/// +/// // Wait until the flag is set. +/// loop { +/// // Check the flag. +/// if flag.load(Ordering::SeqCst) { +/// break; +/// } +/// +/// // Start listening for events. +/// // NEW: Changed to a stack-based listener. +/// listener!(event => listener); +/// +/// // Check the flag again after creating the listener. +/// if flag.load(Ordering::SeqCst) { +/// break; +/// } +/// +/// // Wait for a notification and continue the loop. 
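+///     // (Unlike `event.listen()`, the listener created by `listener!` lives on
+///     // this stack frame, so the loop does not allocate on the heap each time.)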
+/// listener.wait(); +/// } +/// # } +/// ``` +#[macro_export] +macro_rules! listener { + ($event:expr => $listener:ident) => { + let mut $listener = $crate::__private::StackSlot::new(&$event); + // SAFETY: We shadow $listener so it can't be moved after. + let mut $listener = unsafe { $crate::__private::Pin::new_unchecked(&mut $listener) }; + #[allow(unused_mut)] + let mut $listener = $listener.listen(); + }; +} + +pin_project_lite::pin_project! { + #[project(!Unpin)] + #[project = ListenerProject] + struct InnerListener>> + where + B: Unpin, + { + // The reference to the original event. + event: B, + + // The inner state of the listener. + // + // This is only ever `None` during initialization. After `listen()` has completed, this + // should be `Some`. + #[pin] + listener: Option>, + } + + impl>> PinnedDrop for InnerListener + where + B: Unpin, + { + fn drop(mut this: Pin<&mut Self>) { + // If we're being dropped, we need to remove ourself from the list. + let this = this.project(); + (*this.event).borrow().remove(this.listener, true); + } + } +} + +unsafe impl> + Unpin + Send> Send for InnerListener {} +unsafe impl> + Unpin + Sync> Sync for InnerListener {} + +impl> + Unpin> InnerListener { + /// Insert this listener into the linked list. + #[inline] + fn listen(self: Pin<&mut Self>) { + let this = self.project(); + (*this.event).borrow().insert(this.listener); + } + + /// Wait until the provided deadline. + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_internal(mut self: Pin<&mut Self>, deadline: Option) -> Option { + fn parker_and_task() -> (Parker, Task) { + let parker = Parker::new(); + let unparker = parker.unparker(); + (parker, Task::Unparker(unparker)) + } + + crate::sync::thread_local! { + /// Cached thread-local parker/unparker pair. + static PARKER: (Parker, Task) = parker_and_task(); + } + + // Try to borrow the thread-local parker/unparker pair. + PARKER + .try_with({ + let this = self.as_mut(); + |(parker, unparker)| this.wait_with_parker(deadline, parker, unparker.as_task_ref()) + }) + .unwrap_or_else(|_| { + // If the pair isn't accessible, we may be being called in a destructor. + // Just create a new pair. + let (parker, unparker) = parking::pair(); + self.as_mut() + .wait_with_parker(deadline, &parker, TaskRef::Unparker(&unparker)) + }) + } + + /// Wait until the provided deadline using the specified parker/unparker pair. + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_with_parker( + self: Pin<&mut Self>, + deadline: Option, + parker: &Parker, + unparker: TaskRef<'_>, + ) -> Option { + let mut this = self.project(); + let inner = (*this.event).borrow(); + + // Set the listener's state to `Task`. + if let Some(tag) = inner.register(this.listener.as_mut(), unparker).notified() { + // We were already notified, so we don't need to park. + return Some(tag); + } + + // Wait until a notification is received or the timeout is reached. + loop { + match deadline { + None => parker.park(), + + #[cfg(loom)] + Some(_deadline) => { + panic!("parking does not support timeouts under loom"); + } + + #[cfg(not(loom))] + Some(deadline) => { + // Make sure we're not timed out already. + let now = Instant::now(); + if now >= deadline { + // Remove our entry and check if we were notified. + return inner + .remove(this.listener.as_mut(), false) + .expect("We never removed ourself from the list") + .notified(); + } + parker.park_deadline(deadline); + } + } + + // See if we were notified. 
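+            // `register` re-arms the listener with our unparker and yields the
+            // notification's tag if one arrived while we were parked; otherwise
+            // we go around the loop and park again.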
+            if let Some(tag) = inner.register(this.listener.as_mut(), unparker).notified() {
+                return Some(tag);
+            }
+        }
+    }
+
+    /// Drops this listener and discards its notification (if any) without notifying another
+    /// active listener.
+    fn discard(self: Pin<&mut Self>) -> bool {
+        let this = self.project();
+        (*this.event)
+            .borrow()
+            .remove(this.listener, false)
+            .map_or(false, |state| state.is_notified())
+    }
+
+    /// Poll this listener for a notification.
+    fn poll_internal(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+        let this = self.project();
+        let inner = (*this.event).borrow();
+
+        // Try to register the listener.
+        match inner
+            .register(this.listener, TaskRef::Waker(cx.waker()))
+            .notified()
+        {
+            Some(tag) => {
+                // We were already notified, so we don't need to park.
+                Poll::Ready(tag)
+            }
+
+            None => {
+                // We're now waiting for a notification.
+                Poll::Pending
+            }
+        }
+    }
+}
+
+/// The state of a listener.
+#[derive(PartialEq)]
+enum State<T> {
+    /// The listener was just created.
+    Created,
+
+    /// The listener has received a notification.
+    ///
+    /// The `bool` is `true` if this was an "additional" notification.
+    Notified {
+        /// Whether or not this is an "additional" notification.
+        additional: bool,
+
+        /// The tag associated with the notification.
+        tag: T,
+    },
+
+    /// A task is waiting for a notification.
+    Task(Task),
+
+    /// Empty hole used to replace a notified listener.
+    NotifiedTaken,
+}
+
+impl<T> fmt::Debug for State<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Created => f.write_str("Created"),
+            Self::Notified { additional, .. } => f
+                .debug_struct("Notified")
+                .field("additional", additional)
+                .finish(),
+            Self::Task(_) => f.write_str("Task(_)"),
+            Self::NotifiedTaken => f.write_str("NotifiedTaken"),
+        }
+    }
+}
+
+impl<T> State<T> {
+    fn is_notified(&self) -> bool {
+        matches!(self, Self::Notified { .. } | Self::NotifiedTaken)
+    }
+
+    /// If this state was notified, return the tag associated with the notification.
+    #[allow(unused)]
+    fn notified(self) -> Option<T> {
+        match self {
+            Self::Notified { tag, .. } => Some(tag),
+            Self::NotifiedTaken => panic!("listener was already notified but taken"),
+            _ => None,
+        }
+    }
+}
+
+/// The result of registering a listener.
+#[derive(Debug, PartialEq)]
+enum RegisterResult<T> {
+    /// The listener was already notified.
+    Notified(T),
+
+    /// The listener has been registered.
+    Registered,
+
+    /// The listener was never inserted into the list.
+    NeverInserted,
+}
+
+impl<T> RegisterResult<T> {
+    /// Whether or not the listener was notified.
+    ///
+    /// Panics if the listener was never inserted into the list.
+    fn notified(self) -> Option<T> {
+        match self {
+            Self::Notified(tag) => Some(tag),
+            Self::Registered => None,
+            Self::NeverInserted => panic!("{}", NEVER_INSERTED_PANIC),
+        }
+    }
+}
+
+/// A task that can be woken up.
+#[derive(Debug, Clone)]
+enum Task {
+    /// A waker that wakes up a future.
+    Waker(Waker),
+
+    /// An unparker that wakes up a thread.
+    #[cfg(all(feature = "std", not(target_family = "wasm")))]
+    Unparker(Unparker),
+}
+
+impl Task {
+    fn as_task_ref(&self) -> TaskRef<'_> {
+        match self {
+            Self::Waker(waker) => TaskRef::Waker(waker),
+            #[cfg(all(feature = "std", not(target_family = "wasm")))]
+            Self::Unparker(unparker) => TaskRef::Unparker(unparker),
+        }
+    }
+
+    fn wake(self) {
+        match self {
+            Self::Waker(waker) => waker.wake(),
+            #[cfg(all(feature = "std", not(target_family = "wasm")))]
+            Self::Unparker(unparker) => {
+                unparker.unpark();
+            }
+        }
+    }
+}
+
+impl PartialEq for Task {
+    fn eq(&self, other: &Self) -> bool {
+        self.as_task_ref().will_wake(other.as_task_ref())
+    }
+}
+
+/// A reference to a task.
+#[derive(Clone, Copy)]
+enum TaskRef<'a> {
+    /// A waker that wakes up a future.
+    Waker(&'a Waker),
+
+    /// An unparker that wakes up a thread.
+    #[cfg(all(feature = "std", not(target_family = "wasm")))]
+    Unparker(&'a Unparker),
+}
+
+impl TaskRef<'_> {
+    /// Tells if this task will wake up the other task.
+    #[allow(unreachable_patterns)]
+    fn will_wake(self, other: Self) -> bool {
+        match (self, other) {
+            (Self::Waker(a), Self::Waker(b)) => a.will_wake(b),
+            #[cfg(all(feature = "std", not(target_family = "wasm")))]
+            (Self::Unparker(_), Self::Unparker(_)) => {
+                // TODO: Use unreleased will_unpark API.
+                false
+            }
+            _ => false,
+        }
+    }
+
+    /// Converts this task reference to a task by cloning.
+    fn into_task(self) -> Task {
+        match self {
+            Self::Waker(waker) => Task::Waker(waker.clone()),
+            #[cfg(all(feature = "std", not(target_family = "wasm")))]
+            Self::Unparker(unparker) => Task::Unparker(unparker.clone()),
+        }
+    }
+}
+
+const NEVER_INSERTED_PANIC: &str = "\
+EventListener was not inserted into the linked list, make sure you're not polling \
+EventListener/listener! after it has finished";
+
+#[cfg(not(loom))]
+/// Synchronization primitive implementation.
+mod sync {
+    #[cfg(not(feature = "portable-atomic"))]
+    pub(super) use alloc::sync::Arc;
+    #[cfg(not(feature = "portable-atomic"))]
+    pub(super) use core::sync::atomic;
+
+    #[cfg(feature = "portable-atomic")]
+    pub(super) use portable_atomic_crate as atomic;
+    #[cfg(feature = "portable-atomic")]
+    pub(super) use portable_atomic_util::Arc;
+
+    #[allow(unused)]
+    #[cfg(all(feature = "std", not(feature = "critical-section"), not(loom)))]
+    pub(super) use std::sync::{Mutex, MutexGuard};
+    #[cfg(all(feature = "std", not(target_family = "wasm"), not(loom)))]
+    pub(super) use std::thread_local;
+
+    pub(super) trait WithMut {
+        type Output;
+
+        fn with_mut<F, R>(&mut self, f: F) -> R
+        where
+            F: FnOnce(&mut Self::Output) -> R;
+    }
+
+    impl<T> WithMut for atomic::AtomicPtr<T> {
+        type Output = *mut T;
+
+        #[inline]
+        fn with_mut<F, R>(&mut self, f: F) -> R
+        where
+            F: FnOnce(&mut Self::Output) -> R,
+        {
+            f(self.get_mut())
+        }
+    }
+
+    pub(crate) mod cell {
+        pub(crate) use core::cell::Cell;
+
+        /// This newtype around *mut T exists for interoperability with loom::cell::ConstPtr,
+        /// which works as a guard and performs additional logic to track access scope.
+        pub(crate) struct ConstPtr<T>(*mut T);
+        impl<T> ConstPtr<T> {
+            pub(crate) unsafe fn deref(&self) -> &T {
+                &*self.0
+            }
+
+            #[allow(unused)] // std code does not need this
+            pub(crate) unsafe fn deref_mut(&mut self) -> &mut T {
+                &mut *self.0
+            }
+        }
+
+        /// This UnsafeCell wrapper exists for interoperability with loom::cell::UnsafeCell, and
+        /// only contains the interface that is needed for this crate.
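+        // Mirroring loom's `UnsafeCell`/`ConstPtr` shapes lets call sites in this
+        // crate compile unchanged under both `cfg(loom)` and normal builds.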
+        #[derive(Debug, Default)]
+        pub(crate) struct UnsafeCell<T>(core::cell::UnsafeCell<T>);
+
+        impl<T> UnsafeCell<T> {
+            pub(crate) fn new(data: T) -> UnsafeCell<T> {
+                UnsafeCell(core::cell::UnsafeCell::new(data))
+            }
+
+            pub(crate) fn get(&self) -> ConstPtr<T> {
+                ConstPtr(self.0.get())
+            }
+
+            #[allow(dead_code)] // no_std does not need this
+            pub(crate) fn into_inner(self) -> T {
+                self.0.into_inner()
+            }
+        }
+    }
+}
+
+#[cfg(loom)]
+/// Synchronization primitive implementation.
+mod sync {
+    pub(super) use loom::sync::{atomic, Arc, Mutex, MutexGuard};
+    pub(super) use loom::{cell, thread_local};
+}
+
+fn __test_send_and_sync() {
+    fn _assert_send<T: Send>() {}
+    fn _assert_sync<T: Sync>() {}
+
+    _assert_send::<crate::__private::StackSlot<'_, ()>>();
+    _assert_sync::<crate::__private::StackSlot<'_, ()>>();
+    _assert_send::<crate::__private::StackListener<'_, '_, ()>>();
+    _assert_sync::<crate::__private::StackListener<'_, '_, ()>>();
+    _assert_send::<Event<()>>();
+    _assert_sync::<Event<()>>();
+    _assert_send::<EventListener<()>>();
+    _assert_sync::<EventListener<()>>();
+}
+
+#[doc(hidden)]
+mod __sealed {
+    use super::{EventListener, __private::StackListener};
+
+    pub trait Sealed {}
+    impl<T> Sealed for EventListener<T> {}
+    impl<T> Sealed for StackListener<'_, '_, T> {}
+}
+
+/// Semver exempt module.
+#[doc(hidden)]
+pub mod __private {
+    pub use core::pin::Pin;
+
+    use super::{Event, Inner, InnerListener};
+    use core::fmt;
+    use core::future::Future;
+    use core::task::{Context, Poll};
+
+    pin_project_lite::pin_project! {
+        /// Space on the stack where a stack-based listener can be allocated.
+        #[doc(hidden)]
+        #[project(!Unpin)]
+        pub struct StackSlot<'ev, T> {
+            #[pin]
+            listener: InnerListener<T, &'ev Inner<T>>
+        }
+    }
+
+    impl<T> fmt::Debug for StackSlot<'_, T> {
+        #[inline]
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("StackSlot").finish_non_exhaustive()
+        }
+    }
+
+    impl<T> core::panic::UnwindSafe for StackSlot<'_, T> {}
+    impl<T> core::panic::RefUnwindSafe for StackSlot<'_, T> {}
+    unsafe impl<T: Send> Send for StackSlot<'_, T> {}
+    unsafe impl<T: Send> Sync for StackSlot<'_, T> {}
+
+    impl<'ev, T> StackSlot<'ev, T> {
+        /// Create a new `StackSlot` on the stack.
+        #[inline]
+        #[doc(hidden)]
+        pub fn new(event: &'ev Event<T>) -> Self {
+            let inner = unsafe { &*event.inner() };
+            Self {
+                listener: InnerListener {
+                    event: inner,
+                    listener: None,
+                },
+            }
+        }
+
+        /// Start listening on this `StackSlot`.
+        #[inline]
+        #[doc(hidden)]
+        pub fn listen(mut self: Pin<&mut Self>) -> StackListener<'ev, '_, T> {
+            // Insert ourselves into the list.
+            self.as_mut().project().listener.listen();
+
+            // We are now listening.
+            StackListener { slot: self }
+        }
+    }
+
+    /// A stack-based `EventListener`.
+    #[doc(hidden)]
+    pub struct StackListener<'ev, 'stack, T> {
+        slot: Pin<&'stack mut StackSlot<'ev, T>>,
+    }
+
+    impl<T> core::panic::UnwindSafe for StackListener<'_, '_, T> {}
+    impl<T> core::panic::RefUnwindSafe for StackListener<'_, '_, T> {}
+    impl<T> Unpin for StackListener<'_, '_, T> {}
+
+    impl<T> fmt::Debug for StackListener<'_, '_, T> {
+        #[inline]
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("StackListener").finish_non_exhaustive()
+        }
+    }
+
+    impl<'ev, T> StackListener<'ev, '_, T> {
+        #[inline]
+        fn listener(&self) -> &InnerListener<T, &'ev Inner<T>> {
+            &self.slot.listener
+        }
+
+        #[inline]
+        fn listener_mut(&mut self) -> Pin<&mut InnerListener<T, &'ev Inner<T>>> {
+            self.slot.as_mut().project().listener
+        }
+    }
+
+    forward_impl_to_listener! { T => StackListener<'_, '_, T> }
+}
diff --git a/external/vendor/event-listener/src/notify.rs b/external/vendor/event-listener/src/notify.rs
new file mode 100644
index 0000000000..2591c9d98b
--- /dev/null
+++ b/external/vendor/event-listener/src/notify.rs
@@ -0,0 +1,626 @@
+//! The `Notification` trait for specifying notification.
+
+use crate::sync::atomic::{self, Ordering};
+#[cfg(feature = "std")]
+use core::fmt;
+
+pub(crate) use __private::Internal;
+
+/// The type of notification to use with an [`Event`].
+///
+/// This is hidden and sealed to prevent changes to this trait from being breaking.
+///
+/// [`Event`]: crate::Event
+#[doc(hidden)]
+pub trait NotificationPrivate {
+    /// The tag data associated with a notification.
+    type Tag;
+
+    /// Emit a fence to ensure that the notification is visible to the listeners.
+    fn fence(&self, internal: Internal);
+
+    /// Whether or not the number of currently waiting listeners should be subtracted from `count()`.
+    fn is_additional(&self, internal: Internal) -> bool;
+
+    /// Get the number of listeners to wake.
+    fn count(&self, internal: Internal) -> usize;
+
+    /// Get a tag to be associated with a notification.
+    ///
+    /// This method is expected to be called `count()` times.
+    fn next_tag(&mut self, internal: Internal) -> Self::Tag;
+}
+
+/// A notification that can be used to notify an [`Event`].
+///
+/// This type is used by the [`Event::notify()`] function to determine how many listeners to wake up, whether
+/// or not to subtract additional listeners, and other properties. The actual internal data is hidden in a
+/// private trait and is intentionally not exposed. This means that users cannot manually implement the
+/// [`Notification`] trait. However, it also means that changing the underlying trait is not a semver breaking
+/// change.
+///
+/// Users can create types that implement notifications using the combinators on the [`IntoNotification`] type.
+/// Typical construction of a [`Notification`] starts with a numeric literal (like `3usize`) and then optionally
+/// adding combinators.
+///
+/// # Example
+///
+/// ```
+/// use event_listener::{Event, IntoNotification, Notification};
+///
+/// fn notify(ev: &Event, notify: impl Notification<Tag = ()>) {
+///     ev.notify(notify);
+/// }
+///
+/// notify(&Event::new(), 1.additional());
+/// ```
+///
+/// [`Event`]: crate::Event
+pub trait Notification: NotificationPrivate {}
+impl<N: NotificationPrivate + ?Sized> Notification for N {}
+
+/// Notify a given number of unnotified listeners.
+#[derive(Debug, Clone)]
+#[doc(hidden)]
+pub struct Notify(usize);
+
+impl Notify {
+    /// Create a new `Notify` with the given number of listeners to notify.
+    fn new(count: usize) -> Self {
+        Self(count)
+    }
+}
+
+impl NotificationPrivate for Notify {
+    type Tag = ();
+
+    fn is_additional(&self, _: Internal) -> bool {
+        false
+    }
+
+    fn fence(&self, _: Internal) {
+        full_fence();
+    }
+
+    fn count(&self, _: Internal) -> usize {
+        self.0
+    }
+
+    fn next_tag(&mut self, _: Internal) -> Self::Tag {}
+}
+
+/// Make the underlying notification additional.
+#[derive(Debug, Clone)]
+#[doc(hidden)]
+pub struct Additional<N: ?Sized>(N);
+
+impl<N> Additional<N> {
+    /// Create a new `Additional` with the given notification.
+    fn new(inner: N) -> Self {
+        Self(inner)
+    }
+}
+
+impl<N> NotificationPrivate for Additional<N>
+where
+    N: Notification + ?Sized,
+{
+    type Tag = N::Tag;
+
+    fn is_additional(&self, _: Internal) -> bool {
+        true
+    }
+
+    fn fence(&self, i: Internal) {
+        self.0.fence(i);
+    }
+
+    fn count(&self, i: Internal) -> usize {
+        self.0.count(i)
+    }
+
+    fn next_tag(&mut self, i: Internal) -> Self::Tag {
+        self.0.next_tag(i)
+    }
+}
+
+/// Don't emit a fence for this notification.
+#[derive(Debug, Clone)]
+#[doc(hidden)]
+pub struct Relaxed<N: ?Sized>(N);
+
+impl<N> Relaxed<N> {
+    /// Create a new `Relaxed` with the given notification.
+    fn new(inner: N) -> Self {
+        Self(inner)
+    }
+}
+
+impl<N> NotificationPrivate for Relaxed<N>
+where
+    N: Notification + ?Sized,
+{
+    type Tag = N::Tag;
+
+    fn is_additional(&self, i: Internal) -> bool {
+        self.0.is_additional(i)
+    }
+
+    fn fence(&self, _: Internal) {
+        // Don't emit a fence.
+    }
+
+    fn count(&self, i: Internal) -> usize {
+        self.0.count(i)
+    }
+
+    fn next_tag(&mut self, i: Internal) -> Self::Tag {
+        self.0.next_tag(i)
+    }
+}
+
+/// Use a tag to notify listeners.
+#[cfg(feature = "std")]
+#[derive(Debug, Clone)]
+#[doc(hidden)]
+pub struct Tag<N: ?Sized, T> {
+    tag: T,
+    inner: N,
+}
+
+#[cfg(feature = "std")]
+impl<N: ?Sized, T> Tag<N, T> {
+    /// Create a new `Tag` with the given tag and notification.
+    fn new(tag: T, inner: N) -> Self
+    where
+        N: Sized,
+    {
+        Self { tag, inner }
+    }
+}
+
+#[cfg(feature = "std")]
+impl<N, T> NotificationPrivate for Tag<N, T>
+where
+    N: Notification + ?Sized,
+    T: Clone,
+{
+    type Tag = T;
+
+    fn is_additional(&self, i: Internal) -> bool {
+        self.inner.is_additional(i)
+    }
+
+    fn fence(&self, i: Internal) {
+        self.inner.fence(i);
+    }
+
+    fn count(&self, i: Internal) -> usize {
+        self.inner.count(i)
+    }
+
+    fn next_tag(&mut self, _: Internal) -> Self::Tag {
+        self.tag.clone()
+    }
+}
+
+/// Use a function to generate a tag to notify listeners.
+#[cfg(feature = "std")]
+#[doc(hidden)]
+pub struct TagWith<N: ?Sized, F> {
+    tag: F,
+    inner: N,
+}
+
+#[cfg(feature = "std")]
+impl<N: fmt::Debug + ?Sized, F> fmt::Debug for TagWith<N, F> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        struct Ellipses;
+
+        impl fmt::Debug for Ellipses {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                f.write_str("..")
+            }
+        }
+
+        f.debug_struct("TagWith")
+            .field("tag", &Ellipses)
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
+#[cfg(feature = "std")]
+impl<N, F> TagWith<N, F> {
+    /// Create a new `TagFn` with the given tag function and notification.
+    fn new(tag: F, inner: N) -> Self {
+        Self { tag, inner }
+    }
+}
+
+#[cfg(feature = "std")]
+impl<N, F, T> NotificationPrivate for TagWith<N, F>
+where
+    N: Notification + ?Sized,
+    F: FnMut() -> T,
+{
+    type Tag = T;
+
+    fn is_additional(&self, i: Internal) -> bool {
+        self.inner.is_additional(i)
+    }
+
+    fn fence(&self, i: Internal) {
+        self.inner.fence(i);
+    }
+
+    fn count(&self, i: Internal) -> usize {
+        self.inner.count(i)
+    }
+
+    fn next_tag(&mut self, _: Internal) -> Self::Tag {
+        (self.tag)()
+    }
+}
+
+/// A generic notification.
+#[derive(Debug)]
+pub(crate) struct GenericNotify<F> {
+    /// Number of listeners to notify.
+    count: usize,
+
+    /// Whether this notification is additional.
+    additional: bool,
+
+    /// Generate tags.
+    tags: F,
+}
+
+impl<T, F: TagProducer<Tag = T>> GenericNotify<F> {
+    pub(crate) fn new(count: usize, additional: bool, tags: F) -> Self {
+        Self {
+            count,
+            additional,
+            tags,
+        }
+    }
+}
+
+impl<T, F: TagProducer<Tag = T>> NotificationPrivate for GenericNotify<F> {
+    type Tag = T;
+
+    fn is_additional(&self, _: Internal) -> bool {
+        self.additional
+    }
+
+    fn fence(&self, _: Internal) {
+        // Don't emit a fence.
+    }
+
+    fn count(&self, _: Internal) -> usize {
+        self.count
+    }
+
+    fn next_tag(&mut self, _: Internal) -> Self::Tag {
+        self.tags.next_tag()
+    }
+}
+
+/// The producer for a generic notification.
+pub(crate) trait TagProducer {
+    type Tag;
+
+    /// Get the next tag.
+    fn next_tag(&mut self) -> Self::Tag;
+}
+
+impl<T, F: FnMut() -> T> TagProducer for F {
+    type Tag = T;
+
+    fn next_tag(&mut self) -> T {
+        (self)()
+    }
+}
+
+/// A value that can be converted into a [`Notification`].
+/// +/// This trait adds onto the [`Notification`] trait by providing combinators that can be applied to all +/// notification types as well as numeric literals. This transforms what would normally be: +/// +/// ``` +/// use event_listener::Event; +/// +/// let event = Event::new(); +/// +/// // Note that each use case needs its own function, leading to bloat. +/// event.notify(1); +/// event.notify_additional(3); +/// event.notify_relaxed(5); +/// event.notify_additional_relaxed(2); +/// ``` +/// +/// into this: +/// +/// ``` +/// use event_listener::{Event, IntoNotification, Listener}; +/// +/// let event = Event::new(); +/// +/// event.notify(1); +/// event.notify(3.additional()); +/// event.notify(5.relaxed()); +/// event.notify(2.additional().relaxed()); +/// ``` +/// +/// This trait is implemented for all types that implement [`Notification`], as well as for non-floating-point +/// numeric literals (`usize`, `i32`, etc). +/// +/// This function can be thought of as being analogous to [`std::iter::IntoIterator`], but for [`Notification`]. +pub trait IntoNotification: __private::Sealed { + /// The tag data associated with a notification. + /// + /// By default, most [`Event`]s will use the unit type, `()`. However, this can be used to pass data along to + /// the listener. + type Tag; + + /// The notification type. + /// + /// Tells what kind of underlying type that the [`Notification`] is. You probably don't need to worry about + /// this. + type Notify: Notification; + + /// Convert this value into a notification. + /// + /// This allows the user to convert an [`IntoNotification`] into a [`Notification`]. + /// + /// # Panics + /// + /// This function panics if the value represents a negative number of notifications. + /// + /// # Examples + /// + /// ``` + /// use event_listener::IntoNotification; + /// + /// let _ = 3.into_notification(); + /// ``` + fn into_notification(self) -> Self::Notify; + + /// Convert this value into an additional notification. + /// + /// By default, notifications ignore listeners that are already notified. Generally, this happens when there + /// is an [`EventListener`] that has been woken up, but hasn't been polled to completion or waited on yet. + /// For instance, if you have three notified listeners and you call `event.notify(5)`, only two listeners + /// will be woken up. + /// + /// This default behavior is generally desired. For instance, if you are writing a `Mutex` implementation + /// powered by an [`Event`], you usually only want one consumer to be notified at a time. If you notified + /// a listener when another listener is already notified, you would have unnecessary contention for your + /// lock, as both listeners fight over the lock. Therefore, you would call `event.notify(1)` to make sure + /// *at least* one listener is awake. + /// + /// Sometimes, this behavior is not desired. For instance, if you are writing an MPMC channel, it is desirable + /// for multiple listeners to be reading from the underlying queue at once. In this case, you would instead + /// call `event.notify(1.additional())`. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, IntoNotification, Listener}; + /// + /// let event = Event::new(); + /// + /// let mut l1 = event.listen(); + /// let mut l2 = event.listen(); + /// + /// // This will only wake up the first listener, as the second call observes that there is already a + /// // notified listener. 
+ /// event.notify(1); + /// event.notify(1); + /// + /// // This call wakes up the other listener. + /// event.notify(1.additional()); + /// ``` + fn additional(self) -> Additional + where + Self: Sized, + { + Additional::new(self.into_notification()) + } + + /// Don't emit a fence for this notification. + /// + /// Usually, notifications emit a `SeqCst` atomic fence before any listeners are woken up. This ensures + /// that notification state isn't inconsistent before any wakers are woken up. However, it may be + /// desirable to omit this fence in certain cases. + /// + /// - You are running the [`Event`] on a single thread, where no synchronization needs to occur. + /// - You are emitting the `SeqCst` fence yourself. + /// + /// In these cases, `relaxed()` can be used to avoid emitting the `SeqCst` fence. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, IntoNotification, Listener}; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(1.relaxed()); + /// event.notify(1.relaxed()); + /// ``` + fn relaxed(self) -> Relaxed + where + Self: Sized, + { + Relaxed::new(self.into_notification()) + } + + /// Use a tag with this notification. + /// + /// In many cases, it is desired to send additional information to the listener of the [`Event`]. For instance, + /// it is possible to optimize a `Mutex` implementation by locking directly on the next listener, without + /// needing to ever unlock the mutex at all. + /// + /// The tag provided is cloned to provide the tag for all listeners. In cases where this is not flexible + /// enough, use [`IntoNotification::with_tag()`] instead. + /// + /// Tagging functions cannot be implemented efficiently for `no_std`, so this is only available + /// when the `std` feature is enabled. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{IntoNotification, Listener, Event}; + /// + /// let event = Event::::with_tag(); + /// + /// let mut listener1 = event.listen(); + /// let mut listener2 = event.listen(); + /// + /// // Notify with `true` then `false`. + /// event.notify(1.additional().tag(true)); + /// event.notify(1.additional().tag(false)); + /// + /// # #[cfg(not(target_family = "wasm"))] { // Listener::wait is unavailable on WASM + /// assert_eq!(listener1.wait(), true); + /// assert_eq!(listener2.wait(), false); + /// # } + /// ``` + #[cfg(feature = "std")] + fn tag(self, tag: T) -> Tag + where + Self: Sized + IntoNotification, + { + Tag::new(tag, self.into_notification()) + } + + /// Use a function to generate a tag with this notification. + /// + /// In many cases, it is desired to send additional information to the listener of the [`Event`]. For instance, + /// it is possible to optimize a `Mutex` implementation by locking directly on the next listener, without + /// needing to ever unlock the mutex at all. + /// + /// Tagging functions cannot be implemented efficiently for `no_std`, so this is only available + /// when the `std` feature is enabled. 
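+    ///
+    /// Unlike [`IntoNotification::tag()`], which clones one value for every
+    /// listener, the closure given here is invoked once per notified listener,
+    /// so each listener can receive a distinct tag.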
+ /// + /// # Examples + /// + /// ``` + /// use event_listener::{IntoNotification, Listener, Event}; + /// + /// let event = Event::::with_tag(); + /// + /// let mut listener1 = event.listen(); + /// let mut listener2 = event.listen(); + /// + /// // Notify with `true` then `false`. + /// event.notify(1.additional().tag_with(|| true)); + /// event.notify(1.additional().tag_with(|| false)); + /// + /// # #[cfg(not(target_family = "wasm"))] { // Listener::wait is unavailable on WASM + /// assert_eq!(listener1.wait(), true); + /// assert_eq!(listener2.wait(), false); + /// # } + /// ``` + #[cfg(feature = "std")] + fn tag_with(self, tag: F) -> TagWith + where + Self: Sized + IntoNotification, + F: FnMut() -> T, + { + TagWith::new(tag, self.into_notification()) + } +} + +impl IntoNotification for N { + type Tag = N::Tag; + type Notify = N; + + fn into_notification(self) -> Self::Notify { + self + } +} + +macro_rules! impl_for_numeric_types { + ($($ty:ty)*) => {$( + impl IntoNotification for $ty { + type Tag = (); + type Notify = Notify; + + #[allow(unused_comparisons)] + fn into_notification(self) -> Self::Notify { + if self < 0 { + panic!("negative notification count"); + } + + Notify::new(self.try_into().expect("overflow")) + } + } + + impl __private::Sealed for $ty {} + )*}; +} + +impl_for_numeric_types! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } + +/// Equivalent to `atomic::fence(Ordering::SeqCst)`, but in some cases faster. +#[inline] +pub(super) fn full_fence() { + #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri), not(loom)))] + { + use core::{arch::asm, cell::UnsafeCell}; + // HACK(stjepang): On x86 architectures there are two different ways of executing + // a `SeqCst` fence. + // + // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction. + // 2. A `lock ` instruction. + // + // Both instructions have the effect of a full barrier, but empirical benchmarks have shown + // that the second one is sometimes a bit faster. + let a = UnsafeCell::new(0_usize); + // It is common to use `lock or` here, but when using a local variable, `lock not`, which + // does not change the flag, should be slightly more efficient. + // Refs: https://www.felixcloutier.com/x86/not + unsafe { + #[cfg(target_pointer_width = "64")] + asm!("lock not qword ptr [{0}]", in(reg) a.get(), options(nostack, preserves_flags)); + #[cfg(target_pointer_width = "32")] + asm!("lock not dword ptr [{0:e}]", in(reg) a.get(), options(nostack, preserves_flags)); + } + return; + } + #[allow(unreachable_code)] + { + atomic::fence(Ordering::SeqCst); + } +} + +mod __private { + /// Make sure the NotificationPrivate trait can't be implemented outside of this crate. + #[doc(hidden)] + #[derive(Debug)] + pub struct Internal(()); + + impl Internal { + pub(crate) fn new() -> Self { + Self(()) + } + } + + #[doc(hidden)] + pub trait Sealed {} + impl Sealed for N {} +} diff --git a/external/vendor/event-listener/src/slab.rs b/external/vendor/event-listener/src/slab.rs new file mode 100644 index 0000000000..59e1c21aa5 --- /dev/null +++ b/external/vendor/event-listener/src/slab.rs @@ -0,0 +1,1429 @@ +//! Implementation of `event-listener` built exclusively on atomics. +//! +//! On `no_std`, we don't have access to `Mutex`, so we can't use intrusive linked lists like the `std` +//! implementation. Normally, we would use a concurrent atomic queue to store listeners, but benchmarks +//! show that using queues in this way is very slow, especially for the single threaded use-case. +//! +//! 
+//! We've found that it's easier to assume that the `Event` won't be under high contention in most use
+//! cases. Therefore, we use a spinlock that protects a linked list of listeners, and fall back to an
+//! atomic queue if the lock is contended. Benchmarks show that this is about 20% slower than the std
+//! implementation, but still much faster than using a queue.
+
+#[path = "slab/node.rs"]
+mod node;
+
+use node::{Node, NothingProducer, TaskWaiting};
+
+use crate::notify::{GenericNotify, Internal, Notification};
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::cell::{Cell, ConstPtr, UnsafeCell};
+use crate::sync::Arc;
+use crate::{RegisterResult, State, Task, TaskRef};
+
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::num::NonZeroUsize;
+use core::ops;
+use core::pin::Pin;
+
+use alloc::vec::Vec;
+
+impl<T> crate::Inner<T> {
+    /// Locks the list.
+    fn try_lock(&self) -> Option<ListGuard<'_, T>> {
+        self.list.inner.try_lock().map(|guard| ListGuard {
+            inner: self,
+            guard: Some(guard),
+            tasks: alloc::vec![],
+        })
+    }
+
+    /// Force a queue update.
+    fn queue_update(&self) {
+        // Locking and unlocking the mutex will drain the queue if there is no contention.
+        drop(self.try_lock());
+    }
+
+    /// Add a new listener to the list.
+    ///
+    /// Does nothing if the listener is already registered.
+    pub(crate) fn insert(&self, mut listener: Pin<&mut Option<Listener<T>>>) {
+        if listener.as_ref().as_pin_ref().is_some() {
+            // Already inserted.
+            return;
+        }
+
+        match self.try_lock() {
+            Some(mut lock) => {
+                let key = lock.insert(State::Created);
+                *listener = Some(Listener::HasNode(key));
+            }
+
+            None => {
+                // Push it to the queue.
+                let (node, task_waiting) = Node::listener();
+                self.list.queue.push(node).unwrap();
+                *listener = Some(Listener::Queued(task_waiting));
+
+                // Force a queue update.
+                self.queue_update();
+            }
+        }
+    }
+
+    /// Remove a listener from the list.
+    pub(crate) fn remove(
+        &self,
+        mut listener: Pin<&mut Option<Listener<T>>>,
+        propagate: bool,
+    ) -> Option<State<T>> {
+        loop {
+            let state = match listener.as_mut().take() {
+                Some(Listener::HasNode(key)) => {
+                    match self.try_lock() {
+                        Some(mut list) => {
+                            // Fast path removal.
+                            list.remove(key, propagate)
+                        }
+
+                        None => {
+                            // Slow path removal.
+                            // This is why intrusive lists don't work on no_std.
+                            let node = Node::RemoveListener {
+                                listener: key,
+                                propagate,
+                            };
+
+                            self.list.queue.push(node).unwrap();
+
+                            // Force a queue update.
+                            self.queue_update();
+
+                            None
+                        }
+                    }
+                }
+
+                Some(Listener::Queued(tw)) => {
+                    // Make sure it's not added after the queue is drained.
+                    if let Some(key) = tw.cancel() {
+                        // If it was already added, set up our listener and try again.
+                        *listener = Some(Listener::HasNode(key));
+                        continue;
+                    }
+
+                    None
+                }
+
+                None => None,
+
+                _ => unreachable!(),
+            };
+
+            return state;
+        }
+    }
+
+    /// Notifies a number of entries.
+    #[cold]
+    pub(crate) fn notify(&self, notify: impl Notification<Tag = T>) -> usize {
+        match self.try_lock() {
+            Some(mut guard) => {
+                // Notify the listeners.
+                guard.notify(notify)
+            }
+
+            None => {
+                // Push it to the queue.
+                let node = Node::Notify(GenericNotify::new(
+                    notify.count(Internal::new()),
+                    notify.is_additional(Internal::new()),
+                    NothingProducer::default(),
+                ));
+
+                self.list.queue.push(node).unwrap();
+
+                // Force a queue update.
+                self.queue_update();
+
+                // We haven't notified anyone yet.
+                0
+            }
+        }
+    }
+
+    /// Register a task to be notified when the event is triggered.
+    ///
+    /// Returns `true` if the listener was already notified, and `false` otherwise.
If the listener + /// isn't inserted, returns `None`. + pub(crate) fn register( + &self, + mut listener: Pin<&mut Option>>, + task: TaskRef<'_>, + ) -> RegisterResult { + loop { + match listener.as_mut().take() { + Some(Listener::HasNode(key)) => { + *listener = Some(Listener::HasNode(key)); + match self.try_lock() { + Some(mut guard) => { + // Fast path registration. + return guard.register(listener, task); + } + + None => { + // Wait for the lock. + let node = Node::Waiting(task.into_task()); + self.list.queue.push(node).unwrap(); + + // Force a queue update. + self.queue_update(); + + return RegisterResult::Registered; + } + } + } + + Some(Listener::Queued(task_waiting)) => { + // Force a queue update. + self.queue_update(); + + // Are we done yet? + match task_waiting.status() { + Some(key) => { + assert!(key.get() != usize::MAX); + + // We're inserted now, adjust state. + *listener = Some(Listener::HasNode(key)); + } + + None => { + // We're still queued, so register the task. + task_waiting.register(task.into_task()); + *listener = Some(Listener::Queued(task_waiting)); + + // Force a queue update. + self.queue_update(); + + return RegisterResult::Registered; + } + } + } + + None => return RegisterResult::NeverInserted, + + _ => unreachable!(), + } + } + } +} + +#[derive(Debug)] +pub(crate) struct List { + /// The inner list. + inner: Mutex>, + + /// The queue of pending operations. + queue: concurrent_queue::ConcurrentQueue>, +} + +impl List { + pub(super) fn new() -> List { + List { + inner: Mutex::new(ListenerSlab::new()), + queue: concurrent_queue::ConcurrentQueue::unbounded(), + } + } + + /// Try to get the total number of listeners without blocking. + pub(super) fn try_total_listeners(&self) -> Option { + self.inner.try_lock().map(|lock| lock.listeners.len()) + } +} + +/// The guard returned by [`Inner::lock`]. +pub(crate) struct ListGuard<'a, T> { + /// Reference to the inner state. + pub(crate) inner: &'a crate::Inner, + + /// The locked list. + pub(crate) guard: Option>>, + + /// Tasks to wake up once this guard is dropped. + tasks: Vec, +} + +impl ListGuard<'_, T> { + #[cold] + fn process_nodes_slow(&mut self, start_node: Node) { + let guard = self.guard.as_mut().unwrap(); + + // Process the start node. + self.tasks.extend(start_node.apply(guard)); + + // Process all remaining nodes. + while let Ok(node) = self.inner.list.queue.pop() { + self.tasks.extend(node.apply(guard)); + } + } + + #[inline] + fn process_nodes(&mut self) { + // Process every node left in the queue. + if let Ok(start_node) = self.inner.list.queue.pop() { + self.process_nodes_slow(start_node); + } + } +} + +impl ops::Deref for ListGuard<'_, T> { + type Target = ListenerSlab; + + fn deref(&self) -> &Self::Target { + self.guard.as_ref().unwrap() + } +} + +impl ops::DerefMut for ListGuard<'_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.guard.as_mut().unwrap() + } +} + +impl Drop for ListGuard<'_, T> { + fn drop(&mut self) { + while self.guard.is_some() { + // Process every node left in the queue. + self.process_nodes(); + + // Update the atomic `notified` counter. + let list = self.guard.take().unwrap(); + let notified = if list.notified < list.len { + list.notified + } else { + usize::MAX + }; + + self.inner.notified.store(notified, Ordering::Release); + + // Drop the actual lock. + drop(list); + + // Wakeup all tasks. + for task in self.tasks.drain(..) 
{ + task.wake(); + } + + // There is a deadlock where a node is pushed to the end of the queue after we've finished + // process_nodes() but before we've finished dropping the lock. This can lead to some + // notifications not being properly delivered, or listeners not being added to the list. + // Therefore check before we finish dropping if there is anything left in the queue, and + // if so, lock it again and force a queue update. + if !self.inner.list.queue.is_empty() { + self.guard = self.inner.list.inner.try_lock(); + } + } + } +} + +/// An entry representing a registered listener. +enum Entry { + /// Contains the listener state. + Listener { + /// The state of the listener. + state: Cell>, + + /// The previous listener in the list. + prev: Cell>, + + /// The next listener in the list. + next: Cell>, + }, + + /// An empty slot that contains the index of the next empty slot. + Empty(NonZeroUsize), + + /// Sentinel value. + Sentinel, +} + +struct TakenState<'a, T> { + slot: &'a Cell>, + state: State, +} + +impl Drop for TakenState<'_, T> { + fn drop(&mut self) { + self.slot + .set(mem::replace(&mut self.state, State::NotifiedTaken)); + } +} + +impl fmt::Debug for TakenState<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.state, f) + } +} + +impl PartialEq for TakenState<'_, T> { + fn eq(&self, other: &Self) -> bool { + self.state == other.state + } +} + +impl<'a, T> TakenState<'a, T> { + fn new(slot: &'a Cell>) -> Self { + let state = slot.replace(State::NotifiedTaken); + Self { slot, state } + } +} + +impl fmt::Debug for Entry { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Entry::Listener { state, next, prev } => f + .debug_struct("Listener") + .field("state", &TakenState::new(state)) + .field("prev", prev) + .field("next", next) + .finish(), + Entry::Empty(next) => f.debug_tuple("Empty").field(next).finish(), + Entry::Sentinel => f.debug_tuple("Sentinel").finish(), + } + } +} + +impl PartialEq for Entry { + fn eq(&self, other: &Entry) -> bool { + match (self, other) { + ( + Self::Listener { + state: state1, + prev: prev1, + next: next1, + }, + Self::Listener { + state: state2, + prev: prev2, + next: next2, + }, + ) => { + if TakenState::new(state1) != TakenState::new(state2) { + return false; + } + + prev1.get() == prev2.get() && next1.get() == next2.get() + } + (Self::Empty(next1), Self::Empty(next2)) => next1 == next2, + (Self::Sentinel, Self::Sentinel) => true, + _ => false, + } + } +} + +impl Entry { + fn state(&self) -> &Cell> { + match self { + Entry::Listener { state, .. } => state, + _ => unreachable!(), + } + } + + fn prev(&self) -> &Cell> { + match self { + Entry::Listener { prev, .. } => prev, + _ => unreachable!(), + } + } + + fn next(&self) -> &Cell> { + match self { + Entry::Listener { next, .. } => next, + _ => unreachable!(), + } + } +} + +/// A linked list of entries. +pub(crate) struct ListenerSlab { + /// The raw list of entries. + listeners: Vec>, + + /// First entry in the list. + head: Option, + + /// Last entry in the list. + tail: Option, + + /// The first unnotified entry in the list. + start: Option, + + /// The number of notified entries in the list. + notified: usize, + + /// The total number of listeners. + len: usize, + + /// The index of the first `Empty` entry, or the length of the list plus one if there + /// are no empty entries. 
+ first_empty: NonZeroUsize, +} + +impl fmt::Debug for ListenerSlab { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ListenerSlab") + .field("listeners", &self.listeners) + .field("head", &self.head) + .field("tail", &self.tail) + .field("start", &self.start) + .field("notified", &self.notified) + .field("len", &self.len) + .field("first_empty", &self.first_empty) + .finish() + } +} + +impl ListenerSlab { + /// Create a new, empty list. + pub(crate) fn new() -> Self { + Self { + listeners: alloc::vec![Entry::Sentinel], + head: None, + tail: None, + start: None, + notified: 0, + len: 0, + first_empty: unsafe { NonZeroUsize::new_unchecked(1) }, + } + } + + /// Inserts a new entry into the list. + pub(crate) fn insert(&mut self, state: State) -> NonZeroUsize { + // Add the new entry into the list. + let key = { + let entry = Entry::Listener { + state: Cell::new(state), + prev: Cell::new(self.tail), + next: Cell::new(None), + }; + + let key = self.first_empty; + if self.first_empty.get() == self.listeners.len() { + // No empty entries, so add a new entry. + self.listeners.push(entry); + + // SAFETY: Guaranteed to not overflow, since the Vec would have panicked already. + self.first_empty = unsafe { NonZeroUsize::new_unchecked(self.listeners.len()) }; + } else { + // There is an empty entry, so replace it. + let slot = &mut self.listeners[key.get()]; + let next = match mem::replace(slot, entry) { + Entry::Empty(next) => next, + _ => unreachable!(), + }; + + self.first_empty = next; + } + + key + }; + + // Replace the tail with the new entry. + match mem::replace(&mut self.tail, Some(key)) { + None => self.head = Some(key), + Some(tail) => { + let tail = &self.listeners[tail.get()]; + tail.next().set(Some(key)); + } + } + + // If there are no listeners that have been notified, then the new listener is the next + // listener to be notified. + if self.start.is_none() { + self.start = Some(key); + } + + // Increment the length. + self.len += 1; + + key + } + + /// Removes an entry from the list and returns its state. + pub(crate) fn remove(&mut self, key: NonZeroUsize, propagate: bool) -> Option> { + let entry = &self.listeners[key.get()]; + let prev = entry.prev().get(); + let next = entry.next().get(); + + // Unlink from the previous entry. + match prev { + None => self.head = next, + Some(p) => self.listeners[p.get()].next().set(next), + } + + // Unlink from the next entry. + match next { + None => self.tail = prev, + Some(n) => self.listeners[n.get()].prev().set(prev), + } + + // If this was the first unnotified entry, move the pointer to the next one. + if self.start == Some(key) { + self.start = next; + } + + // Extract the state. + let entry = mem::replace( + &mut self.listeners[key.get()], + Entry::Empty(self.first_empty), + ); + self.first_empty = key; + + let mut state = match entry { + Entry::Listener { state, .. } => state.into_inner(), + _ => unreachable!(), + }; + + // Update the counters. + if state.is_notified() { + self.notified = self.notified.saturating_sub(1); + + if propagate { + // Propagate the notification to the next entry. + let state = mem::replace(&mut state, State::NotifiedTaken); + if let State::Notified { tag, additional } = state { + let tags = { + let mut tag = Some(tag); + move || tag.take().expect("called more than once") + }; + + self.notify(GenericNotify::new(1, additional, tags)); + } + } + } + self.len -= 1; + + Some(state) + } + + /// Notifies a number of listeners. 
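+    ///
+    /// Returns how many listeners were newly notified by this call; this can be less than the
+    /// requested count if the list runs out of unnotified listeners (or, for non-additional
+    /// notifications, if enough listeners were already notified).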
+ #[cold] + pub(crate) fn notify(&mut self, mut notify: impl Notification) -> usize { + let mut n = notify.count(Internal::new()); + let is_additional = notify.is_additional(Internal::new()); + if !is_additional { + // Make sure we're not notifying more than we have. + if n <= self.notified { + return 0; + } + n -= self.notified; + } + + let original_count = n; + while n > 0 { + n -= 1; + + // Notify the next entry. + match self.start { + None => return original_count - n - 1, + + Some(e) => { + // Get the entry and move the pointer forwards. + let entry = &self.listeners[e.get()]; + self.start = entry.next().get(); + + // Set the state to `Notified` and notify. + let tag = notify.next_tag(Internal::new()); + if let State::Task(task) = entry.state().replace(State::Notified { + tag, + additional: is_additional, + }) { + task.wake(); + } + + // Bump the notified count. + self.notified += 1; + } + } + } + + original_count - n + } + + /// Register a task to be notified when the event is triggered. + /// + /// Returns `true` if the listener was already notified, and `false` otherwise. If the listener + /// isn't inserted, returns `None`. + pub(crate) fn register( + &mut self, + mut listener: Pin<&mut Option>>, + task: TaskRef<'_>, + ) -> RegisterResult { + let key = match *listener { + Some(Listener::HasNode(key)) => key, + _ => return RegisterResult::NeverInserted, + }; + + let entry = &self.listeners[key.get()]; + + // Take the state out and check it. + match entry.state().replace(State::NotifiedTaken) { + State::Notified { tag, .. } => { + // The listener was already notified, so we don't need to do anything. + self.remove(key, false); + *listener = None; + RegisterResult::Notified(tag) + } + + State::Task(other_task) => { + // Only replace the task if it's not the same as the one we're registering. + if task.will_wake(other_task.as_task_ref()) { + entry.state().set(State::Task(other_task)); + } else { + entry.state().set(State::Task(task.into_task())); + } + + RegisterResult::Registered + } + + _ => { + // Register the task. + entry.state().set(State::Task(task.into_task())); + RegisterResult::Registered + } + } + } +} + +pub(crate) enum Listener { + /// The listener has a node inside of the linked list. + HasNode(NonZeroUsize), + + /// The listener has an entry in the queue that may or may not have a task waiting. + Queued(Arc), + + /// Eat the generic type for consistency. + _EatGenericType(PhantomData), +} + +impl fmt::Debug for Listener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::HasNode(key) => f.debug_tuple("HasNode").field(key).finish(), + Self::Queued(tw) => f.debug_tuple("Queued").field(tw).finish(), + Self::_EatGenericType(_) => unreachable!(), + } + } +} + +impl Unpin for Listener {} + +impl PartialEq for Listener { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::HasNode(a), Self::HasNode(b)) => a == b, + (Self::Queued(a), Self::Queued(b)) => Arc::ptr_eq(a, b), + _ => false, + } + } +} + +/// A simple mutex type that optimistically assumes that the lock is uncontended. +pub(crate) struct Mutex { + /// The inner value. + value: UnsafeCell, + + /// Whether the mutex is locked. + locked: AtomicBool, +} + +impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(lock) = self.try_lock() { + f.debug_tuple("Mutex").field(&*lock).finish() + } else { + f.write_str("Mutex { }") + } + } +} + +impl Mutex { + /// Create a new mutex. 
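+    ///
+    /// The new mutex starts out unlocked; it is only ever acquired via [`Mutex::try_lock`],
+    /// which spins briefly under contention rather than blocking.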
+ pub(crate) fn new(value: T) -> Self { + Self { + value: UnsafeCell::new(value), + locked: AtomicBool::new(false), + } + } + + /// Lock the mutex. + pub(crate) fn try_lock(&self) -> Option> { + // Try to lock the mutex. + if self + .locked + .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + // We have successfully locked the mutex. + Some(MutexGuard { + mutex: self, + guard: self.value.get(), + }) + } else { + self.try_lock_slow() + } + } + + #[cold] + fn try_lock_slow(&self) -> Option> { + // Assume that the contention is short-term. + // Spin for a while to see if the mutex becomes unlocked. + let mut spins = 100u32; + + loop { + if self + .locked + .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + // We have successfully locked the mutex. + return Some(MutexGuard { + mutex: self, + guard: self.value.get(), + }); + } + + // Use atomic loads instead of compare-exchange. + while self.locked.load(Ordering::Relaxed) { + // Return None once we've exhausted the number of spins. + spins = spins.checked_sub(1)?; + } + } + } +} + +pub(crate) struct MutexGuard<'a, T> { + mutex: &'a Mutex, + guard: ConstPtr, +} + +impl<'a, T> Drop for MutexGuard<'a, T> { + fn drop(&mut self) { + self.mutex.locked.store(false, Ordering::Release); + } +} + +impl<'a, T> ops::Deref for MutexGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { self.guard.deref() } + } +} + +impl<'a, T> ops::DerefMut for MutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { self.guard.deref_mut() } + } +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(target_family = "wasm")] + use wasm_bindgen_test::wasm_bindgen_test as test; + + #[test] + fn smoke_mutex() { + let mutex = Mutex::new(0); + + { + let mut guard = mutex.try_lock().unwrap(); + *guard += 1; + } + + { + let mut guard = mutex.try_lock().unwrap(); + *guard += 1; + } + + let guard = mutex.try_lock().unwrap(); + assert_eq!(*guard, 2); + } + + #[test] + fn smoke_listener_slab() { + let mut listeners = ListenerSlab::<()>::new(); + + // Insert a few listeners. + let key1 = listeners.insert(State::Created); + let key2 = listeners.insert(State::Created); + let key3 = listeners.insert(State::Created); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key1)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Remove one. 
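+        // Removing `key2` unlinks it from the doubly linked list and returns slot 2 to the
+        // free list, so `first_empty` should now point at it.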
+ assert_eq!(listeners.remove(key2, false), Some(State::Created)); + + assert_eq!(listeners.len, 2); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key1)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(2).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key1)), + next: Cell::new(None), + } + ); + } + + #[test] + fn listener_slab_notify() { + let mut listeners = ListenerSlab::new(); + + // Insert a few listeners. + let key1 = listeners.insert(State::Created); + let key2 = listeners.insert(State::Created); + let key3 = listeners.insert(State::Created); + + // Notify one. + listeners.notify(GenericNotify::new(1, true, || ())); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Notified { + additional: true, + tag: () + }), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Remove the notified listener. + assert_eq!( + listeners.remove(key1, false), + Some(State::Notified { + additional: true, + tag: () + }) + ); + + assert_eq!(listeners.len, 2); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key2)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(1).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + } + + #[test] + fn listener_slab_register() { + let woken = Arc::new(AtomicBool::new(false)); + let waker = waker_fn::waker_fn({ + let woken = woken.clone(); + move || woken.store(true, Ordering::SeqCst) + }); + + let mut listeners = ListenerSlab::new(); + + // Insert a few listeners. + let key1 = listeners.insert(State::Created); + let key2 = listeners.insert(State::Created); + let key3 = listeners.insert(State::Created); + + // Register one. 
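+        // Registration parks the waker in the entry's state (`State::Task`) and returns
+        // `Registered`, since no notification has arrived yet.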
+ assert_eq!( + listeners.register( + Pin::new(&mut Some(Listener::HasNode(key2))), + TaskRef::Waker(&waker) + ), + RegisterResult::Registered + ); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key1)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Task(Task::Waker(waker.clone()))), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Notify the listener. + listeners.notify(GenericNotify::new(2, false, || ())); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 2); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key3)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + assert!(woken.load(Ordering::SeqCst)); + assert_eq!( + listeners.register( + Pin::new(&mut Some(Listener::HasNode(key2))), + TaskRef::Waker(&waker) + ), + RegisterResult::Notified(()) + ); + } + + #[test] + fn listener_slab_notify_prop() { + let woken = Arc::new(AtomicBool::new(false)); + let waker = waker_fn::waker_fn({ + let woken = woken.clone(); + move || woken.store(true, Ordering::SeqCst) + }); + + let mut listeners = ListenerSlab::new(); + + // Insert a few listeners. + let key1 = listeners.insert(State::Created); + let key2 = listeners.insert(State::Created); + let key3 = listeners.insert(State::Created); + + // Register one. 
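+        // Park a waker in the middle listener so the test can observe when a notification
+        // reaches it.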
+ assert_eq!( + listeners.register( + Pin::new(&mut Some(Listener::HasNode(key2))), + TaskRef::Waker(&waker) + ), + RegisterResult::Registered + ); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key1)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Task(Task::Waker(waker.clone()))), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Notify the first listener. + listeners.notify(GenericNotify::new(1, false, || ())); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Task(Task::Waker(waker.clone()))), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Calling notify again should not change anything. + listeners.notify(GenericNotify::new(1, false, || ())); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Task(Task::Waker(waker.clone()))), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Remove the first listener. 
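+        // The first listener was already notified; removing it without propagation simply
+        // drops that notification and decrements the `notified` count.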
+ assert_eq!( + listeners.remove(key1, false), + Some(State::Notified { + additional: false, + tag: () + }) + ); + + assert_eq!(listeners.len, 2); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key2)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(1).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!(*listeners.listeners[2].prev(), Cell::new(None)); + assert_eq!(*listeners.listeners[2].next(), Cell::new(Some(key3))); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Notify the second listener. + listeners.notify(GenericNotify::new(1, false, || ())); + assert!(woken.load(Ordering::SeqCst)); + + assert_eq!(listeners.len, 2); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key2)); + assert_eq!(listeners.start, Some(key3)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(1).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Remove and propagate the second listener. + assert_eq!(listeners.remove(key2, true), Some(State::NotifiedTaken)); + + // The third listener should be notified. + assert_eq!(listeners.len, 1); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key3)); + assert_eq!(listeners.start, None); + assert_eq!(listeners.first_empty, NonZeroUsize::new(2).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!( + listeners.listeners[2], + Entry::Empty(NonZeroUsize::new(1).unwrap()) + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(None), + } + ); + + // Remove the third listener. + assert_eq!( + listeners.remove(key3, false), + Some(State::Notified { + additional: false, + tag: () + }) + ); + } + + #[test] + fn uncontended_inner() { + let inner = crate::Inner::new(); + + // Register two listeners. + let (mut listener1, mut listener2, mut listener3) = (None, None, None); + inner.insert(Pin::new(&mut listener1)); + inner.insert(Pin::new(&mut listener2)); + inner.insert(Pin::new(&mut listener3)); + + assert_eq!( + listener1, + Some(Listener::HasNode(NonZeroUsize::new(1).unwrap())) + ); + assert_eq!( + listener2, + Some(Listener::HasNode(NonZeroUsize::new(2).unwrap())) + ); + + // Register a waker in the second listener. 
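+        // `waker_fn` turns a closure into a `Waker`; the `woken` flag lets the test observe
+        // when that waker fires.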
+ let woken = Arc::new(AtomicBool::new(false)); + let waker = waker_fn::waker_fn({ + let woken = woken.clone(); + move || woken.store(true, Ordering::SeqCst) + }); + assert_eq!( + inner.register(Pin::new(&mut listener2), TaskRef::Waker(&waker)), + RegisterResult::Registered + ); + + // Notify the first listener. + inner.notify(GenericNotify::new(1, false, || ())); + assert!(!woken.load(Ordering::SeqCst)); + + // Another notify should do nothing. + inner.notify(GenericNotify::new(1, false, || ())); + assert!(!woken.load(Ordering::SeqCst)); + + // Receive the notification. + assert_eq!( + inner.register(Pin::new(&mut listener1), TaskRef::Waker(&waker)), + RegisterResult::Notified(()) + ); + + // First listener is already removed. + assert!(listener1.is_none()); + + // Notify the second listener. + inner.notify(GenericNotify::new(1, false, || ())); + assert!(woken.load(Ordering::SeqCst)); + + // Remove the second listener and propagate the notification. + assert_eq!( + inner.remove(Pin::new(&mut listener2), true), + Some(State::NotifiedTaken) + ); + + // Second listener is already removed. + assert!(listener2.is_none()); + + // Third listener should be notified. + assert_eq!( + inner.register(Pin::new(&mut listener3), TaskRef::Waker(&waker)), + RegisterResult::Notified(()) + ); + } +} diff --git a/external/vendor/event-listener/src/slab/node.rs b/external/vendor/event-listener/src/slab/node.rs new file mode 100644 index 0000000000..8901eb27e6 --- /dev/null +++ b/external/vendor/event-listener/src/slab/node.rs @@ -0,0 +1,249 @@ +//! An operation that can be delayed. + +//! The node that makes up queues. + +use crate::notify::{GenericNotify, Internal, NotificationPrivate, TagProducer}; +use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::sync::Arc; +use crate::sys::ListenerSlab; +use crate::{State, Task}; + +use alloc::boxed::Box; + +use core::fmt; +use core::marker::PhantomData; +use core::mem; +use core::num::NonZeroUsize; +use core::ptr; + +pub(crate) struct NothingProducer(PhantomData); + +impl Default for NothingProducer { + fn default() -> Self { + Self(PhantomData) + } +} + +impl fmt::Debug for NothingProducer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NothingProducer").finish() + } +} + +impl TagProducer for NothingProducer { + type Tag = T; + + fn next_tag(&mut self) -> Self::Tag { + // This has to be a zero-sized type with no drop handler. + assert_eq!(mem::size_of::(), 0); + assert!(!mem::needs_drop::()); + + // SAFETY: As this is a ZST without a drop handler, zero is valid. + unsafe { mem::zeroed() } + } +} + +/// A node in the backup queue. +pub(crate) enum Node { + /// This node is requesting to add a listener. + // For some reason, the MSRV build says this variant is never constructed. + #[allow(dead_code)] + AddListener { + /// The state of the listener that wants to be added. + task_waiting: Arc, + }, + + /// This node is notifying a listener. + Notify(GenericNotify>), + + /// This node is removing a listener. + RemoveListener { + /// The ID of the listener to remove. + listener: NonZeroUsize, + + /// Whether to propagate notifications to the next listener. + propagate: bool, + }, + + /// We are waiting for the mutex to lock, so they can manipulate it. + Waiting(Task), +} + +impl fmt::Debug for Node { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::AddListener { .. 
} => f.write_str("AddListener"), + Self::Notify(notify) => f + .debug_struct("Notify") + .field("count", ¬ify.count(Internal::new())) + .field("is_additional", ¬ify.is_additional(Internal::new())) + .finish(), + Self::RemoveListener { + listener, + propagate, + } => f + .debug_struct("RemoveListener") + .field("listener", listener) + .field("propagate", propagate) + .finish(), + Self::Waiting(_) => f.write_str("Waiting"), + } + } +} + +#[derive(Debug)] +pub(crate) struct TaskWaiting { + /// The task that is being waited on. + task: AtomicCell, + + /// The ID of the new entry. + /// + /// This is set to zero when the task is still queued, or usize::MAX when the node should not + /// be added at all. + entry_id: AtomicUsize, +} + +impl Node { + pub(crate) fn listener() -> (Self, Arc) { + // Create a new `TaskWaiting` structure. + let task_waiting = Arc::new(TaskWaiting { + task: AtomicCell::new(), + entry_id: AtomicUsize::new(0), + }); + + ( + Self::AddListener { + task_waiting: task_waiting.clone(), + }, + task_waiting, + ) + } + + /// Apply the node to the list. + pub(super) fn apply(self, list: &mut ListenerSlab) -> Option { + match self { + Node::AddListener { task_waiting } => { + // If we're cancelled, do nothing. + if task_waiting.entry_id.load(Ordering::Relaxed) == usize::MAX { + return task_waiting.task.take().map(|t| *t); + } + + // Add a new entry to the list. + let key = list.insert(State::Created); + assert!(key.get() != usize::MAX); + + // Send the new key to the listener and wake it if necessary. + let old_value = task_waiting.entry_id.swap(key.get(), Ordering::Release); + + // If we're cancelled, remove ourselves from the list. + if old_value == usize::MAX { + list.remove(key, false); + } + + return task_waiting.task.take().map(|t| *t); + } + Node::Notify(notify) => { + // Notify the next `count` listeners. + list.notify(notify); + } + Node::RemoveListener { + listener, + propagate, + } => { + // Remove the listener from the list. + list.remove(listener, propagate); + } + Node::Waiting(task) => { + return Some(task); + } + } + + None + } +} + +impl TaskWaiting { + /// Determine if we are still queued. + /// + /// Returns `Some` with the entry ID if we are no longer queued. + pub(crate) fn status(&self) -> Option { + NonZeroUsize::new(self.entry_id.load(Ordering::Acquire)) + } + + /// Register a listener. + pub(crate) fn register(&self, task: Task) { + // Set the task. + if let Some(task) = self.task.replace(Some(Box::new(task))) { + task.wake(); + } + + // If the entry ID is non-zero, then we are no longer queued. + if self.status().is_some() { + // Wake the task. + if let Some(task) = self.task.take() { + task.wake(); + } + } + } + + /// Mark this listener as cancelled, indicating that it should not be inserted into the list. + /// + /// If this listener was already inserted into the list, returns the entry ID. Otherwise returns + /// `None`. + pub(crate) fn cancel(&self) -> Option { + // Set the entry ID to usize::MAX. + let id = self.entry_id.swap(usize::MAX, Ordering::Release); + + // Wake the task. + if let Some(task) = self.task.take() { + task.wake(); + } + + // Return the entry ID if we were queued. + NonZeroUsize::new(id) + } +} + +/// A shared pointer to a value. +/// +/// The inner value is a `Box`. +#[derive(Debug)] +struct AtomicCell(AtomicPtr); + +impl AtomicCell { + /// Create a new `AtomicCell`. + fn new() -> Self { + Self(AtomicPtr::new(ptr::null_mut())) + } + + /// Swap the value out. 
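+    ///
+    /// Returns the previous value, or `None` if the cell was empty.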
+ fn replace(&self, value: Option>) -> Option> { + let old_value = match value { + Some(value) => self.0.swap(Box::into_raw(value), Ordering::AcqRel), + // Acquire is needed to synchronize with the store of a non-null ptr, but since a null ptr + // will never be dereferenced, there is no need to synchronize the store of a null ptr. + None => self.0.swap(ptr::null_mut(), Ordering::Acquire), + }; + + if old_value.is_null() { + None + } else { + // SAFETY: + // - AcqRel/Acquire ensures that it does not read a pointer to potentially invalid memory. + // - We've checked that old_value is not null. + // - We do not store invalid pointers other than null in self.0. + Some(unsafe { Box::from_raw(old_value) }) + } + } + + /// Take the value out. + fn take(&self) -> Option> { + self.replace(None) + } +} + +impl Drop for AtomicCell { + fn drop(&mut self) { + self.take(); + } +} diff --git a/external/vendor/event-listener/tests/loom.rs b/external/vendor/event-listener/tests/loom.rs new file mode 100644 index 0000000000..1cd92e26ad --- /dev/null +++ b/external/vendor/event-listener/tests/loom.rs @@ -0,0 +1,211 @@ +#![cfg(loom)] +use std::future::Future; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::Context; + +use event_listener::{Event, EventListener}; +use waker_fn::waker_fn; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +fn is_notified(listener: &mut EventListener) -> bool { + let waker = waker_fn(|| ()); + Pin::new(listener) + .poll(&mut Context::from_waker(&waker)) + .is_ready() +} + +#[test] +fn notify() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + + assert_eq!(event.notify(2), 2); + assert_eq!(event.notify(1), 0); + + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + }); +} + +#[test] +fn notify_additional() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify_additional(1), 1); + assert_eq!(event.notify(1), 0); + assert_eq!(event.notify_additional(1), 1); + + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + }) +} + +#[test] +fn notify_one() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(1), 1); + assert!(is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(1), 1); + assert!(is_notified(&mut l2)); + }); +} + +#[test] +fn notify_all() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(usize::MAX), 2); + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + }); +} + +#[test] +fn drop_notified() { + loom::model(|| { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify(1), 1); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + }); +} + +#[test] +fn drop_notified2() { + loom::model(|| { + let event = Event::new(); + + let l1 = 
event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify(2), 2); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + }); +} + +#[test] +fn drop_notified_additional() { + loom::model(|| { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + let mut l4 = event.listen(); + + assert_eq!(event.notify_additional(1), 1); + assert_eq!(event.notify(2), 1); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(is_notified(&mut l3)); + assert!(!is_notified(&mut l4)); + }); +} + +#[test] +fn drop_non_notified() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let l3 = event.listen(); + + assert_eq!(event.notify(1), 1); + drop(l3); + assert!(is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + }) +} + +#[test] +fn notify_all_fair() { + loom::model(|| { + let event = Event::new(); + let v = Arc::new(Mutex::new(vec![])); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + let waker1 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(1)) + }; + let waker2 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(2)) + }; + let waker3 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(3)) + }; + + assert!(Pin::new(&mut l1) + .poll(&mut Context::from_waker(&waker1)) + .is_pending()); + assert!(Pin::new(&mut l2) + .poll(&mut Context::from_waker(&waker2)) + .is_pending()); + assert!(Pin::new(&mut l3) + .poll(&mut Context::from_waker(&waker3)) + .is_pending()); + + assert_eq!(event.notify(usize::MAX), 3); + assert_eq!(&*v.lock().unwrap(), &[1, 2, 3]); + + assert!(Pin::new(&mut l1) + .poll(&mut Context::from_waker(&waker1)) + .is_ready()); + assert!(Pin::new(&mut l2) + .poll(&mut Context::from_waker(&waker2)) + .is_ready()); + assert!(Pin::new(&mut l3) + .poll(&mut Context::from_waker(&waker3)) + .is_ready()); + }) +} diff --git a/external/vendor/event-listener/tests/notify.rs b/external/vendor/event-listener/tests/notify.rs new file mode 100644 index 0000000000..c37dc9a784 --- /dev/null +++ b/external/vendor/event-listener/tests/notify.rs @@ -0,0 +1,192 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::Context; + +use event_listener::{Event, EventListener}; +use waker_fn::waker_fn; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +fn is_notified(listener: &mut EventListener) -> bool { + let waker = waker_fn(|| ()); + Pin::new(listener) + .poll(&mut Context::from_waker(&waker)) + .is_ready() +} + +#[test] +fn notify() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + + assert_eq!(event.notify(2), 2); + assert_eq!(event.notify(1), 0); + + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); +} + +#[test] +fn notify_additional() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify_additional(1), 1); + assert_eq!(event.notify(1), 0); + assert_eq!(event.notify_additional(1), 1); + + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); +} + +#[test] +fn notify_one() { + 
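+    // A single `notify(1)` wakes the earliest listener still waiting: first `l1`,
+    // then `l2` on the second call.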
let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(1), 1); + assert!(is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(1), 1); + assert!(is_notified(&mut l2)); +} + +#[test] +fn notify_all() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(usize::MAX), 2); + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); +} + +#[test] +fn drop_notified() { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify(1), 1); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); +} + +#[test] +fn drop_notified2() { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify(2), 2); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); +} + +#[test] +fn drop_notified_additional() { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + let mut l4 = event.listen(); + + assert_eq!(event.notify_additional(1), 1); + assert_eq!(event.notify(2), 1); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(is_notified(&mut l3)); + assert!(!is_notified(&mut l4)); +} + +#[test] +fn drop_non_notified() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let l3 = event.listen(); + + assert_eq!(event.notify(1), 1); + drop(l3); + assert!(is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); +} + +#[test] +fn notify_all_fair() { + let event = Event::new(); + let v = Arc::new(Mutex::new(vec![])); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + let waker1 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(1)) + }; + let waker2 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(2)) + }; + let waker3 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(3)) + }; + + assert!(Pin::new(&mut l1) + .poll(&mut Context::from_waker(&waker1)) + .is_pending()); + assert!(Pin::new(&mut l2) + .poll(&mut Context::from_waker(&waker2)) + .is_pending()); + assert!(Pin::new(&mut l3) + .poll(&mut Context::from_waker(&waker3)) + .is_pending()); + + assert_eq!(event.notify(usize::MAX), 3); + assert_eq!(&*v.lock().unwrap(), &[1, 2, 3]); + + assert!(Pin::new(&mut l1) + .poll(&mut Context::from_waker(&waker1)) + .is_ready()); + assert!(Pin::new(&mut l2) + .poll(&mut Context::from_waker(&waker2)) + .is_ready()); + assert!(Pin::new(&mut l3) + .poll(&mut Context::from_waker(&waker3)) + .is_ready()); +} diff --git a/external/vendor/grounded/.cargo-checksum.json b/external/vendor/grounded/.cargo-checksum.json new file mode 100644 index 0000000000..f754c5afbc --- /dev/null +++ b/external/vendor/grounded/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo_vcs_info.json":"a4d17e01a46bb460ce118b74646f5452648e8c212ab0e7592ca3cfff1b4129b0","Cargo.toml":"75d048b8bbab714d8d5805256112796fdf5f7662cb11b8a88a4fac43711bf571","Cargo.toml.orig":"fd0120e7f096d454aaca648404c1817ac376ed9bd5d110e4b34cda40c5460f78","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"177540cad091a40e8071db310bc3b6115c4e329a92a234609b60c154b008a888","README.md":"34e8e5fa08d2735ad5dda6084c09978c3978e5886db0cdb370d4767080c39abb","src/alloc_single.rs":"40de300a511817397517ef61e1a7efcea5537756d032c1c1621dac9f44d3a1aa","src/const_init.rs":"ee73477ba827e16f67bd7e581da438eb64145ad907919e4f4508de7a27904800","src/lib.rs":"7cc59f73fe1f3aeb901f6f64fee6d6e1ff05f6c371f0ee3af3c1665bf59f96b9","src/uninit.rs":"fd67733928bf0102f04ee4e934973cf94c20238612803fdfc3aa38d5360e5155"},"package":"917d82402c7eb9755fdd87d52117701dae9e413a6abb309fac2a13af693b6080"} \ No newline at end of file diff --git a/external/vendor/grounded/.cargo_vcs_info.json b/external/vendor/grounded/.cargo_vcs_info.json new file mode 100644 index 0000000000..11525e78f0 --- /dev/null +++ b/external/vendor/grounded/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "4a4acb27fac0ab3559ae9b822c3a8bc42a25cc4d" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/external/vendor/grounded/Cargo.toml b/external/vendor/grounded/Cargo.toml new file mode 100644 index 0000000000..3917086abd --- /dev/null +++ b/external/vendor/grounded/Cargo.toml @@ -0,0 +1,37 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +name = "grounded" +version = "0.2.0" +authors = ["James Munns "] +description = "A toolkit for managing unsafe statics" +documentation = "https://docs.rs/grounded/" +readme = "README.md" +license = "MIT OR Apache-2.0" +repository = "https://github.com/jamesmunns/grounded" + +[package.metadata.docs.rs] +features = ["cas"] +rustdoc-args = [ + "--cfg", + "doc_cfg", +] + +[dependencies.portable-atomic] +version = "1.3" +default-features = false + +[features] +cas = ["portable-atomic/require-cas"] +critical-section = ["portable-atomic/critical-section"] +default = [] diff --git a/external/vendor/grounded/Cargo.toml.orig b/external/vendor/grounded/Cargo.toml.orig new file mode 100644 index 0000000000..19174498c9 --- /dev/null +++ b/external/vendor/grounded/Cargo.toml.orig @@ -0,0 +1,25 @@ +[package] +name = "grounded" +version = "0.2.0" +authors = ["James Munns "] +edition = "2021" +readme = "README.md" +repository = "https://github.com/jamesmunns/grounded" +description = "A toolkit for managing unsafe statics" +license = "MIT OR Apache-2.0" +documentation = "https://docs.rs/grounded/" + +[dependencies.portable-atomic] +version = "1.3" +default-features = false + +[features] +default = [] +# components that require compare-and-swap operations +cas = ["portable-atomic/require-cas"] +# Allow for use on non-atomic systems by use of critical-sections +critical-section = ["portable-atomic/critical-section"] + +[package.metadata.docs.rs] +features = ["cas"] +rustdoc-args = ["--cfg", "doc_cfg"] diff --git a/external/vendor/grounded/LICENSE-APACHE b/external/vendor/grounded/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/external/vendor/grounded/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/external/vendor/grounded/LICENSE-MIT b/external/vendor/grounded/LICENSE-MIT new file mode 100644 index 0000000000..ee10ccacc1 --- /dev/null +++ b/external/vendor/grounded/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2019 Anthony James Munns + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/external/vendor/grounded/README.md b/external/vendor/grounded/README.md
new file mode 100644
index 0000000000..a63d92f678
--- /dev/null
+++ b/external/vendor/grounded/README.md
@@ -0,0 +1,23 @@
+# Grounded
+
+Building blocks for handling potentially unsafe statics.
+
+This crate aims to provide useful and sound components that serve as building blocks for `static` datatypes that are common, and often necessary, in embedded systems.
+
+In some cases, fully safe methods and types will be provided. In other cases, "harm reduction" tools will be provided to make it easier to build sound abstractions and avoid undefined behavior.
+
+## License
+
+Licensed under either of
+
+- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
+  <http://www.apache.org/licenses/LICENSE-2.0>)
+- MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/external/vendor/grounded/src/alloc_single.rs b/external/vendor/grounded/src/alloc_single.rs
new file mode 100644
index 0000000000..a9c690ba86
--- /dev/null
+++ b/external/vendor/grounded/src/alloc_single.rs
@@ -0,0 +1,166 @@
+//! Utilities for allocating a single item, using a box-like smart pointer
+
+use core::{
+    ops::{Deref, DerefMut},
+    sync::atomic::Ordering,
+};
+use portable_atomic::AtomicBool;
+
+use crate::{const_init::ConstInit, uninit::GroundedCell};
+
+/// AllocSingle is our one-element allocator pool
+///
+/// If your type implements [ConstInit], consider using
+/// [AllocSingle::alloc_const_val] instead of [AllocSingle::alloc]
+/// to avoid unnecessary stack usage.
+///
+/// This does require use of CAS atomics. You must enable the `cas`
+/// feature, and if your target does not have native atomic CAS, you
+/// must also enable the `critical-section` feature.
+///
+/// ```rust
+/// use grounded::alloc_single::AllocSingle;
+///
+/// static SINGLE: AllocSingle<[u8; 256]> = AllocSingle::new();
+///
+/// // alloc a single item
+/// let mut s1 = SINGLE.alloc([4; 256]).unwrap();
+/// s1.iter().for_each(|b| assert_eq!(*b, 4));
+///
+/// // we can't alloc while `s1` is still live
+/// assert!(SINGLE.alloc([5; 256]).is_none());
+///
+/// // now drop it
+/// drop(s1);
+///
+/// // and we can alloc again
+/// let mut s2 = SINGLE.alloc([7; 256]).unwrap();
+/// s2.iter().for_each(|b| assert_eq!(*b, 7));
+/// ```
+pub struct AllocSingle<T> {
+    taken: AtomicBool,
+    storage: GroundedCell<T>,
+}
+
+impl<T> AllocSingle<T> {
+    /// Create a new, uninitialized, single-element allocation pool
+    pub const fn new() -> Self {
+        Self {
+            taken: AtomicBool::new(false),
+            storage: GroundedCell::uninit(),
+        }
+    }
+
+    /// Attempts to allocate a single item. Returns None and
+    /// discards `t` if an allocation is already live.
+    #[inline]
+    pub fn alloc(&self, t: T) -> Option<SingleBox<'_, T>> {
+        // Set taken, and if it was already taken before, we can't
+        // allocate
+        if self.taken.swap(true, Ordering::AcqRel) {
+            // already taken
+            return None;
+        }
+        let new = SingleBox { single: self };
+        // Initialize by moving t into the storage
+        unsafe {
+            new.as_ptr().write(t);
+        }
+        Some(new)
+    }
+}
+
+impl<T: ConstInit> AllocSingle<T> {
+    /// Attempts to allocate a single item, using `ConstInit::VAL` as
+    /// the initializer. Returns None if the item is already allocated
+    pub fn alloc_const_val(&self) -> Option<SingleBox<'_, T>> {
+        // Set taken, and if it was already taken before, we can't
+        // allocate
+        if self.taken.swap(true, Ordering::AcqRel) {
+            // already taken
+            return None;
+        }
+        let new = SingleBox { single: self };
+        // Initialize by writing t into the storage
+        unsafe {
+            new.as_ptr().write(T::VAL);
+        }
+        Some(new)
+    }
+}
+
+pub struct SingleBox<'a, T> {
+    single: &'a AllocSingle<T>,
+}
+
+impl<'a, T> SingleBox<'a, T> {
+    fn as_ptr(&self) -> *mut T {
+        self.single.storage.get()
+    }
+}
+
+impl<'a, T> Drop for SingleBox<'a, T> {
+    fn drop(&mut self) {
+        // When we drop the SingleBox, mark the AllocSingle as available again
+        unsafe { self.as_ptr().drop_in_place() }
+        self.single.taken.store(false, Ordering::Release);
+    }
+}
+
+impl<'a, T> Deref for SingleBox<'a, T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.as_ptr() }
+    }
+}
+
+impl<'a, T> DerefMut for SingleBox<'a, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        unsafe { &mut *self.as_ptr() }
+    }
+}
+
+#[cfg(test)]
+pub mod test {
+    use super::AllocSingle;
+    use crate::const_init::ConstInit;
+    use core::ops::Deref;
+
+    #[derive(Debug)]
+    struct Demo([u8; 512]);
+
+    impl ConstInit for Demo {
+        const VAL: Self = Demo([44u8; 512]);
+    }
+
+    #[test]
+    fn smoke() {
+        static SINGLE: AllocSingle<[u8; 1024]> = AllocSingle::new();
+        static SINGLE_DEMO: AllocSingle<Demo> = AllocSingle::new();
+
+        {
+            let buf = [0xAF; 1024];
+            let mut bx = SINGLE.alloc(buf).unwrap();
+            println!("{:?}", bx.as_slice());
+            bx.iter_mut().for_each(|b| *b = 123);
+            println!("{:?}", bx.as_slice());
+
+            // Second alloc fails
+            let buf2 = [0x01; 1024];
+            assert!(SINGLE.alloc(buf2).is_none());
+        }
+
+        // bx is dropped because we left scope, which means we can
+        // alloc again
+        let buf3 = [0x42; 1024];
+        let mut bx2 = SINGLE.alloc(buf3).unwrap();
+        println!("{:?}", bx2.as_slice());
+        bx2.iter_mut().for_each(|b| *b = 231);
+        println!("{:?}", bx2.as_slice());
+
+        // look ma no stack
+        let bx3 = SINGLE_DEMO.alloc_const_val().unwrap();
+        println!("{:?}", bx3.deref());
+    }
+}
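The file above (`alloc_single.rs`) and the one that follows (`const_init.rs`) compose: a type that implements `ConstInit` can be materialized directly inside an `AllocSingle` slot without ever being built on the stack. A minimal sketch (an editor's illustration, not part of the vendored sources; the `SensorBuf` type and its field values are hypothetical, and the crate's `cas` feature is assumed to be enabled):

```rust
use grounded::{alloc_single::AllocSingle, const_init::ConstInit};

// Hypothetical user type with a const default value.
struct SensorBuf {
    samples: [u16; 32],
    count: usize,
}

impl ConstInit for SensorBuf {
    const VAL: Self = SensorBuf { samples: [0; 32], count: 0 };
}

// One statically allocated slot (requires the `cas` feature).
static SLOT: AllocSingle<SensorBuf> = AllocSingle::new();

fn demo() {
    // `ConstInit::VAL` is written directly into the static storage,
    // so no `SensorBuf` is ever constructed on the stack first.
    let buf = SLOT.alloc_const_val().unwrap();
    assert_eq!(buf.count, 0);

    // The slot is exclusive while `buf` is live...
    assert!(SLOT.alloc_const_val().is_none());

    // ...and dropping the `SingleBox` releases it for reuse.
    drop(buf);
    assert!(SLOT.alloc_const_val().is_some());
}
```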
diff --git a/external/vendor/grounded/src/const_init.rs b/external/vendor/grounded/src/const_init.rs
new file mode 100644
index 0000000000..8d8a2f6c5c
--- /dev/null
+++ b/external/vendor/grounded/src/const_init.rs
@@ -0,0 +1,50 @@
+//! Const Init
+//!
+//! A trait that is like `Default`, but const
+
+/// A trait that is like `Default`, but const
+pub trait ConstInit {
+    /// The constant default value
+    const VAL: Self;
+}
+
+// Here's some impls that roughly match the default
+// value of these types
+
+macro_rules! impl_const_init_for {
+    ($(($tyname:ty, $val:expr),)+) => {
+        $(
+            impl ConstInit for $tyname {
+                const VAL: Self = $val;
+            }
+        )+
+    };
+}
+
+impl_const_init_for! {
+    (u8, 0),
+    (u16, 0),
+    (u32, 0),
+    (u64, 0),
+    (u128, 0),
+    (i8, 0),
+    (i16, 0),
+    (i32, 0),
+    (i64, 0),
+    (i128, 0),
+    (f32, 0.0),
+    (f64, 0.0),
+    (bool, false),
+    ((), ()),
+}
+
+impl<T, const N: usize> ConstInit for [T; N]
+where
+    T: ConstInit,
+{
+    const VAL: Self = [T::VAL; N];
+}
+
+impl<T> ConstInit for Option<T> {
+    const VAL: Self = None;
+}
diff --git a/external/vendor/grounded/src/lib.rs b/external/vendor/grounded/src/lib.rs
new file mode 100644
index 0000000000..1ced4646fd
--- /dev/null
+++ b/external/vendor/grounded/src/lib.rs
@@ -0,0 +1,8 @@
+#![cfg_attr(not(test), no_std)]
+#![doc = include_str!("../README.md")]
+
+pub mod const_init;
+pub mod uninit;
+
+#[cfg(feature = "cas")]
+pub mod alloc_single;
diff --git a/external/vendor/grounded/src/uninit.rs b/external/vendor/grounded/src/uninit.rs
new file mode 100644
index 0000000000..68965bf650
--- /dev/null
+++ b/external/vendor/grounded/src/uninit.rs
@@ -0,0 +1,317 @@
+//! Helpers for dealing with statics that are (potentially) uninitialized at the
+//! start of a program.
+
+use core::{cell::UnsafeCell, mem::MaybeUninit};
+
+use crate::const_init::ConstInit;
+
+/// ## GroundedCell
+///
+/// [GroundedCell] is a type that contains a single `T`. The contained T is wrapped
+/// with:
+///
+/// * An [UnsafeCell] - as synchronization *must* be provided by the wrapping user
+/// * A [MaybeUninit] - as the contents will not be initialized at program start.
+///
+/// This type is intended to be used as a building block for other types, such as
+/// runtime initialized constants, data within uninitialized memory/linker sections,
+/// or similar.
+///
+/// This type may be used to provide inner mutability, when accessed through the
+/// [GroundedCell::get()] interface.
+///
+/// [GroundedCell] is also `#[repr(transparent)]`, as are `UnsafeCell` and `MaybeUninit`,
+/// which means that it will have the same layout and alignment as `T`.
+#[repr(transparent)]
+pub struct GroundedCell<T> {
+    inner: UnsafeCell<MaybeUninit<T>>,
+}
+
+unsafe impl<T> Sync for GroundedCell<T> {}
+
+impl<T: ConstInit> GroundedCell<T> {
+    /// Create a new GroundedCell with the cell initialized with
+    /// the value of [ConstInit::VAL].
+    ///
+    /// ```rust
+    /// use grounded::uninit::GroundedCell;
+    ///
+    /// static EXAMPLE: GroundedCell<[u8; 1024]> = GroundedCell::const_init();
+    /// ```
+    pub const fn const_init() -> Self {
+        Self {
+            inner: UnsafeCell::new(MaybeUninit::new(T::VAL)),
+        }
+    }
+}
+
+impl<T> GroundedCell<T> {
+    /// Create an uninitialized `GroundedCell`.
+    ///
+    /// ```rust
+    /// use grounded::uninit::GroundedCell;
+    ///
+    /// static EXAMPLE: GroundedCell<u32> = GroundedCell::uninit();
+    /// ```
+    pub const fn uninit() -> Self {
+        Self {
+            inner: UnsafeCell::new(MaybeUninit::uninit()),
+        }
+    }
+
+    /// Obtain a mutable pointer to the contained T.
+    ///
+    /// No claims are made on the validity of the T (it may be invalid or uninitialized),
+    /// and the caller is required to guarantee synchronization of access, e.g. guaranteeing
+    /// that access is shared XOR mutable for the duration of any references created from this
+    /// pointer.
+    ///
+    /// ```rust
+    /// use grounded::uninit::GroundedCell;
+    /// static EXAMPLE: GroundedCell<u32> = GroundedCell::uninit();
+    ///
+    /// let ptr: *mut u32 = EXAMPLE.get();
+    /// assert_ne!(core::ptr::null_mut(), ptr);
+    /// ```
+    pub fn get(&self) -> *mut T {
+        let mu_ptr: *mut MaybeUninit<T> = self.inner.get();
+        let t_ptr: *mut T = mu_ptr.cast::<T>();
+        t_ptr
+    }
+}
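+
+// Editor's illustration (not part of the upstream source): the intended
+// usage pattern for `GroundedCell` is "initialize once, then share", with
+// the caller supplying all synchronization. A single-threaded sketch:
+#[cfg(test)]
+mod grounded_cell_example {
+    use super::GroundedCell;
+
+    static SLOT: GroundedCell<u32> = GroundedCell::uninit();
+
+    #[test]
+    fn init_then_read() {
+        let p: *mut u32 = SLOT.get();
+        // SAFETY: this test is the only accessor of `SLOT`, so the write
+        // and the subsequent read cannot race or alias anything else.
+        unsafe {
+            p.write(42);
+            assert_eq!(p.read(), 42);
+        }
+    }
+}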
+
+/// ## GroundedArrayCell
+///
+/// [GroundedArrayCell] is a type that contains a contiguous array of `[T; N]`.
+/// The contained [T; N] is wrapped with:
+///
+/// * An [UnsafeCell] - as synchronization *must* be provided by the wrapping user
+/// * A [MaybeUninit] - as the contents will not be initialized at program start.
+///
+/// This type is intended to be used as a building block for other types, such as
+/// runtime initialized constants, data within uninitialized memory/linker sections,
+/// or similar.
+///
+/// This type may be used to provide inner mutability, when accessed through the
+/// [GroundedArrayCell::get_ptr_len()] interface.
+///
+/// [GroundedArrayCell] is also `#[repr(transparent)]`, as are `UnsafeCell` and `MaybeUninit`,
+/// which means that it will have the same layout and alignment as `[T; N]`.
+#[repr(transparent)]
+pub struct GroundedArrayCell<T, const N: usize> {
+    inner: UnsafeCell<MaybeUninit<[T; N]>>,
+}
+
+unsafe impl<T, const N: usize> Sync for GroundedArrayCell<T, N> {}
+
+impl<T: ConstInit, const N: usize> GroundedArrayCell<T, N> {
+    /// Create a new GroundedArrayCell with all cells initialized with
+    /// the value of [ConstInit::VAL].
+    ///
+    /// If your type's implementation of [ConstInit] happens to be all zeroes, like it
+    /// is for many integer and boolean primitives, it is likely your static will end
+    /// up in `.bss`.
+    ///
+    /// ```rust
+    /// use grounded::uninit::GroundedArrayCell;
+    ///
+    /// static EXAMPLE: GroundedArrayCell<u8, 128> = GroundedArrayCell::const_init();
+    /// ```
+    pub const fn const_init() -> Self {
+        Self {
+            inner: UnsafeCell::new(MaybeUninit::new(<[T; N] as ConstInit>::VAL)),
+        }
+    }
+}
+
+impl<T, const N: usize> GroundedArrayCell<T, N> {
+    /// Create an uninitialized `GroundedArrayCell`.
+    ///
+    /// ```rust
+    /// use grounded::uninit::GroundedArrayCell;
+    ///
+    /// static EXAMPLE: GroundedArrayCell<u8, 128> = GroundedArrayCell::uninit();
+    /// ```
+    pub const fn uninit() -> Self {
+        Self {
+            inner: UnsafeCell::new(MaybeUninit::uninit()),
+        }
+    }
+
+    /// Initialize each element from the provided value, if `T: Copy`.
+    ///
+    /// ## Safety
+    ///
+    /// The caller must ensure that no other access is made to the data contained within this
+    /// cell for the duration of this function
+    #[inline]
+    pub unsafe fn initialize_all_copied(&self, val: T)
+    where
+        T: Copy,
+    {
+        let (mut ptr, len) = self.get_ptr_len();
+        let end = ptr.add(len);
+        while ptr != end {
+            ptr.write(val);
+            ptr = ptr.add(1);
+        }
+    }
+
+    /// Initialize each item, using a provided closure on a per-element basis
+    ///
+    /// ## Safety
+    ///
+    /// The caller must ensure that no other access is made to the data contained within this
+    /// cell for the duration of this function
+    #[inline]
+    pub unsafe fn initialize_all_with<F: FnMut() -> T>(&self, mut f: F) {
+        let (mut ptr, len) = self.get_ptr_len();
+        let end = ptr.add(len);
+        while ptr != end {
+            ptr.write(f());
+            ptr = ptr.add(1);
+        }
+    }
+
+    /// Obtain a mutable starting pointer to the contained [T; N].
+    ///
+    /// No claims are made on the validity of the [T; N] (they may be partially or wholly
+    /// invalid or uninitialized), and the caller is required to guarantee synchronization of
+    /// access, e.g. guaranteeing that access is shared XOR mutable for the duration of any
+    /// references (including slices) created from this pointer.
+    ///
+    /// ```rust
+    /// use grounded::uninit::GroundedArrayCell;
+    /// static EXAMPLE: GroundedArrayCell<u8, 128> = GroundedArrayCell::uninit();
+    ///
+    /// let ptr: *mut u8 = EXAMPLE.as_mut_ptr();
+    /// assert_ne!(core::ptr::null_mut(), ptr);
+    /// ```
+    #[inline]
+    pub fn as_mut_ptr(&self) -> *mut T {
+        let mu_ptr: *mut MaybeUninit<[T; N]> = self.inner.get();
+        let arr_ptr: *mut [T; N] = mu_ptr.cast::<[T; N]>();
+        let t_ptr: *mut T = arr_ptr.cast::<T>();
+        t_ptr
+    }
+
+    /// Obtain a mutable starting pointer and length to the contained [T; N].
+    ///
+    /// No claims are made on the validity of the [T; N] (they may be partially or wholly
+    /// invalid or uninitialized), and the caller is required to guarantee synchronization of
+    /// access, e.g. guaranteeing that access is shared XOR mutable for the duration of any
+    /// references (including slices) created from this pointer.
+    ///
+    /// ```rust
+    /// use grounded::uninit::GroundedArrayCell;
+    /// static EXAMPLE: GroundedArrayCell<u8, 128> = GroundedArrayCell::uninit();
+    ///
+    /// let (ptr, len): (*mut u8, usize) = EXAMPLE.get_ptr_len();
+    /// assert_ne!(core::ptr::null_mut(), ptr);
+    /// assert_eq!(len, 128);
+    /// ```
+    ///
+    /// ## NOTE
+    ///
+    /// This method is suggested to only be used for actions such as initializing the entire
+    /// range. If you are building a data structure that provides partial access safely, such
+    /// as a channel, bip-buffer, or similar, consider using one of the following methods, which
+    /// can help avoid cases where strict provenance is invalidated by creation of an aliasing
+    /// slice:
+    ///
+    /// * For getting a single element:
+    ///     * [Self::get_element_unchecked()]
+    ///     * [Self::get_element_mut_unchecked()]
+    /// * For getting a subslice:
+    ///     * [Self::get_subslice_unchecked()]
+    ///     * [Self::get_subslice_mut_unchecked()]
+    #[inline]
+    pub fn get_ptr_len(&self) -> (*mut T, usize) {
+        (self.as_mut_ptr(), N)
+    }
+
+    /// Obtain a reference to a single element, which can be thought of as `&data[offset]`.
+    ///
+    /// The reference is created **without** creating the entire slice this cell represents.
+    /// This is important, if a mutable reference of a disjoint region is currently live.
+    ///
+    /// ## Safety
+    ///
+    /// The caller **must** ensure all of the following:
+    ///
+    /// * The selected element has been initialized with a valid value prior to calling
+    ///   this function
+    /// * No `&mut` slices or references may overlap the produced reference for the duration the reference is live
+    /// * No modifications (even via pointers) are made to the element pointed to
+    ///   while the reference is live
+    /// * `offset` is < N
+    #[inline]
+    pub unsafe fn get_element_unchecked(&self, offset: usize) -> &'_ T {
+        &*self.as_mut_ptr().add(offset)
+    }
+
+    /// Obtain a mutable reference to a single element, which can be thought of as `&mut data[offset]`.
+    ///
+    /// The reference is created **without** creating the entire slice this cell represents.
+    /// This is important, if a mutable reference of a disjoint region is currently live.
+    ///
+    /// ## Safety
+    ///
+    /// The caller **must** ensure all of the following:
+    ///
+    /// * The selected element has been initialized with a valid value prior to calling
+    ///   this function
+    /// * No slices or references of any kind may overlap the produced reference for the duration
+    ///   the reference is live
+    /// * No modifications (even via pointers) are made to the element pointed to
+    ///   while the reference is live
+    /// * `offset` is < N
+    #[allow(clippy::mut_from_ref)]
+    #[inline]
+    pub unsafe fn get_element_mut_unchecked(&self, offset: usize) -> &mut T {
+        &mut *self.as_mut_ptr().add(offset)
+    }
+
+    /// Obtain a subslice starting at `offset`, of length `len`, which
+    /// can be thought of as `&data[offset..][..len]`.
+    ///
+    /// The subslice is created **without** creating the entire slice this cell represents.
+    /// This is important, if a mutable reference of a disjoint region is currently live.
+    ///
+    /// ## Safety
+    ///
+    /// The caller **must** ensure all of the following:
+    ///
+    /// * All elements in this region have been initialized with a valid value prior to calling
+    ///   this function
+    /// * No `&mut` slices may overlap the produced slice for the duration the slice is live
+    /// * No modifications (even via pointers) are made to data within the range of this slice
+    ///   while the slice is live
+    /// * `offset` and `offset + len` are <= N
+    #[inline]
+    pub unsafe fn get_subslice_unchecked(&self, offset: usize, len: usize) -> &'_ [T] {
+        core::slice::from_raw_parts(self.as_mut_ptr().add(offset), len)
+    }
+
+    /// Obtain a mutable subslice starting at `offset`, of length `len`, which
+    /// can be thought of as `&mut data[offset..][..len]`.
+    ///
+    /// The subslice is created **without** creating the entire slice this cell represents.
+    /// This is important, if ANY reference of a disjoint region is currently live.
+    ///
+    /// ## Safety
+    ///
+    /// The caller **must** ensure all of the following:
+    ///
+    /// * All elements in this region have been initialized with a valid value prior to calling
+    ///   this function
+    /// * No `&` or `&mut` slices may overlap the produced slice for the duration the slice is live
+    /// * No modifications (even via pointers) are made to data within the range of this slice
+    ///   while the slice is live
+    /// * `offset` and `offset + len` are <= N
+    #[allow(clippy::mut_from_ref)]
+    #[inline]
+    pub unsafe fn get_subslice_mut_unchecked(&self, offset: usize, len: usize) -> &'_ mut [T] {
+        core::slice::from_raw_parts_mut(self.as_mut_ptr().add(offset), len)
+    }
+}
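The accessor split documented above is easiest to see in use. A short sketch (an editor's illustration, not part of the vendored sources; the buffer size and fill values are arbitrary): initialize the whole array once, then hand out two disjoint views without ever materializing the full slice.

```rust
use grounded::uninit::GroundedArrayCell;

static BUF: GroundedArrayCell<u8, 64> = GroundedArrayCell::uninit();

fn demo() {
    // SAFETY: this function is assumed to be the only accessor of `BUF`;
    // every element is initialized before any reference is created, and
    // the two subslices (0..32 and 32..64) are disjoint.
    unsafe {
        BUF.initialize_all_copied(0xAA);
        let left: &[u8] = BUF.get_subslice_unchecked(0, 32);
        let right: &mut [u8] = BUF.get_subslice_mut_unchecked(32, 32);
        right.fill(0x55);
        assert!(left.iter().all(|b| *b == 0xAA));
    }
}
```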
diff --git a/external/vendor/portable-atomic-util/.cargo-checksum.json b/external/vendor/portable-atomic-util/.cargo-checksum.json
new file mode 100644
index 0000000000..41b15fd212
--- /dev/null
+++ b/external/vendor/portable-atomic-util/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{".cargo_vcs_info.json":"8b3e9d3e1898ce2e488b123ed2f3b16e7ccc092b79c72da60159ac4c6c5e3b10","CHANGELOG.md":"85fdf8e820edd87073019ec27c85ad225cbe2f5f40fb07c179e83d9a7e54073e","Cargo.toml":"e7afe5c97eebabff0273878e4a9f8d15712ab2657e6e747da72fb2fae2077a90","Cargo.toml.orig":"0630231c18c8d93d913a2b48fcd13aae54588ce0af53c96f947ec6e649eb26bf","LICENSE-APACHE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"b7115dd8064dd2ba28e856bd82ca35801f122667f9509247b56e6cc9725a3778","build.rs":"2c6f887d6ff6c277254a19973318c92e3e99c3379f4c801a0931705a7c166eb8","src/arc.rs":"6f118a0c097dc70b3445f86e54cb7c8e473c20c0b4dfc971c97eaeb83390dc54","src/lib.rs":"7f7efc79e169422de23d8f469036267b4ef10d9df5f2587a353965394d7a6111","src/task.rs":"377a8f30d4b29eae997a554d7bebbf2127e409178dd2a7512cc7aa9c00fbc984","tests/arc.rs":"2c9144a45d0ce59d17b27aa332eaf048165e447ffb191fa0528ddeab8aaf92e6","version.rs":"07fb421f30f7be7788f85223bca1e38074e3ec8d0415abc8566f3ef9e3933f88"},"package":"d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"}
\ No newline at end of file
diff --git a/external/vendor/portable-atomic-util/.cargo_vcs_info.json b/external/vendor/portable-atomic-util/.cargo_vcs_info.json
new file mode 100644
index 0000000000..6106db81cd
--- /dev/null
+++ b/external/vendor/portable-atomic-util/.cargo_vcs_info.json
@@ -0,0 +1,6 @@
+{
+  "git": {
+    "sha1": "e7b7a56af43a2131a71e2e96c3f50b60eb4e3b6b"
+  },
+  "path_in_vcs": "portable-atomic-util"
+}
\ No newline at end of file
diff --git a/external/vendor/portable-atomic-util/CHANGELOG.md b/external/vendor/portable-atomic-util/CHANGELOG.md
new file mode 100644
index 0000000000..a8a302e007
--- /dev/null
+++ b/external/vendor/portable-atomic-util/CHANGELOG.md
@@ -0,0 +1,106 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](https://semver.org).
+
+Releases may be yanked if there is a security bug, a soundness bug, or a regression.
+
+## [Unreleased]
+
+## [0.2.4] - 2024-11-23
+
+- Add unstable `portable_atomic_unstable_coerce_unsized` cfg (requires Rust nightly). ([#195](https://github.com/taiki-e/portable-atomic/pull/195), thanks @brodycj)
+
+- Respect [`RUSTC_BOOTSTRAP=-1` recently added in nightly](https://github.com/rust-lang/rust/pull/132993) in rustc version detection. ([5b2847a](https://github.com/taiki-e/portable-atomic/commit/5b2847a8b99aa2a57a6c80f5a47327b2764f08cc))
+
+## [0.2.3] - 2024-10-17
+
+- Add `new_uninit`/`new_uninit_slice`/`assume_init` to `Arc` at Rust 1.36+. (align to the [std `Arc` change in Rust 1.82](https://github.com/rust-lang/rust/pull/129401)) ([362dc9a](https://github.com/taiki-e/portable-atomic/commit/362dc9af2779c81aa346e89c4d3f3eef71cf29ed))
+
+- Support `make_mut` on `Arc<[T]>` and `Arc<str>` at Rust 1.36+. (align to the [std `Arc` change in Rust 1.81](https://github.com/rust-lang/rust/pull/116113)) ([362dc9a](https://github.com/taiki-e/portable-atomic/commit/362dc9af2779c81aa346e89c4d3f3eef71cf29ed))
+
+## [0.2.2] - 2024-07-11
+
+- Fix [build issue with `esp` toolchain](https://github.com/taiki-e/semihosting/issues/11). ([f8ea85e](https://github.com/taiki-e/portable-atomic/commit/f8ea85e1aa46fa00bc865633fb40b05f8a0c823b))
+
+## [0.2.1] - 2024-06-22
+
+**Note:** This release has been yanked due to an issue fixed in 0.2.2.
+
+- Support `impl Error for Arc<T>` in no-std at Rust 1.81+. ([30b9f90](https://github.com/taiki-e/portable-atomic/commit/30b9f90346dfad14ab00f1c7e1f988f941330bcf))
+
+- Implement `Default` for `Arc<[T]>` and `Arc<str>` at Rust 1.51+. (align to the [std `Arc` change in Rust 1.80](https://github.com/rust-lang/rust/pull/124640)) ([c6ee296](https://github.com/taiki-e/portable-atomic/commit/c6ee29606984863d008c2cf2209751ed0fa43b14))
+
+- Implement `{AsFd, AsRawFd}` for `Arc<T>` on HermitOS. ([b778244](https://github.com/taiki-e/portable-atomic/commit/b778244917e17bfc431c9add4d028ff26d00e3b7))
+
+## [0.2.0] - 2024-05-07
+
+- Rewrite `Arc` based on `std::sync::Arc`'s implementation. ([#142](https://github.com/taiki-e/portable-atomic/pull/142))
+
+  This fixes accidental API differences with std ([#139](https://github.com/taiki-e/portable-atomic/issues/139), [#140](https://github.com/taiki-e/portable-atomic/issues/140)) and adds many missing APIs compared to std:
+  - Add `Arc::{downcast, into_inner, make_mut, new_cyclic}` ([#142](https://github.com/taiki-e/portable-atomic/pull/142))
+  - Implement `{fmt::Display, fmt::Pointer, Error, From<T>, From<Box<T>>, From<Cow<'_, T>>, AsFd, AsRawFd, AsHandle, AsSocket}` for `Arc<T>` ([#142](https://github.com/taiki-e/portable-atomic/pull/142), [78690d7](https://github.com/taiki-e/portable-atomic/commit/78690d7cad3b394119ea147c5773f67806a6ac09), [aba0930](https://github.com/taiki-e/portable-atomic/commit/aba0930269d7075b81810b49bbbbb6c5edc85ea0))
+  - Implement `{From<&[T]>, From<Vec<T>>, From<[T; N]>, FromIterator<T>}` for `Arc<[T]>` ([#142](https://github.com/taiki-e/portable-atomic/pull/142), [5e9f693](https://github.com/taiki-e/portable-atomic/commit/5e9f693dcb43c35187ca95ce1c824e0cb1d3c4f8))
+  - Implement `TryFrom<Arc<[T]>>` for `Arc<[T; N]>` ([#142](https://github.com/taiki-e/portable-atomic/pull/142))
+  - Implement `From<Arc<str>>` for `Arc<[u8]>` ([#142](https://github.com/taiki-e/portable-atomic/pull/142))
+  - Implement `{From<&str>, From<String>}` for `Arc<str>` ([#142](https://github.com/taiki-e/portable-atomic/pull/142))
+  - Implement `{Read, Write, Seek}` for `Arc<File>` ([591ece5](https://github.com/taiki-e/portable-atomic/commit/591ece5bde0f19f1895853791924ee55c51ee61e))
+  - Remove `T: UnwindSafe` bound from `impl UnwindSafe for Arc<T>` ([#142](https://github.com/taiki-e/portable-atomic/pull/142))
+
+- Add `task::Wake`. ([#145](https://github.com/taiki-e/portable-atomic/pull/145))
+
+  This is equivalent to `std::task::Wake`, but using `portable_atomic_util::Arc` as a reference-counted pointer.
+
+- Respect `RUSTC_WRAPPER` in rustc version detection.
+ +## [0.1.5] - 2023-12-17 + +- Improve offset calculation in `Arc::{into_raw,as_ptr,from_ptr}`. ([#141](https://github.com/taiki-e/portable-atomic/pull/141), thanks @gtsiam) + +## [0.1.4] - 2023-12-16 + +- Fix a bug where `Arc::{into_raw,as_ptr}` returned invalid pointers for larger alignment types. ([#138](https://github.com/taiki-e/portable-atomic/pull/138), thanks @notgull) + +## [0.1.3] - 2023-05-06 + +**Note:** This release has been yanked due to a bug fixed in 0.1.4. + +- Enable `portable-atomic`'s `require-cas` feature to display helpful error messages to users on targets requiring additional action on the user side to provide atomic CAS. ([#100](https://github.com/taiki-e/portable-atomic/pull/100)) + +## [0.1.2] - 2023-04-04 + +**Note:** This release has been yanked due to a bug fixed in 0.1.4. + +- Implement `AsRef`, `Borrow`, and `Unpin` on `Arc`. ([#92](https://github.com/taiki-e/portable-atomic/pull/92) [#93](https://github.com/taiki-e/portable-atomic/pull/93), thanks @notgull) + +## [0.1.1] - 2023-03-24 + +**Note:** This release has been yanked due to a bug fixed in 0.1.4. + +- Prevent weak counter overflow in `Arc::downgrade`. ([#83](https://github.com/taiki-e/portable-atomic/pull/83)) + + This fixes [a potential unsoundness recently found in the standard library's `Arc`](https://github.com/rust-lang/rust/issues/108706). + +## [0.1.0] - 2023-01-15 + +**Note:** This release has been yanked due to a bug fixed in 0.1.4. + +Initial release + +[Unreleased]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.4...HEAD +[0.2.4]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.3...portable-atomic-util-0.2.4 +[0.2.3]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.2...portable-atomic-util-0.2.3 +[0.2.2]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.1...portable-atomic-util-0.2.2 +[0.2.1]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.0...portable-atomic-util-0.2.1 +[0.2.0]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.5...portable-atomic-util-0.2.0 +[0.1.5]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.4...portable-atomic-util-0.1.5 +[0.1.4]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.3...portable-atomic-util-0.1.4 +[0.1.3]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.2...portable-atomic-util-0.1.3 +[0.1.2]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.1...portable-atomic-util-0.1.2 +[0.1.1]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.0...portable-atomic-util-0.1.1 +[0.1.0]: https://github.com/taiki-e/portable-atomic/releases/tag/portable-atomic-util-0.1.0 diff --git a/external/vendor/portable-atomic-util/Cargo.toml b/external/vendor/portable-atomic-util/Cargo.toml new file mode 100644 index 0000000000..353f7a2a20 --- /dev/null +++ b/external/vendor/portable-atomic-util/Cargo.toml @@ -0,0 +1,195 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). 
+# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.34" +name = "portable-atomic-util" +version = "0.2.4" +build = "build.rs" +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Synchronization primitives built with portable-atomic. +""" +readme = "README.md" +keywords = ["atomic"] +categories = [ + "concurrency", + "data-structures", + "embedded", + "no-std", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/taiki-e/portable-atomic" + +[package.metadata.cargo_check_external_types] +allowed_external_types = [] + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] + +[lib] +name = "portable_atomic_util" +path = "src/lib.rs" +doc-scrape-examples = false + +[[test]] +name = "arc" +path = "tests/arc.rs" + +[dependencies.portable-atomic] +version = "1.5.1" +features = ["require-cas"] +default-features = false + +[dev-dependencies.build-context] +version = "0.1" + +[features] +alloc = [] +default = [] +std = ["alloc"] + +[lints.clippy] +all = "warn" +as_ptr_cast_mut = "warn" +as_underscore = "warn" +default_union_representation = "warn" +inline_asm_x86_att_syntax = "warn" +pedantic = "warn" +trailing_empty_array = "warn" +transmute_undefined_repr = "warn" +undocumented_unsafe_blocks = "warn" + +[lints.clippy.bool_assert_comparison] +level = "allow" +priority = 1 + +[lints.clippy.borrow_as_ptr] +level = "allow" +priority = 1 + +[lints.clippy.cast_lossless] +level = "allow" +priority = 1 + +[lints.clippy.declare_interior_mutable_const] +level = "allow" +priority = 1 + +[lints.clippy.doc_markdown] +level = "allow" +priority = 1 + +[lints.clippy.float_cmp] +level = "allow" +priority = 1 + +[lints.clippy.incompatible_msrv] +level = "allow" +priority = 1 + +[lints.clippy.lint_groups_priority] +level = "allow" +priority = 1 + +[lints.clippy.manual_assert] +level = "allow" +priority = 1 + +[lints.clippy.manual_range_contains] +level = "allow" +priority = 1 + +[lints.clippy.missing_errors_doc] +level = "allow" +priority = 1 + +[lints.clippy.module_name_repetitions] +level = "allow" +priority = 1 + +[lints.clippy.naive_bytecount] +level = "allow" +priority = 1 + +[lints.clippy.nonminimal_bool] +level = "allow" +priority = 1 + +[lints.clippy.range_plus_one] +level = "allow" +priority = 1 + +[lints.clippy.similar_names] +level = "allow" +priority = 1 + +[lints.clippy.single_match] +level = "allow" +priority = 1 + +[lints.clippy.single_match_else] +level = "allow" +priority = 1 + +[lints.clippy.struct_excessive_bools] +level = "allow" +priority = 1 + +[lints.clippy.struct_field_names] +level = "allow" +priority = 1 + +[lints.clippy.too_many_arguments] +level = "allow" +priority = 1 + +[lints.clippy.too_many_lines] +level = "allow" +priority = 1 + +[lints.clippy.type_complexity] +level = "allow" +priority = 1 + +[lints.clippy.unreadable_literal] +level = "allow" +priority = 1 + +[lints.rust] +deprecated_safe = "warn" +improper_ctypes = "warn" +improper_ctypes_definitions = "warn" +non_ascii_idents = "warn" +rust_2018_idioms = "warn" +single_use_lifetimes = "warn" +unreachable_pub = "warn" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = [ + 'cfg(target_arch,values("xtensa"))', + 'cfg(target_os,values("psx"))', + 'cfg(target_env,values("psx"))', + 'cfg(target_feature,values("lse2","lse128","rcpc3"))', + 'cfg(target_feature,values("quadword-atomics"))', + 'cfg(target_feature,values("zaamo","zabha"))', + 
'cfg(target_pointer_width,values("128"))', + "cfg(portable_atomic_test_outline_atomics_detect_false,qemu,valgrind)", + "cfg(portable_atomic_no_outline_atomics,portable_atomic_outline_atomics)", + "cfg(portable_atomic_unstable_coerce_unsized)", +] diff --git a/external/vendor/portable-atomic-util/Cargo.toml.orig b/external/vendor/portable-atomic-util/Cargo.toml.orig new file mode 100644 index 0000000000..23bc4f2daf --- /dev/null +++ b/external/vendor/portable-atomic-util/Cargo.toml.orig @@ -0,0 +1,52 @@ +[package] +name = "portable-atomic-util" +version = "0.2.4" #publish:version +edition = "2018" +rust-version = "1.34" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/taiki-e/portable-atomic" +keywords = ["atomic"] +categories = ["concurrency", "data-structures", "embedded", "no-std"] +description = """ +Synchronization primitives built with portable-atomic. +""" + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] + +[package.metadata.cargo_check_external_types] +# The following are external types that are allowed to be exposed in our public API. +allowed_external_types = [ +] + +[lib] +doc-scrape-examples = false + +[features] +default = [] + +# Use `std`. +# +# Note: +# - This implicitly enables the `alloc` feature. +std = ["alloc"] + +# Use `alloc`. +# +# Note: +# - The MSRV when this feature is enabled and the `std` feature is *not* enabled is Rust 1.36 that `alloc` crate stabilized. +alloc = [] + +# TODO: https://github.com/taiki-e/portable-atomic/issues/1 +# # Provides generic `atomic` type. +# generic = [] + +[dependencies] +portable-atomic = { version = "1.5.1", path = "..", default-features = false, features = ["require-cas"] } + +[dev-dependencies] +build-context = "0.1" + +[lints] +workspace = true diff --git a/external/vendor/portable-atomic-util/LICENSE-APACHE b/external/vendor/portable-atomic-util/LICENSE-APACHE new file mode 100644 index 0000000000..f433b1a53f --- /dev/null +++ b/external/vendor/portable-atomic-util/LICENSE-APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/external/vendor/portable-atomic-util/LICENSE-MIT b/external/vendor/portable-atomic-util/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/external/vendor/portable-atomic-util/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/external/vendor/portable-atomic-util/README.md b/external/vendor/portable-atomic-util/README.md new file mode 100644 index 0000000000..199d10d96e --- /dev/null +++ b/external/vendor/portable-atomic-util/README.md @@ -0,0 +1,77 @@ +# portable-atomic-util + +[![crates.io](https://img.shields.io/crates/v/portable-atomic-util?style=flat-square&logo=rust)](https://crates.io/crates/portable-atomic-util) +[![docs.rs](https://img.shields.io/badge/docs.rs-portable--atomic--util-blue?style=flat-square&logo=docs.rs)](https://docs.rs/portable-atomic-util) +[![license](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue?style=flat-square)](#license) +[![msrv](https://img.shields.io/badge/msrv-1.34-blue?style=flat-square&logo=rust)](https://www.rust-lang.org) +[![github actions](https://img.shields.io/github/actions/workflow/status/taiki-e/portable-atomic/ci.yml?branch=main&style=flat-square&logo=github)](https://github.com/taiki-e/portable-atomic/actions) +[![cirrus ci](https://img.shields.io/cirrus/github/taiki-e/portable-atomic/main?style=flat-square&logo=cirrusci)](https://cirrus-ci.com/github/taiki-e/portable-atomic) + + +Synchronization primitives built with [portable-atomic]. + +- Provide `Arc`. (optional, requires the `std` or `alloc` feature) +- Provide `task::Wake`. (optional, requires the `std` or `alloc` feature) + + +See [#1] for other primitives being considered for addition to this crate. + +## Optional features + +- **`std`**
+ Use `std`. + + Note: + - This implicitly enables the `alloc` feature. + +- **`alloc`**
+  Use `alloc`.
+
+  Note:
+  - The MSRV when this feature is enabled and the `std` feature is *not* enabled is Rust 1.36, in which the `alloc` crate was stabilized.
+
+[portable-atomic]: https://github.com/taiki-e/portable-atomic
+[#1]: https://github.com/taiki-e/portable-atomic/issues/1
+
+## Optional cfg
+
+One way to enable a cfg is to set [rustflags in the cargo config](https://doc.rust-lang.org/cargo/reference/config.html#targettriplerustflags):
+
+```toml
+# .cargo/config.toml
+[target.<triple>]
+rustflags = ["--cfg", "portable_atomic_unstable_coerce_unsized"]
+```
+
+Or set environment variable:
+
+```sh
+RUSTFLAGS="--cfg portable_atomic_unstable_coerce_unsized" cargo ...
+```
+
+- 
**`--cfg portable_atomic_unstable_coerce_unsized`**
+  Support coercing of `Arc<T>` to `Arc<U>` as in `std::sync::Arc`.
+
+  This cfg requires Rust nightly because this coercing requires the [unstable `CoerceUnsized` trait](https://doc.rust-lang.org/nightly/core/ops/trait.CoerceUnsized.html).
+
+  See [this issue comment](https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569) for another known workaround.
+
+  **Note:** This cfg is unstable and outside of the normal semver guarantees and minor or patch versions of portable-atomic-util may make breaking changes to them at any time.
+
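+For orientation, a sketch of what the cfg unlocks (an editor's example, not
+part of the upstream README; it assumes a nightly toolchain with the cfg set
+as shown above):
+
+```rust
+use portable_atomic_util::Arc;
+
+let sized: Arc<[u8; 4]> = Arc::new([1, 2, 3, 4]);
+// With `portable_atomic_unstable_coerce_unsized`, this unsized coercion
+// compiles just as it does for `std::sync::Arc`:
+let slice: Arc<[u8]> = sized;
+assert_eq!(slice.len(), 4);
+```
+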
+ + // alloc stabilized in Rust 1.36 (nightly-2019-04-15) https://github.com/rust-lang/rust/pull/59675 + if !version.probe(36, 2019, 4, 14) { + println!("cargo:rustc-cfg=portable_atomic_no_alloc"); + } + // std::{future,task} stabilized in Rust 1.36 (nightly-2019-04-25) https://github.com/rust-lang/rust/pull/59739 + if !version.probe(36, 2019, 4, 24) { + println!("cargo:rustc-cfg=portable_atomic_no_futures_api"); + } + // {read,write}_vectored stabilized in Rust 1.36 (nightly-2019-04-30) https://github.com/rust-lang/rust/pull/60334 + if !version.probe(36, 2019, 4, 29) { + println!("cargo:rustc-cfg=portable_atomic_no_io_vec"); + } + // MaybeUninit stabilized in Rust 1.36 (nightly-2019-05-21) https://github.com/rust-lang/rust/pull/60445 + if !version.probe(36, 2019, 5, 20) { + println!("cargo:rustc-cfg=portable_atomic_no_maybe_uninit"); + } + // Layout::{align_to,pad_to_align,extend,array} stabilized in Rust 1.44 (nightly-2020-04-22) https://github.com/rust-lang/rust/pull/69362 + if !version.probe(44, 2020, 4, 21) { + println!("cargo:rustc-cfg=portable_atomic_no_alloc_layout_extras"); + } + // track_caller stabilized in Rust 1.46 (nightly-2020-07-02): https://github.com/rust-lang/rust/pull/72445 + if !version.probe(46, 2020, 7, 1) { + println!("cargo:rustc-cfg=portable_atomic_no_track_caller"); + } + // min_const_generics stabilized in Rust 1.51 (nightly-2020-12-28): https://github.com/rust-lang/rust/pull/79135 + if !version.probe(51, 2020, 12, 27) { + println!("cargo:rustc-cfg=portable_atomic_no_min_const_generics"); + } + // unsafe_op_in_unsafe_fn stabilized in Rust 1.52 (nightly-2021-03-11): https://github.com/rust-lang/rust/pull/79208 + if !version.probe(52, 2021, 3, 10) { + println!("cargo:rustc-cfg=portable_atomic_no_unsafe_op_in_unsafe_fn"); + } + // https://github.com/rust-lang/rust/pull/84662 merged in Rust 1.56 (nightly-2021-08-02). + if !version.probe(56, 2021, 8, 1) { + println!("cargo:rustc-cfg=portable_atomic_no_core_unwind_safe"); + } + // io_safety stabilized in Rust 1.63 (nightly-2022-06-16): https://github.com/rust-lang/rust/pull/95118 + if !version.probe(63, 2022, 6, 15) { + println!("cargo:rustc-cfg=portable_atomic_no_io_safety"); + } + // error_in_core stabilized in Rust 1.81 (nightly-2024-06-09): https://github.com/rust-lang/rust/pull/125951 + if !version.probe(81, 2024, 6, 8) { + println!("cargo:rustc-cfg=portable_atomic_no_error_in_core"); + } + + if version.nightly { + // `cfg(sanitize = "..")` is not stabilized. + let sanitize = env::var("CARGO_CFG_SANITIZE").unwrap_or_default(); + if sanitize.contains("thread") { + println!("cargo:rustc-cfg=portable_atomic_sanitize_thread"); + } + } +} diff --git a/external/vendor/portable-atomic-util/src/arc.rs b/external/vendor/portable-atomic-util/src/arc.rs new file mode 100644 index 0000000000..56f4c4b7d9 --- /dev/null +++ b/external/vendor/portable-atomic-util/src/arc.rs @@ -0,0 +1,3114 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// This module is based on alloc::sync::Arc. +// +// The code has been adjusted to work with stable Rust (and optionally support some unstable features). +// +// Source: https://github.com/rust-lang/rust/blob/a0c2aba29aa9ea50a7c45c3391dd446f856bef7b/library/alloc/src/sync.rs. 
+//
+// Copyright & License of the original code:
+// - https://github.com/rust-lang/rust/blob/a0c2aba29aa9ea50a7c45c3391dd446f856bef7b/COPYRIGHT
+// - https://github.com/rust-lang/rust/blob/a0c2aba29aa9ea50a7c45c3391dd446f856bef7b/LICENSE-APACHE
+// - https://github.com/rust-lang/rust/blob/a0c2aba29aa9ea50a7c45c3391dd446f856bef7b/LICENSE-MIT
+
+#![allow(clippy::must_use_candidate)] // align to alloc::sync::Arc
+#![allow(clippy::undocumented_unsafe_blocks)] // TODO: most of the unsafe code was inherited from alloc::sync::Arc
+
+use portable_atomic::{
+    self as atomic, hint,
+    Ordering::{Acquire, Relaxed, Release},
+};
+
+use alloc::{alloc::handle_alloc_error, boxed::Box};
+#[cfg(not(portable_atomic_no_alloc_layout_extras))]
+use alloc::{
+    borrow::{Cow, ToOwned},
+    string::String,
+    vec::Vec,
+};
+use core::{
+    alloc::Layout,
+    any::Any,
+    borrow, cmp, fmt,
+    hash::{Hash, Hasher},
+    isize,
+    marker::PhantomData,
+    mem::{self, align_of_val, size_of_val, ManuallyDrop},
+    ops::Deref,
+    pin::Pin,
+    ptr::{self, NonNull},
+    usize,
+};
+#[cfg(portable_atomic_unstable_coerce_unsized)]
+use core::{marker::Unsize, ops::CoerceUnsized};
+
+/// A soft limit on the amount of references that may be made to an `Arc`.
+///
+/// Going above this limit will abort your program (although not
+/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
+/// Trying to go above it might call a `panic` (if not actually going above it).
+///
+/// This is a global invariant, and also applies when using a compare-exchange loop.
+///
+/// See comment in `Arc::clone`.
+const MAX_REFCOUNT: usize = isize::MAX as usize;
+
+/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
+const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
+
+#[cfg(not(portable_atomic_sanitize_thread))]
+macro_rules! acquire {
+    ($x:expr) => {
+        atomic::fence(Acquire)
+    };
+}
+
+// ThreadSanitizer does not support memory fences. To avoid false positive
+// reports in Arc / Weak implementation use atomic loads for synchronization
+// instead.
+#[cfg(portable_atomic_sanitize_thread)]
+macro_rules! acquire {
+    ($x:expr) => {
+        $x.load(Acquire)
+    };
+}
+
+/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
+/// Reference Counted'.
+///
+/// This is an equivalent to [`std::sync::Arc`], but using [portable-atomic] for synchronization.
+/// See the documentation for [`std::sync::Arc`] for more details.
+///
+/// **Note:** Unlike `std::sync::Arc`, coercing `Arc<T>` to `Arc<U>` is only possible if
+/// the optional cfg `portable_atomic_unstable_coerce_unsized` is enabled, as documented at the crate-level documentation,
+/// and this optional cfg item is only supported with Rust nightly version.
+/// This is because coercing the pointee requires the
+/// [unstable `CoerceUnsized` trait](https://doc.rust-lang.org/nightly/core/ops/trait.CoerceUnsized.html).
+/// See [this issue comment](https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569)
+/// for a workaround that works without depending on unstable features.
+///
+/// [portable-atomic]: https://crates.io/crates/portable-atomic
+///
+/// # Examples
+///
+/// ```
+/// use portable_atomic_util::Arc;
+/// use std::thread;
+///
+/// let five = Arc::new(5);
+///
+/// for _ in 0..10 {
+///     let five = Arc::clone(&five);
+///
+///     thread::spawn(move || {
+///         assert_eq!(*five, 5);
+///     });
+/// }
+/// # if cfg!(miri) { std::thread::sleep(std::time::Duration::from_millis(500)); } // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371
+/// ```
+pub struct Arc<T: ?Sized> {
+    ptr: NonNull<ArcInner<T>>,
+    phantom: PhantomData<ArcInner<T>>,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+
+#[cfg(not(portable_atomic_no_core_unwind_safe))]
+impl<T: core::panic::RefUnwindSafe + ?Sized> core::panic::UnwindSafe for Arc<T> {}
+#[cfg(all(portable_atomic_no_core_unwind_safe, feature = "std"))]
+impl<T: std::panic::RefUnwindSafe + ?Sized> std::panic::UnwindSafe for Arc<T> {}
+
+#[cfg(portable_atomic_unstable_coerce_unsized)]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
+
+impl<T: ?Sized> Arc<T> {
+    #[inline]
+    fn into_inner_non_null(this: Self) -> NonNull<ArcInner<T>> {
+        let this = mem::ManuallyDrop::new(this);
+        this.ptr
+    }
+
+    #[inline]
+    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
+        Self { ptr, phantom: PhantomData }
+    }
+
+    #[inline]
+    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
+        // SAFETY: the caller must uphold the safety contract.
+        unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+    }
+}
+
+#[allow(clippy::too_long_first_doc_paragraph)]
+/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
+/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
+/// pointer, which returns an [Option]<[Arc]\<T>>.
+///
+/// This is an equivalent to [`std::sync::Weak`], but using [portable-atomic] for synchronization.
+/// See the documentation for [`std::sync::Weak`] for more details.
+///
+/// **Note:** Unlike `std::sync::Weak`, coercing `Weak<T>` to `Weak<U>` is not possible, not even if
+/// the optional cfg `portable_atomic_unstable_coerce_unsized` is enabled.
+///
+/// [`upgrade`]: Weak::upgrade
+/// [portable-atomic]: https://crates.io/crates/portable-atomic
+///
+/// # Examples
+///
+/// ```
+/// use portable_atomic_util::Arc;
+/// use std::thread;
+///
+/// let five = Arc::new(5);
+/// let weak_five = Arc::downgrade(&five);
+///
+/// # let t =
+/// thread::spawn(move || {
+///     let five = weak_five.upgrade().unwrap();
+///     assert_eq!(*five, 5);
+/// });
+/// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371
+/// ```
+pub struct Weak<T: ?Sized> {
+    // This is a `NonNull` to allow optimizing the size of this type in enums,
+    // but it is not necessarily a valid pointer.
+    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
+    // to allocate space on the heap. That's not a value a real pointer
+    // will ever have because RcBox has alignment at least 2.
+    // This is only possible when `T: Sized`; unsized `T` never dangle.
+    ptr: NonNull<ArcInner<T>>,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
+
+impl<T: ?Sized> fmt::Debug for Weak<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("(Weak)")
+    }
+}
+
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
+struct ArcInner<T: ?Sized> {
+    strong: atomic::AtomicUsize,
+
+    // the value usize::MAX acts as a sentinel for temporarily "locking" the
+    // ability to upgrade weak pointers or downgrade strong ones; this is used
+    // to avoid races in `make_mut` and `get_mut`.
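+    //
+    // Invariant inherited from std's Arc (on which this is based): the strong
+    // references collectively hold a single implicit weak reference, so `weak`
+    // starts at 1 in `Arc::new` and only reaches 0 once the last strong
+    // reference is gone.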
+    weak: atomic::AtomicUsize,
+
+    data: T,
+}
+
+/// Calculate layout for `ArcInner<T>` using the inner value's layout
+fn arc_inner_layout_for_value_layout(layout: Layout) -> Layout {
+    // Calculate layout using the given value layout.
+    // Previously, layout was calculated on the expression
+    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+    // reference (see #54908).
+    pad_to_align(extend_layout(Layout::new::<ArcInner<()>>(), layout).unwrap().0)
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
+
+impl<T> Arc<T> {
+    /// Constructs a new `Arc<T>`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use portable_atomic_util::Arc;
+    ///
+    /// let five = Arc::new(5);
+    /// ```
+    #[inline]
+    pub fn new(data: T) -> Self {
+        // Start the weak pointer count as 1 which is the weak pointer that's
+        // held by all the strong pointers (kinda), see std/rc.rs for more info
+        let x: Box<_> = Box::new(ArcInner {
+            strong: atomic::AtomicUsize::new(1),
+            weak: atomic::AtomicUsize::new(1),
+            data,
+        });
+        unsafe { Self::from_inner(Box::leak(x).into()) }
+    }
+
+    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
+    /// to allow you to construct a `T` which holds a weak pointer to itself.
+    ///
+    /// Generally, a structure circularly referencing itself, either directly or
+    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
+    /// Using this function, you get access to the weak pointer during the
+    /// initialization of `T`, before the `Arc<T>` is created, such that you can
+    /// clone and store it inside the `T`.
+    ///
+    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
+    /// then calls your closure, giving it a `Weak<T>` to this allocation,
+    /// and only afterwards completes the construction of the `Arc<T>` by placing
+    /// the `T` returned from your closure into the allocation.
+    ///
+    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
+    /// returns, calling [`upgrade`] on the weak reference inside your closure will
+    /// fail and result in a `None` value.
+    ///
+    /// # Panics
+    ///
+    /// If `data_fn` panics, the panic is propagated to the caller, and the
+    /// temporary [`Weak<T>`] is dropped normally.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use portable_atomic_util::{Arc, Weak};
+    ///
+    /// struct Gadget {
+    ///     me: Weak<Gadget>,
+    /// }
+    ///
+    /// impl Gadget {
+    ///     /// Constructs a reference counted Gadget.
+    ///     fn new() -> Arc<Gadget> {
+    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
+    ///         // `Arc` we're constructing.
+    ///         Arc::new_cyclic(|me| {
+    ///             // Create the actual struct here.
+    ///             Gadget { me: me.clone() }
+    ///         })
+    ///     }
+    ///
+    ///     /// Returns a reference counted pointer to Self.
+    ///     fn me(&self) -> Arc<Gadget> {
+    ///         self.me.upgrade().unwrap()
+    ///     }
+    /// }
+    /// ```
+    /// [`upgrade`]: Weak::upgrade
+    #[inline]
+    pub fn new_cyclic<F>(data_fn: F) -> Self
+    where
+        F: FnOnce(&Weak<T>) -> T,
+    {
+        // Construct the inner in the "uninitialized" state with a single
+        // weak reference.
+        let init_ptr = Weak::new_uninit_ptr();
+
+        let weak = Weak { ptr: init_ptr };
+
+        // It's important we don't give up ownership of the weak pointer, or
+        // else the memory might be freed by the time `data_fn` returns. If
+        // we really wanted to pass ownership, we could create an additional
+        // weak pointer for ourselves, but this would result in additional
+        // updates to the weak reference count which might not be necessary
+        // otherwise.
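+        //
+        // While `data_fn` runs the strong count is still 0 (see the
+        // debug_assert below), so calling `upgrade` on `weak` or any of its
+        // clones observes no strong references and returns `None`.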
+ let data = data_fn(&weak); + + // Now we can properly initialize the inner value and turn our weak + // reference into a strong reference. + unsafe { + let inner = init_ptr.as_ptr(); + ptr::write(data_ptr::(inner, &data), data); + + // The above write to the data field must be visible to any threads which + // observe a non-zero strong count. Therefore we need at least "Release" ordering + // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`. + // + // "Acquire" ordering is not required. When considering the possible behaviors + // of `data_fn` we only need to look at what it could do with a reference to a + // non-upgradeable `Weak`: + // - It can *clone* the `Weak`, increasing the weak reference count. + // - It can drop those clones, decreasing the weak reference count (but never to zero). + // + // These side effects do not impact us in any way, and no other side effects are + // possible with safe code alone. + let prev_value = (*inner).strong.fetch_add(1, Release); + debug_assert_eq!(prev_value, 0, "No prior strong references should exist"); + + // Strong references should collectively own a shared weak reference, + // so don't run the destructor for our old weak reference. + mem::forget(weak); + + Self::from_inner(init_ptr) + } + } + + /// Constructs a new `Arc` with uninitialized contents. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let mut five = Arc::::new_uninit(); + /// + /// // Deferred initialization: + /// Arc::get_mut(&mut five).unwrap().write(5); + /// + /// let five = unsafe { five.assume_init() }; + /// + /// assert_eq!(*five, 5) + /// ``` + #[cfg(not(portable_atomic_no_maybe_uninit))] + #[inline] + #[must_use] + pub fn new_uninit() -> Arc> { + unsafe { + Arc::from_ptr(Arc::allocate_for_layout( + Layout::new::(), + |layout| Global.allocate(layout), + |ptr| ptr as *mut _, + )) + } + } + + /// Constructs a new `Pin>`. If `T` does not implement `Unpin`, then + /// `data` will be pinned in memory and unable to be moved. + #[must_use] + pub fn pin(data: T) -> Pin { + unsafe { Pin::new_unchecked(Self::new(data)) } + } + + /// Returns the inner value, if the `Arc` has exactly one strong reference. + /// + /// Otherwise, an [`Err`] is returned with the same `Arc` that was + /// passed in. + /// + /// This will succeed even if there are outstanding weak references. + /// + /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't + /// keep the `Arc` in the [`Err`] case. + /// Immediately dropping the [`Err`]-value, as the expression + /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to + /// drop to zero and the inner value of the `Arc` to be dropped. + /// For instance, if two threads execute such an expression in parallel, + /// there is a race condition without the possibility of unsafety: + /// The threads could first both check whether they own the last instance + /// in `Arc::try_unwrap`, determine that they both do not, and then both + /// discard and drop their instance in the call to [`ok`][`Result::ok`]. + /// In this scenario, the value inside the `Arc` is safely destroyed + /// by exactly one of the threads, but neither thread will ever be able + /// to use the value. 
+ /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let x = Arc::new(3); + /// assert_eq!(Arc::try_unwrap(x), Ok(3)); + /// + /// let x = Arc::new(4); + /// let _y = Arc::clone(&x); + /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4); + /// ``` + #[inline] + pub fn try_unwrap(this: Self) -> Result { + if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() { + return Err(this); + } + + acquire!(this.inner().strong); + + let this = ManuallyDrop::new(this); + let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) }; + + // Make a weak pointer to clean up the implicit strong-weak reference + let _weak = Weak { ptr: this.ptr }; + + Ok(elem) + } + + /// Returns the inner value, if the `Arc` has exactly one strong reference. + /// + /// Otherwise, [`None`] is returned and the `Arc` is dropped. + /// + /// This will succeed even if there are outstanding weak references. + /// + /// If `Arc::into_inner` is called on every clone of this `Arc`, + /// it is guaranteed that exactly one of the calls returns the inner value. + /// This means in particular that the inner value is not dropped. + /// + /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it + /// is meant for different use-cases. If used as a direct replacement + /// for `Arc::into_inner` anyway, such as with the expression + /// [Arc::try_unwrap]\(this).[ok][Result::ok](), then it does + /// **not** give the same guarantee as described in the previous paragraph. + /// For more information, see the examples below and read the documentation + /// of [`Arc::try_unwrap`]. + /// + /// # Examples + /// + /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives. + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let x = Arc::new(3); + /// let y = Arc::clone(&x); + /// + /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`: + /// let x_thread = std::thread::spawn(|| Arc::into_inner(x)); + /// let y_thread = std::thread::spawn(|| Arc::into_inner(y)); + /// + /// let x_inner_value = x_thread.join().unwrap(); + /// let y_inner_value = y_thread.join().unwrap(); + /// + /// // One of the threads is guaranteed to receive the inner value: + /// assert!(matches!((x_inner_value, y_inner_value), (None, Some(3)) | (Some(3), None))); + /// // The result could also be `(None, None)` if the threads called + /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead. + /// ``` + /// + /// A more practical example demonstrating the need for `Arc::into_inner`: + /// ``` + /// use portable_atomic_util::Arc; + /// + /// // Definition of a simple singly linked list using `Arc`: + /// #[derive(Clone)] + /// struct LinkedList(Option>>); + /// struct Node(T, Option>>); + /// + /// // Dropping a long `LinkedList` relying on the destructor of `Arc` + /// // can cause a stack overflow. To prevent this, we can provide a + /// // manual `Drop` implementation that does the destruction in a loop: + /// impl Drop for LinkedList { + /// fn drop(&mut self) { + /// let mut link = self.0.take(); + /// while let Some(arc_node) = link.take() { + /// if let Some(Node(_value, next)) = Arc::into_inner(arc_node) { + /// link = next; + /// } + /// } + /// } + /// } + /// + /// // Implementation of `new` and `push` omitted + /// impl LinkedList { + /// /* ... 
*/ + /// # fn new() -> Self { + /// # LinkedList(None) + /// # } + /// # fn push(&mut self, x: T) { + /// # self.0 = Some(Arc::new(Node(x, self.0.take()))); + /// # } + /// } + /// + /// // The following code could have still caused a stack overflow + /// // despite the manual `Drop` impl if that `Drop` impl had used + /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`. + /// + /// // Create a long list and clone it + /// let mut x = LinkedList::new(); + /// let size = 100000; + /// # let size = if cfg!(miri) { 100 } else { size }; + /// for i in 0..size { + /// x.push(i); // Adds i to the front of x + /// } + /// let y = x.clone(); + /// + /// // Drop the clones in parallel + /// let x_thread = std::thread::spawn(|| drop(x)); + /// let y_thread = std::thread::spawn(|| drop(y)); + /// x_thread.join().unwrap(); + /// y_thread.join().unwrap(); + /// ``` + #[inline] + pub fn into_inner(this: Self) -> Option { + // Make sure that the ordinary `Drop` implementation isn’t called as well + let mut this = mem::ManuallyDrop::new(this); + + // Following the implementation of `drop` and `drop_slow` + if this.inner().strong.fetch_sub(1, Release) != 1 { + return None; + } + + acquire!(this.inner().strong); + + // SAFETY: This mirrors the line + // + // unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) }; + // + // in `drop_slow`. Instead of dropping the value behind the pointer, + // it is read and eventually returned; `ptr::read` has the same + // safety conditions as `ptr::drop_in_place`. + let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) }; + + drop(Weak { ptr: this.ptr }); + + Some(inner) + } +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl Arc<[T]> { + /// Constructs a new atomically reference-counted slice with uninitialized contents. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let mut values = Arc::<[u32]>::new_uninit_slice(3); + /// + /// // Deferred initialization: + /// let data = Arc::get_mut(&mut values).unwrap(); + /// data[0].write(1); + /// data[1].write(2); + /// data[2].write(3); + /// + /// let values = unsafe { values.assume_init() }; + /// + /// assert_eq!(*values, [1, 2, 3]) + /// ``` + #[inline] + #[must_use] + pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit]> { + unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) } + } +} + +#[cfg(not(portable_atomic_no_maybe_uninit))] +impl Arc> { + /// Converts to `Arc`. + /// + /// # Safety + /// + /// As with [`MaybeUninit::assume_init`], + /// it is up to the caller to guarantee that the inner value + /// really is in an initialized state. + /// Calling this when the content is not yet fully initialized + /// causes immediate undefined behavior. + /// + /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let mut five = Arc::::new_uninit(); + /// + /// // Deferred initialization: + /// Arc::get_mut(&mut five).unwrap().write(5); + /// + /// let five = unsafe { five.assume_init() }; + /// + /// assert_eq!(*five, 5) + /// ``` + #[must_use = "`self` will be dropped if the result is not used"] + #[inline] + pub unsafe fn assume_init(self) -> Arc { + let ptr = Arc::into_inner_non_null(self); + // SAFETY: MaybeUninit has the same layout as T, and + // the caller must ensure data is initialized. 
+ unsafe { Arc::from_inner(ptr.cast::>()) } + } +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl Arc<[mem::MaybeUninit]> { + /// Converts to `Arc<[T]>`. + /// + /// # Safety + /// + /// As with [`MaybeUninit::assume_init`], + /// it is up to the caller to guarantee that the inner value + /// really is in an initialized state. + /// Calling this when the content is not yet fully initialized + /// causes immediate undefined behavior. + /// + /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let mut values = Arc::<[u32]>::new_uninit_slice(3); + /// + /// // Deferred initialization: + /// let data = Arc::get_mut(&mut values).unwrap(); + /// data[0].write(1); + /// data[1].write(2); + /// data[2].write(3); + /// + /// let values = unsafe { values.assume_init() }; + /// + /// assert_eq!(*values, [1, 2, 3]) + /// ``` + #[must_use = "`self` will be dropped if the result is not used"] + #[inline] + pub unsafe fn assume_init(self) -> Arc<[T]> { + let ptr = Arc::into_inner_non_null(self); + // SAFETY: [MaybeUninit] has the same layout as [T], and + // the caller must ensure data is initialized. + unsafe { Arc::from_ptr(ptr.as_ptr() as *mut ArcInner<[T]>) } + } +} + +impl Arc { + /// Constructs an `Arc` from a raw pointer. + /// + /// # Safety + /// + /// The raw pointer must have been previously returned by a call to + /// [`Arc::into_raw`][into_raw] with the following requirements: + /// + /// * If `U` is sized, it must have the same size and alignment as `T`. This + /// is trivially true if `U` is `T`. + /// * If `U` is unsized, its data pointer must have the same size and + /// alignment as `T`. This is trivially true if `Arc` was constructed + /// through `Arc` and then converted to `Arc` through an [unsized + /// coercion]. + /// + /// Note that if `U` or `U`'s data pointer is not `T` but has the same size + /// and alignment, this is basically like transmuting references of + /// different types. See [`mem::transmute`] for more information + /// on what restrictions apply in this case. + /// + /// The user of `from_raw` has to make sure a specific value of `T` is only + /// dropped once. + /// + /// This function is unsafe because improper use may lead to memory unsafety, + /// even if the returned `Arc` is never accessed. + /// + /// [into_raw]: Arc::into_raw + /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let x = Arc::new("hello".to_owned()); + /// let x_ptr = Arc::into_raw(x); + /// + /// unsafe { + /// // Convert back to an `Arc` to prevent leak. + /// let x = Arc::from_raw(x_ptr); + /// assert_eq!(&*x, "hello"); + /// + /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe. + /// } + /// + /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! + /// ``` + /// + /// Convert a slice back into its original array: + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let x: Arc<[u32]> = Arc::from([1, 2, 3]); + /// let x_ptr: *const [u32] = Arc::into_raw(x); + /// + /// unsafe { + /// let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>()); + /// assert_eq!(&*x, &[1, 2, 3]); + /// } + /// ``` + #[inline] + pub unsafe fn from_raw(ptr: *const T) -> Self { + unsafe { + let offset = data_offset::(&*ptr); + + // Reverse the offset to find the original ArcInner. 
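+            // (`data_offset` here is the byte distance from the start of the
+            // `ArcInner` allocation to its `data` field: the two counters plus
+            // any padding required by the value's alignment.)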
+ let arc_ptr = strict::byte_sub(ptr as *mut T, offset) as *mut ArcInner; + + Self::from_ptr(arc_ptr) + } + } + + /// Increments the strong reference count on the `Arc` associated with the + /// provided pointer by one. + /// + /// # Safety + /// + /// The pointer must have been obtained through `Arc::into_raw`, and the + /// associated `Arc` instance must be valid (i.e. the strong count must be at + /// least 1) for the duration of this method. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// unsafe { + /// let ptr = Arc::into_raw(five); + /// Arc::increment_strong_count(ptr); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` between threads. + /// let five = Arc::from_raw(ptr); + /// assert_eq!(2, Arc::strong_count(&five)); + /// # // Prevent leaks for Miri. + /// # Arc::decrement_strong_count(ptr); + /// } + /// ``` + #[inline] + pub unsafe fn increment_strong_count(ptr: *const T) { + // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop + let arc = unsafe { mem::ManuallyDrop::new(Self::from_raw(ptr)) }; + // Now increase refcount, but don't drop new refcount either + let _arc_clone: mem::ManuallyDrop<_> = arc.clone(); + } + + /// Decrements the strong reference count on the `Arc` associated with the + /// provided pointer by one. + /// + /// # Safety + /// + /// The pointer must have been obtained through `Arc::into_raw`, and the + /// associated `Arc` instance must be valid (i.e. the strong count must be at + /// least 1) when invoking this method. This method can be used to release the final + /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been + /// released. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// unsafe { + /// let ptr = Arc::into_raw(five); + /// Arc::increment_strong_count(ptr); + /// + /// // Those assertions are deterministic because we haven't shared + /// // the `Arc` between threads. + /// let five = Arc::from_raw(ptr); + /// assert_eq!(2, Arc::strong_count(&five)); + /// Arc::decrement_strong_count(ptr); + /// assert_eq!(1, Arc::strong_count(&five)); + /// } + /// ``` + #[inline] + pub unsafe fn decrement_strong_count(ptr: *const T) { + // SAFETY: the caller must uphold the safety contract. + unsafe { drop(Self::from_raw(ptr)) } + } +} + +impl Arc { + /// Consumes the `Arc`, returning the wrapped pointer. + /// + /// To avoid a memory leak the pointer must be converted back to an `Arc` using + /// [`Arc::from_raw`]. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let x = Arc::new("hello".to_owned()); + /// let x_ptr = Arc::into_raw(x); + /// assert_eq!(unsafe { &*x_ptr }, "hello"); + /// # // Prevent leaks for Miri. + /// # drop(unsafe { Arc::from_raw(x_ptr) }); + /// ``` + #[must_use = "losing the pointer will leak memory"] + pub fn into_raw(this: Self) -> *const T { + let this = ManuallyDrop::new(this); + Self::as_ptr(&*this) + } + + /// Provides a raw pointer to the data. + /// + /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for + /// as long as there are strong counts in the `Arc`. 
+ /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let x = Arc::new("hello".to_owned()); + /// let y = Arc::clone(&x); + /// let x_ptr = Arc::as_ptr(&x); + /// assert_eq!(x_ptr, Arc::as_ptr(&y)); + /// assert_eq!(unsafe { &*x_ptr }, "hello"); + /// ``` + #[must_use] + pub fn as_ptr(this: &Self) -> *const T { + let ptr: *mut ArcInner = this.ptr.as_ptr(); + + unsafe { data_ptr::(ptr, &**this) } + } + + /// Creates a new [`Weak`] pointer to this allocation. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// let weak_five = Arc::downgrade(&five); + /// ``` + #[must_use = "this returns a new `Weak` pointer, \ + without modifying the original `Arc`"] + #[allow(clippy::missing_panics_doc)] + pub fn downgrade(this: &Self) -> Weak { + // This Relaxed is OK because we're checking the value in the CAS + // below. + let mut cur = this.inner().weak.load(Relaxed); + + loop { + // check if the weak counter is currently "locked"; if so, spin. + if cur == usize::MAX { + hint::spin_loop(); + cur = this.inner().weak.load(Relaxed); + continue; + } + + // We can't allow the refcount to increase much past `MAX_REFCOUNT`. + assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR); + + // NOTE: this code currently ignores the possibility of overflow + // into usize::MAX; in general both Rc and Arc need to be adjusted + // to deal with overflow. + + // Unlike with Clone(), we need this to be an Acquire read to + // synchronize with the write coming from `is_unique`, so that the + // events prior to that write happen before this read. + match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { + Ok(_) => { + // Make sure we do not create a dangling Weak + debug_assert!(!is_dangling(this.ptr.as_ptr())); + return Weak { ptr: this.ptr }; + } + Err(old) => cur = old, + } + } + } + + /// Gets the number of [`Weak`] pointers to this allocation. + /// + /// # Safety + /// + /// This method by itself is safe, but using it correctly requires extra care. + /// Another thread can change the weak count at any time, + /// including potentially between calling this method and acting on the result. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// let _weak_five = Arc::downgrade(&five); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` or `Weak` between threads. + /// assert_eq!(1, Arc::weak_count(&five)); + /// ``` + #[inline] + #[must_use] + pub fn weak_count(this: &Self) -> usize { + let cnt = this.inner().weak.load(Relaxed); + // If the weak count is currently locked, the value of the + // count was 0 just before taking the lock. + if cnt == usize::MAX { + 0 + } else { + cnt - 1 + } + } + + /// Gets the number of strong (`Arc`) pointers to this allocation. + /// + /// # Safety + /// + /// This method by itself is safe, but using it correctly requires extra care. + /// Another thread can change the strong count at any time, + /// including potentially between calling this method and acting on the result. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// let _also_five = Arc::clone(&five); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` between threads. 
+ /// assert_eq!(2, Arc::strong_count(&five)); + /// ``` + #[inline] + #[must_use] + pub fn strong_count(this: &Self) -> usize { + this.inner().strong.load(Relaxed) + } + + #[inline] + fn inner(&self) -> &ArcInner { + // This unsafety is ok because while this arc is alive we're guaranteed + // that the inner pointer is valid. Furthermore, we know that the + // `ArcInner` structure itself is `Sync` because the inner data is + // `Sync` as well, so we're ok loaning out an immutable pointer to these + // contents. + unsafe { self.ptr.as_ref() } + } + + // Non-inlined part of `drop`. + #[inline(never)] + unsafe fn drop_slow(&mut self) { + // Destroy the data at this time, even though we must not free the box + // allocation itself (there might still be weak pointers lying around). + unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) }; + + // Drop the weak ref collectively held by all strong references + // Take a reference to `self.alloc` instead of cloning because 1. it'll + // last long enough, and 2. you should be able to drop `Arc`s with + // unclonable allocators + drop(Weak { ptr: self.ptr }); + } + + /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to + /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// let same_five = Arc::clone(&five); + /// let other_five = Arc::new(5); + /// + /// assert!(Arc::ptr_eq(&five, &same_five)); + /// assert!(!Arc::ptr_eq(&five, &other_five)); + /// ``` + /// + /// [`ptr::eq`]: core::ptr::eq "ptr::eq" + #[inline] + #[must_use] + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + ptr::eq(this.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ()) + } +} + +impl Arc { + /// Allocates an `ArcInner` with sufficient space for + /// a possibly-unsized inner value where the value has the layout provided. + /// + /// The function `mem_to_arc_inner` is called with the data pointer + /// and must return back a (potentially fat)-pointer for the `ArcInner`. + unsafe fn allocate_for_layout( + value_layout: Layout, + allocate: impl FnOnce(Layout) -> Option>, + mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner, + ) -> *mut ArcInner { + let layout = arc_inner_layout_for_value_layout(value_layout); + + let ptr = allocate(layout).unwrap_or_else(|| handle_alloc_error(layout)); + + unsafe { Self::initialize_arc_inner(ptr, layout, mem_to_arc_inner) } + } + + unsafe fn initialize_arc_inner( + ptr: NonNull, + _layout: Layout, + mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner, + ) -> *mut ArcInner { + let inner: *mut ArcInner = mem_to_arc_inner(ptr.as_ptr()); + // debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout); // for_value_raw is unstable + + // SAFETY: mem_to_arc_inner return a valid pointer to uninitialized ArcInner. + // ArcInner is repr(C), and strong and weak are the first and second fields and + // are the same type, so `inner as *mut atomic::AtomicUsize` is strong and + // `(inner as *mut atomic::AtomicUsize).add(1)` is weak. + unsafe { + let strong = inner as *mut atomic::AtomicUsize; + strong.write(atomic::AtomicUsize::new(1)); + let weak = strong.add(1); + weak.write(atomic::AtomicUsize::new(1)); + } + + inner + } +} + +impl Arc { + /// Allocates an `ArcInner` with sufficient space for an unsized inner value. 
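+    ///
+    /// Note (based on the helpers above): this only allocates and writes the
+    /// two reference counts; the `data` field is left uninitialized and must
+    /// be filled in by the caller (as `from_box` below does by copying the
+    /// pointee's bytes into place).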
+ #[inline] + unsafe fn allocate_for_value(value: &T) -> *mut ArcInner { + let ptr: *const T = value; + // Allocate for the `ArcInner` using the given value. + unsafe { + Self::allocate_for_layout( + Layout::for_value(value), + |layout| Global.allocate(layout), + |mem| strict::with_metadata_of(mem, ptr as *mut ArcInner), + ) + } + } + + fn from_box(src: Box) -> Arc { + unsafe { + let value_size = size_of_val(&*src); + let ptr = Self::allocate_for_value(&*src); + + // Copy value as bytes + ptr::copy_nonoverlapping( + &*src as *const T as *const u8, + data_ptr::(ptr, &*src) as *mut u8, + value_size, + ); + + // Free the allocation without dropping its contents + let box_ptr = Box::into_raw(src); + let src = Box::from_raw(box_ptr as *mut mem::ManuallyDrop); + drop(src); + + Self::from_ptr(ptr) + } + } +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl Arc<[T]> { + /// Allocates an `ArcInner<[T]>` with the given length. + unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> { + unsafe { + Self::allocate_for_layout( + Layout::array::(len).unwrap(), + |layout| Global.allocate(layout), + |mem| ptr::slice_from_raw_parts_mut(mem.cast::(), len) as *mut ArcInner<[T]>, + ) + } + } + + /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size. + /// + /// Behavior is undefined should the size be wrong. + unsafe fn from_iter_exact(iter: impl Iterator, len: usize) -> Self { + // Panic guard while cloning T elements. + // In the event of a panic, elements that have been written + // into the new ArcInner will be dropped, then the memory freed. + struct Guard { + ptr: *mut ArcInner<[mem::MaybeUninit]>, + elems: *mut T, + n_elems: usize, + } + + impl Drop for Guard { + fn drop(&mut self) { + unsafe { + let slice = ptr::slice_from_raw_parts_mut(self.elems, self.n_elems); + ptr::drop_in_place(slice); + + drop(Box::from_raw(self.ptr)); + } + } + } + + unsafe { + let ptr: *mut ArcInner<[mem::MaybeUninit]> = Arc::allocate_for_slice(len); + + // Pointer to first element + let elems = (*ptr).data.as_mut_ptr() as *mut T; + + let mut guard = Guard { ptr, elems, n_elems: 0 }; + + for (i, item) in iter.enumerate() { + ptr::write(elems.add(i), item); + guard.n_elems += 1; + } + + // All clear. Forget the guard so it doesn't free the new ArcInner. + mem::forget(guard); + + Arc::from_ptr(ptr).assume_init() + } + } +} + +impl Clone for Arc { + /// Makes a clone of the `Arc` pointer. + /// + /// This creates another pointer to the same allocation, increasing the + /// strong reference count. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// let _ = Arc::clone(&five); + /// ``` + #[inline] + fn clone(&self) -> Self { + // Using a relaxed ordering is alright here, as knowledge of the + // original reference prevents other threads from erroneously deleting + // the object. + // + // As explained in the [Boost documentation][1], Increasing the + // reference counter can always be done with memory_order_relaxed: New + // references to an object can only be formed from an existing + // reference, and passing an existing reference from one thread to + // another must already provide any required synchronization. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + let old_size = self.inner().strong.fetch_add(1, Relaxed); + + // However we need to guard against massive refcounts in case someone is `mem::forget`ing + // Arcs. 
If we don't do this the count can overflow and users will use-after free. This + // branch will never be taken in any realistic program. We abort because such a program is + // incredibly degenerate, and we don't care to support it. + // + // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`. + // But we do that check *after* having done the increment, so there is a chance here that + // the worst already happened and we actually do overflow the `usize` counter. However, that + // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment + // above and the `abort` below, which seems exceedingly unlikely. + // + // This is a global invariant, and also applies when using a compare-exchange loop to increment + // counters in other methods. + // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop, + // and then overflow using a few `fetch_add`s. + if old_size > MAX_REFCOUNT { + abort(); + } + + unsafe { Self::from_inner(self.ptr) } + } +} + +impl Deref for Arc { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.inner().data + } +} + +impl Arc { + /// Makes a mutable reference into the given `Arc`. + /// + /// If there are other `Arc` pointers to the same allocation, then `make_mut` will + /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also + /// referred to as clone-on-write. + /// + /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`] + /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not + /// be cloned. + /// + /// See also [`get_mut`], which will fail rather than cloning the inner value + /// or dissociating [`Weak`] pointers. + /// + /// [`clone`]: Clone::clone + /// [`get_mut`]: Arc::get_mut + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let mut data = Arc::new(5); + /// + /// *Arc::make_mut(&mut data) += 1; // Won't clone anything + /// let mut other_data = Arc::clone(&data); // Won't clone inner data + /// *Arc::make_mut(&mut data) += 1; // Clones inner data + /// *Arc::make_mut(&mut data) += 1; // Won't clone anything + /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything + /// + /// // Now `data` and `other_data` point to different allocations. + /// assert_eq!(*data, 8); + /// assert_eq!(*other_data, 12); + /// ``` + /// + /// [`Weak`] pointers will be dissociated: + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let mut data = Arc::new(75); + /// let weak = Arc::downgrade(&data); + /// + /// assert!(75 == *data); + /// assert!(75 == *weak.upgrade().unwrap()); + /// + /// *Arc::make_mut(&mut data) += 1; + /// + /// assert!(76 == *data); + /// assert!(weak.upgrade().is_none()); + /// ``` + #[inline] + pub fn make_mut(this: &mut Self) -> &mut T { + let size_of_val = mem::size_of_val::(&**this); + + // Note that we hold both a strong reference and a weak reference. + // Thus, releasing our strong reference only will not, by itself, cause + // the memory to be deallocated. + // + // Use Acquire to ensure that we see any writes to `weak` that happen + // before release writes (i.e., decrements) to `strong`. Since we hold a + // weak count, there's no chance the ArcInner itself could be + // deallocated. + if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() { + // Another strong pointer exists, so we must clone. 
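+            //
+            // (The compare_exchange above attempted to move `strong` from 1 to
+            // 0; failure means other strong references exist, so clone the
+            // data into a fresh allocation instead of mutating in place.)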
+ + let this_data_ref: &T = this; + // `in_progress` drops the allocation if we panic before finishing initializing it. + let mut in_progress: UniqueArcUninit = UniqueArcUninit::new(this_data_ref); + + let initialized_clone = unsafe { + // Clone. If the clone panics, `in_progress` will be dropped and clean up. + this_data_ref.clone_to_uninit(in_progress.data_ptr()); + // Cast type of pointer, now that it is initialized. + in_progress.into_arc() + }; + *this = initialized_clone; + } else if this.inner().weak.load(Relaxed) != 1 { + // Relaxed suffices in the above because this is fundamentally an + // optimization: we are always racing with weak pointers being + // dropped. Worst case, we end up allocated a new Arc unnecessarily. + + // We removed the last strong ref, but there are additional weak + // refs remaining. We'll move the contents to a new Arc, and + // invalidate the other weak refs. + + // Note that it is not possible for the read of `weak` to yield + // usize::MAX (i.e., locked), since the weak count can only be + // locked by a thread with a strong reference. + + // Materialize our own implicit weak pointer, so that it can clean + // up the ArcInner as needed. + let _weak = Weak { ptr: this.ptr }; + + // Can just steal the data, all that's left is `Weak`s + // + // We don't need panic-protection like the above branch does, but we might as well + // use the same mechanism. + let mut in_progress: UniqueArcUninit = UniqueArcUninit::new(&**this); + unsafe { + // Initialize `in_progress` with move of **this. + // We have to express this in terms of bytes because `T: ?Sized`; there is no + // operation that just copies a value based on its `size_of_val()`. + ptr::copy_nonoverlapping( + &**this as *const T as *const u8, + in_progress.data_ptr() as *mut u8, + size_of_val, + ); + + ptr::write(this, in_progress.into_arc()); + } + } else { + // We were the sole reference of either kind; bump back up the + // strong ref count. + this.inner().strong.store(1, Release); + } + + // As with `get_mut()`, the unsafety is ok because our reference was + // either unique to begin with, or became one upon cloning the contents. + unsafe { Self::get_mut_unchecked(this) } + } +} + +impl Arc { + /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the + /// clone. + /// + /// Assuming `arc_t` is of type `Arc`, this function is functionally equivalent to + /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// use std::ptr; + /// + /// let inner = String::from("test"); + /// let ptr = inner.as_ptr(); + /// + /// let arc = Arc::new(inner); + /// let inner = Arc::unwrap_or_clone(arc); + /// // The inner value was not cloned + /// assert!(ptr::eq(ptr, inner.as_ptr())); + /// + /// let arc = Arc::new(inner); + /// let arc2 = arc.clone(); + /// let inner = Arc::unwrap_or_clone(arc); + /// // Because there were 2 references, we had to clone the inner value. + /// assert!(!ptr::eq(ptr, inner.as_ptr())); + /// // `arc2` is the last reference, so when we unwrap it we get back + /// // the original `String`. 
+ /// let inner = Arc::unwrap_or_clone(arc2); + /// assert!(ptr::eq(ptr, inner.as_ptr())); + /// ``` + #[inline] + pub fn unwrap_or_clone(this: Self) -> T { + Self::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone()) + } +} + +impl Arc { + /// Returns a mutable reference into the given `Arc`, if there are + /// no other `Arc` or [`Weak`] pointers to the same allocation. + /// + /// Returns [`None`] otherwise, because it is not safe to + /// mutate a shared value. + /// + /// See also [`make_mut`][make_mut], which will [`clone`][clone] + /// the inner value when there are other `Arc` pointers. + /// + /// [make_mut]: Arc::make_mut + /// [clone]: Clone::clone + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let mut x = Arc::new(3); + /// *Arc::get_mut(&mut x).unwrap() = 4; + /// assert_eq!(*x, 4); + /// + /// let _y = Arc::clone(&x); + /// assert!(Arc::get_mut(&mut x).is_none()); + /// ``` + #[inline] + pub fn get_mut(this: &mut Self) -> Option<&mut T> { + if this.is_unique() { + // This unsafety is ok because we're guaranteed that the pointer + // returned is the *only* pointer that will ever be returned to T. Our + // reference count is guaranteed to be 1 at this point, and we required + // the Arc itself to be `mut`, so we're returning the only possible + // reference to the inner data. + unsafe { Some(Self::get_mut_unchecked(this)) } + } else { + None + } + } + + #[inline] + unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T { + // We are careful to *not* create a reference covering the "count" fields, as + // this would alias with concurrent access to the reference counts (e.g. by `Weak`). + unsafe { &mut (*this.ptr.as_ptr()).data } + } + + /// Determine whether this is the unique reference (including weak refs) to + /// the underlying data. + /// + /// Note that this requires locking the weak ref count. + fn is_unique(&mut self) -> bool { + // lock the weak pointer count if we appear to be the sole weak pointer + // holder. + // + // The acquire label here ensures a happens-before relationship with any + // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements + // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded + // weak ref was never dropped, the CAS here will fail so we do not care to synchronize. + if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { + // This needs to be an `Acquire` to synchronize with the decrement of the `strong` + // counter in `drop` -- the only access that happens when any but the last reference + // is being dropped. + let unique = self.inner().strong.load(Acquire) == 1; + + // The release write here synchronizes with a read in `downgrade`, + // effectively preventing the above read of `strong` from happening + // after the write. + self.inner().weak.store(1, Release); // release the lock + unique + } else { + false + } + } +} + +impl Drop for Arc { + /// Drops the `Arc`. + /// + /// This will decrement the strong reference count. If the strong reference + /// count reaches zero then the only other references (if any) are + /// [`Weak`], so we `drop` the inner value. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// struct Foo; + /// + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } + /// } + /// + /// let foo = Arc::new(Foo); + /// let foo2 = Arc::clone(&foo); + /// + /// drop(foo); // Doesn't print anything + /// drop(foo2); // Prints "dropped!" 
+ /// ``` + #[inline] + fn drop(&mut self) { + // Because `fetch_sub` is already atomic, we do not need to synchronize + // with other threads unless we are going to delete the object. This + // same logic applies to the below `fetch_sub` to the `weak` count. + if self.inner().strong.fetch_sub(1, Release) != 1 { + return; + } + + // This fence is needed to prevent reordering of use of the data and + // deletion of the data. Because it is marked `Release`, the decreasing + // of the reference count synchronizes with this `Acquire` fence. This + // means that use of the data happens before decreasing the reference + // count, which happens before this fence, which happens before the + // deletion of the data. + // + // As explained in the [Boost documentation][1], + // + // > It is important to enforce any possible access to the object in one + // > thread (through an existing reference) to *happen before* deleting + // > the object in a different thread. This is achieved by a "release" + // > operation after dropping a reference (any access to the object + // > through this reference must obviously happened before), and an + // > "acquire" operation before deleting the object. + // + // In particular, while the contents of an Arc are usually immutable, it's + // possible to have interior writes to something like a Mutex. Since a + // Mutex is not acquired when it is deleted, we can't rely on its + // synchronization logic to make writes in thread A visible to a destructor + // running in thread B. + // + // Also note that the Acquire fence here could probably be replaced with an + // Acquire load, which could improve performance in highly-contended + // situations. See [2]. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + // [2]: (https://github.com/rust-lang/rust/pull/41714) + acquire!(self.inner().strong); + + unsafe { + self.drop_slow(); + } + } +} + +impl Arc { + /// Attempts to downcast the `Arc` to a concrete type. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// use std::any::Any; + /// + /// fn print_if_string(value: Arc) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// let my_string = "Hello World".to_string(); + /// print_if_string(Arc::from(Box::new(my_string) as Box)); + /// print_if_string(Arc::from(Box::new(0i8) as Box)); + /// // or with "--cfg portable_atomic_unstable_coerce_unsized" in RUSTFLAGS (requires Rust nightly): + /// // print_if_string(Arc::new(my_string)); + /// // print_if_string(Arc::new(0i8)); + /// ``` + #[inline] + pub fn downcast(self) -> Result, Self> + where + T: Any + Send + Sync, + { + if (*self).is::() { + unsafe { + let ptr = Arc::into_inner_non_null(self); + Ok(Arc::from_inner(ptr.cast::>())) + } + } else { + Err(self) + } + } +} + +impl Weak { + /// Constructs a new `Weak`, without allocating any memory. + /// Calling [`upgrade`] on the return value always gives [`None`]. 
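+    /// (No allocation is needed because the pointer is set to a sentinel
+    /// value, `usize::MAX`, which is never a valid `ArcInner` address.)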
+ /// + /// [`upgrade`]: Weak::upgrade + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Weak; + /// + /// let empty: Weak = Weak::new(); + /// assert!(empty.upgrade().is_none()); + /// ``` + #[inline] + #[must_use] + pub const fn new() -> Self { + Self { + ptr: unsafe { + NonNull::new_unchecked(strict::without_provenance_mut::>(usize::MAX)) + }, + } + } + + #[inline] + #[must_use] + fn new_uninit_ptr() -> NonNull> { + unsafe { + NonNull::new_unchecked(Self::allocate_for_layout( + Layout::new::(), + |layout| Global.allocate(layout), + |ptr| ptr as *mut _, + )) + } + } +} + +/// Helper type to allow accessing the reference counts without +/// making any assertions about the data field. +struct WeakInner<'a> { + weak: &'a atomic::AtomicUsize, + strong: &'a atomic::AtomicUsize, +} + +// TODO: See Weak::from_raw +impl Weak { + /// Converts a raw pointer previously created by [`into_raw`] back into `Weak`. + /// + /// This can be used to safely get a strong reference (by calling [`upgrade`] + /// later) or to deallocate the weak count by dropping the `Weak`. + /// + /// It takes ownership of one weak reference (with the exception of pointers created by [`new`], + /// as these don't own anything; the method still works on them). + /// + /// # Safety + /// + /// The pointer must have originated from the [`into_raw`] and must still own its potential + /// weak reference. + /// + /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this + /// takes ownership of one weak reference currently represented as a raw pointer (the weak + /// count is not modified by this operation) and therefore it must be paired with a previous + /// call to [`into_raw`]. + /// # Examples + /// + /// ``` + /// use portable_atomic_util::{Arc, Weak}; + /// + /// let strong = Arc::new("hello".to_owned()); + /// + /// let raw_1 = Arc::downgrade(&strong).into_raw(); + /// let raw_2 = Arc::downgrade(&strong).into_raw(); + /// + /// assert_eq!(2, Arc::weak_count(&strong)); + /// + /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap()); + /// assert_eq!(1, Arc::weak_count(&strong)); + /// + /// drop(strong); + /// + /// // Decrement the last weak count. + /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none()); + /// ``` + /// + /// [`new`]: Weak::new + /// [`into_raw`]: Weak::into_raw + /// [`upgrade`]: Weak::upgrade + #[inline] + pub unsafe fn from_raw(ptr: *const T) -> Self { + // See Weak::as_ptr for context on how the input pointer is derived. + + let ptr = if is_dangling(ptr) { + // This is a dangling Weak. + ptr as *mut ArcInner + } else { + // Otherwise, we're guaranteed the pointer came from a non-dangling Weak. + // TODO: data_offset calls align_of_val which needs to create a reference + // to data but we cannot create a reference to data here since data in Weak + // can be dropped concurrently from another thread. Therefore, we can + // only support sized types that can avoid references to data + // unless align_of_val_raw is stabilized. + // // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T. + // let offset = unsafe { data_offset::(ptr) }; + let offset = data_offset_align(mem::align_of::()); + + // Thus, we reverse the offset to get the whole RcBox. + // SAFETY: the pointer originated from a Weak, so this offset is safe. + unsafe { strict::byte_sub(ptr as *mut T, offset) as *mut ArcInner } + }; + + // SAFETY: we now have recovered the original Weak pointer, so can create the Weak. 
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } } + } +} + +// TODO: See Weak::from_raw +impl Weak { + /// Returns a raw pointer to the object `T` pointed to by this `Weak`. + /// + /// The pointer is valid only if there are some strong references. The pointer may be dangling, + /// unaligned or even [`null`] otherwise. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// use std::ptr; + /// + /// let strong = Arc::new("hello".to_owned()); + /// let weak = Arc::downgrade(&strong); + /// // Both point to the same object + /// assert!(ptr::eq(&*strong, weak.as_ptr())); + /// // The strong here keeps it alive, so we can still access the object. + /// assert_eq!("hello", unsafe { &*weak.as_ptr() }); + /// + /// drop(strong); + /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to + /// // undefined behavior. + /// // assert_eq!("hello", unsafe { &*weak.as_ptr() }); + /// ``` + /// + /// [`null`]: core::ptr::null "ptr::null" + #[must_use] + pub fn as_ptr(&self) -> *const T { + let ptr: *mut ArcInner = self.ptr.as_ptr(); + + if is_dangling(ptr) { + // If the pointer is dangling, we return the sentinel directly. This cannot be + // a valid payload address, as the payload is at least as aligned as ArcInner (usize). + ptr as *const T + } else { + // TODO: See Weak::from_raw + // // SAFETY: if is_dangling returns false, then the pointer is dereferenceable. + // // The payload may be dropped at this point, and we have to maintain provenance, + // // so use raw pointer manipulation. + // unsafe { data_ptr::(ptr, &(*ptr).data) } + unsafe { + let offset = data_offset_align(mem::align_of::()); + strict::byte_add(ptr, offset) as *const T + } + } + } + + /// Consumes the `Weak` and turns it into a raw pointer. + /// + /// This converts the weak pointer into a raw pointer, while still preserving the ownership of + /// one weak reference (the weak count is not modified by this operation). It can be turned + /// back into the `Weak` with [`from_raw`]. + /// + /// The same restrictions of accessing the target of the pointer as with + /// [`as_ptr`] apply. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::{Arc, Weak}; + /// + /// let strong = Arc::new("hello".to_owned()); + /// let weak = Arc::downgrade(&strong); + /// let raw = weak.into_raw(); + /// + /// assert_eq!(1, Arc::weak_count(&strong)); + /// assert_eq!("hello", unsafe { &*raw }); + /// + /// drop(unsafe { Weak::from_raw(raw) }); + /// assert_eq!(0, Arc::weak_count(&strong)); + /// ``` + /// + /// [`from_raw`]: Weak::from_raw + /// [`as_ptr`]: Weak::as_ptr + #[must_use = "losing the pointer will leak memory"] + pub fn into_raw(self) -> *const T { + ManuallyDrop::new(self).as_ptr() + } +} + +impl Weak { + /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying + /// dropping of the inner value if successful. + /// + /// Returns [`None`] if the inner value has since been dropped. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// let weak_five = Arc::downgrade(&five); + /// + /// let strong_five: Option> = weak_five.upgrade(); + /// assert!(strong_five.is_some()); + /// + /// // Destroy all strong pointers. 
+ /// drop(strong_five); + /// drop(five); + /// + /// assert!(weak_five.upgrade().is_none()); + /// ``` + #[must_use = "this returns a new `Arc`, \ + without modifying the original weak pointer"] + pub fn upgrade(&self) -> Option> { + #[inline] + fn checked_increment(n: usize) -> Option { + // Any write of 0 we can observe leaves the field in permanently zero state. + if n == 0 { + return None; + } + // See comments in `Arc::clone` for why we do this (for `mem::forget`). + assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR); + Some(n + 1) + } + + // We use a CAS loop to increment the strong count instead of a + // fetch_add as this function should never take the reference count + // from zero to one. + // + // Relaxed is fine for the failure case because we don't have any expectations about the new state. + // Acquire is necessary for the success case to synchronize with `Arc::new_cyclic`, when the inner + // value can be initialized after `Weak` references have already been created. In that case, we + // expect to observe the fully initialized value. + if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() { + // SAFETY: pointer is not null, verified in checked_increment + unsafe { Some(Arc::from_inner(self.ptr)) } + } else { + None + } + } + + /// Gets the number of strong (`Arc`) pointers pointing to this allocation. + /// + /// If `self` was created using [`Weak::new`], this will return 0. + #[must_use] + pub fn strong_count(&self) -> usize { + if let Some(inner) = self.inner() { + inner.strong.load(Relaxed) + } else { + 0 + } + } + + /// Gets an approximation of the number of `Weak` pointers pointing to this + /// allocation. + /// + /// If `self` was created using [`Weak::new`], or if there are no remaining + /// strong pointers, this will return 0. + /// + /// # Accuracy + /// + /// Due to implementation details, the returned value can be off by 1 in + /// either direction when other threads are manipulating any `Arc`s or + /// `Weak`s pointing to the same allocation. + #[must_use] + pub fn weak_count(&self) -> usize { + if let Some(inner) = self.inner() { + let weak = inner.weak.load(Acquire); + let strong = inner.strong.load(Relaxed); + if strong == 0 { + 0 + } else { + // Since we observed that there was at least one strong pointer + // after reading the weak count, we know that the implicit weak + // reference (present whenever any strong references are alive) + // was still around when we observed the weak count, and can + // therefore safely subtract it. + weak - 1 + } + } else { + 0 + } + } + + /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`, + /// (i.e., when this `Weak` was created by `Weak::new`). + #[inline] + fn inner(&self) -> Option> { + let ptr = self.ptr.as_ptr(); + if is_dangling(ptr) { + None + } else { + // SAFETY: non-dangling Weak is a valid pointer. + // We are careful to *not* create a reference covering the "data" field, as + // the field may be mutated concurrently (for example, if the last `Arc` + // is dropped, the data field will be dropped in-place). + Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } }) + } + } + + /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if + /// both don't point to any allocation (because they were created with `Weak::new()`). However, + /// this function ignores the metadata of `dyn Trait` pointers. 
+ /// + /// # Notes + /// + /// Since this compares pointers it means that `Weak::new()` will equal each + /// other, even though they don't point to any allocation. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let first_rc = Arc::new(5); + /// let first = Arc::downgrade(&first_rc); + /// let second = Arc::downgrade(&first_rc); + /// + /// assert!(first.ptr_eq(&second)); + /// + /// let third_rc = Arc::new(5); + /// let third = Arc::downgrade(&third_rc); + /// + /// assert!(!first.ptr_eq(&third)); + /// ``` + /// + /// Comparing `Weak::new`. + /// + /// ``` + /// use portable_atomic_util::{Arc, Weak}; + /// + /// let first = Weak::new(); + /// let second = Weak::new(); + /// assert!(first.ptr_eq(&second)); + /// + /// let third_rc = Arc::new(()); + /// let third = Arc::downgrade(&third_rc); + /// assert!(!first.ptr_eq(&third)); + /// ``` + /// + /// [`ptr::eq`]: core::ptr::eq "ptr::eq" + #[inline] + #[must_use] + pub fn ptr_eq(&self, other: &Self) -> bool { + ptr::eq(self.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ()) + } +} + +impl Weak { + /// Allocates an `ArcInner` with sufficient space for + /// a possibly-unsized inner value where the value has the layout provided. + /// + /// The function `mem_to_arc_inner` is called with the data pointer + /// and must return back a (potentially fat)-pointer for the `ArcInner`. + unsafe fn allocate_for_layout( + value_layout: Layout, + allocate: impl FnOnce(Layout) -> Option>, + mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner, + ) -> *mut ArcInner { + let layout = arc_inner_layout_for_value_layout(value_layout); + + let ptr = allocate(layout).unwrap_or_else(|| handle_alloc_error(layout)); + + unsafe { Self::initialize_arc_inner(ptr, layout, mem_to_arc_inner) } + } + + unsafe fn initialize_arc_inner( + ptr: NonNull, + _layout: Layout, + mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner, + ) -> *mut ArcInner { + let inner: *mut ArcInner = mem_to_arc_inner(ptr.as_ptr()); + // debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout); // for_value_raw is unstable + + // SAFETY: mem_to_arc_inner return a valid pointer to uninitialized ArcInner. + // ArcInner is repr(C), and strong and weak are the first and second fields and + // are the same type, so `inner as *mut atomic::AtomicUsize` is strong and + // `(inner as *mut atomic::AtomicUsize).add(1)` is weak. + unsafe { + let strong = inner as *mut atomic::AtomicUsize; + strong.write(atomic::AtomicUsize::new(0)); + let weak = strong.add(1); + weak.write(atomic::AtomicUsize::new(1)); + } + + inner + } +} + +impl Clone for Weak { + /// Makes a clone of the `Weak` pointer that points to the same allocation. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::{Arc, Weak}; + /// + /// let weak_five = Arc::downgrade(&Arc::new(5)); + /// + /// let _ = Weak::clone(&weak_five); + /// ``` + #[inline] + fn clone(&self) -> Self { + if let Some(inner) = self.inner() { + // See comments in Arc::clone() for why this is relaxed. This can use a + // fetch_add (ignoring the lock) because the weak count is only locked + // where are *no other* weak pointers in existence. (So we can't be + // running this code in that case). + let old_size = inner.weak.fetch_add(1, Relaxed); + + // See comments in Arc::clone() for why we do this (for mem::forget). + if old_size > MAX_REFCOUNT { + abort(); + } + } + + Self { ptr: self.ptr } + } +} + +impl Default for Weak { + /// Constructs a new `Weak`, without allocating memory. 
+ /// Calling [`upgrade`] on the return value always + /// gives [`None`]. + /// + /// [`upgrade`]: Weak::upgrade + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Weak; + /// + /// let empty: Weak = Default::default(); + /// assert!(empty.upgrade().is_none()); + /// ``` + fn default() -> Self { + Self::new() + } +} + +impl Drop for Weak { + /// Drops the `Weak` pointer. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::{Arc, Weak}; + /// + /// struct Foo; + /// + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } + /// } + /// + /// let foo = Arc::new(Foo); + /// let weak_foo = Arc::downgrade(&foo); + /// let other_weak_foo = Weak::clone(&weak_foo); + /// + /// drop(weak_foo); // Doesn't print anything + /// drop(foo); // Prints "dropped!" + /// + /// assert!(other_weak_foo.upgrade().is_none()); + /// ``` + fn drop(&mut self) { + // If we find out that we were the last weak pointer, then its time to + // deallocate the data entirely. See the discussion in Arc::drop() about + // the memory orderings + // + // It's not necessary to check for the locked state here, because the + // weak count can only be locked if there was precisely one weak ref, + // meaning that drop could only subsequently run ON that remaining weak + // ref, which can only happen after the lock is released. + let inner = if let Some(inner) = self.inner() { inner } else { return }; + + if inner.weak.fetch_sub(1, Release) == 1 { + acquire!(inner.weak); + // Free the allocation without dropping T + let ptr = self.ptr.as_ptr() as *mut ArcInner>; + drop(unsafe { Box::from_raw(ptr) }); + } + } +} + +impl PartialEq for Arc { + /// Equality for two `Arc`s. + /// + /// Two `Arc`s are equal if their inner values are equal, even if they are + /// stored in different allocation. + /// + /// If `T` also implements `Eq` (implying reflexivity of equality), + /// two `Arc`s that point to the same allocation are always equal. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five == Arc::new(5)); + /// ``` + #[inline] + fn eq(&self, other: &Self) -> bool { + **self == **other + } + + /// Inequality for two `Arc`s. + /// + /// Two `Arc`s are not equal if their inner values are not equal. + /// + /// If `T` also implements `Eq` (implying reflexivity of equality), + /// two `Arc`s that point to the same value are always equal. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five != Arc::new(6)); + /// ``` + #[allow(clippy::partialeq_ne_impl)] + #[inline] + fn ne(&self, other: &Self) -> bool { + **self != **other + } +} + +impl PartialOrd for Arc { + /// Partial comparison for two `Arc`s. + /// + /// The two are compared by calling `partial_cmp()` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// use std::cmp::Ordering; + /// + /// let five = Arc::new(5); + /// + /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6))); + /// ``` + fn partial_cmp(&self, other: &Self) -> Option { + (**self).partial_cmp(&**other) + } + + /// Less-than comparison for two `Arc`s. + /// + /// The two are compared by calling `<` on their inner values. 
+ /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five < Arc::new(6)); + /// ``` + fn lt(&self, other: &Self) -> bool { + *(*self) < *(*other) + } + + /// 'Less than or equal to' comparison for two `Arc`s. + /// + /// The two are compared by calling `<=` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five <= Arc::new(5)); + /// ``` + fn le(&self, other: &Self) -> bool { + *(*self) <= *(*other) + } + + /// Greater-than comparison for two `Arc`s. + /// + /// The two are compared by calling `>` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five > Arc::new(4)); + /// ``` + fn gt(&self, other: &Self) -> bool { + *(*self) > *(*other) + } + + /// 'Greater than or equal to' comparison for two `Arc`s. + /// + /// The two are compared by calling `>=` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five >= Arc::new(5)); + /// ``` + fn ge(&self, other: &Self) -> bool { + *(*self) >= *(*other) + } +} +impl Ord for Arc { + /// Comparison for two `Arc`s. + /// + /// The two are compared by calling `cmp()` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// use std::cmp::Ordering; + /// + /// let five = Arc::new(5); + /// + /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6))); + /// ``` + fn cmp(&self, other: &Self) -> cmp::Ordering { + (**self).cmp(&**other) + } +} +impl Eq for Arc {} + +impl fmt::Display for Arc { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +impl fmt::Debug for Arc { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl fmt::Pointer for Arc { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Pointer::fmt(&(&**self as *const T), f) + } +} + +impl Default for Arc { + /// Creates a new `Arc`, with the `Default` value for `T`. + /// + /// # Examples + /// + /// ``` + /// use portable_atomic_util::Arc; + /// + /// let x: Arc = Default::default(); + /// assert_eq!(*x, 0); + /// ``` + fn default() -> Self { + Self::new(T::default()) + } +} + +#[cfg(not(portable_atomic_no_min_const_generics))] +impl Default for Arc { + /// Creates an empty str inside an Arc. + /// + /// This may or may not share an allocation with other Arcs. + #[inline] + fn default() -> Self { + let arc: Arc<[u8]> = Arc::default(); + debug_assert!(core::str::from_utf8(&arc).is_ok()); + let ptr = Arc::into_inner_non_null(arc); + unsafe { Arc::from_ptr(ptr.as_ptr() as *mut ArcInner) } + } +} + +#[cfg(not(portable_atomic_no_min_const_generics))] +impl Default for Arc<[T]> { + /// Creates an empty `[T]` inside an Arc. + /// + /// This may or may not share an allocation with other Arcs. + #[inline] + fn default() -> Self { + // TODO: we cannot use non-allocation optimization (https://github.com/rust-lang/rust/blob/1.80.0/library/alloc/src/sync.rs#L3449) + // for now since casting Arc<[T; N]> -> Arc<[T]> requires unstable CoerceUnsized. 
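+        //
+        // Observable behavior (a sketch; this implementation always goes through
+        // `Arc::from([T; 0])`, so the slice is empty but the allocation is not
+        // guaranteed to be shared between calls):
+        //
+        //     let a: Arc<[i32]> = Arc::default();
+        //     let b: Arc<[i32]> = Arc::default();
+        //     assert!(a.is_empty() && b.is_empty());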
+ let arr: [T; 0] = []; + Arc::from(arr) + } +} + +impl Hash for Arc { + fn hash(&self, state: &mut H) { + (**self).hash(state); + } +} + +impl From for Arc { + /// Converts a `T` into an `Arc` + /// + /// The conversion moves the value into a + /// newly allocated `Arc`. It is equivalent to + /// calling `Arc::new(t)`. + /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// let x = 5; + /// let arc = Arc::new(5); + /// + /// assert_eq!(Arc::from(x), arc); + /// ``` + fn from(t: T) -> Self { + Self::new(t) + } +} + +// This just outputs the input as is, but helps avoid syntax checks by old rustc that rejects const generics. +#[cfg(not(portable_atomic_no_min_const_generics))] +macro_rules! items { + ($($tt:tt)*) => { + $($tt)* + }; +} + +#[cfg(not(portable_atomic_no_min_const_generics))] +items! { +impl From<[T; N]> for Arc<[T]> { + /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`. + /// + /// The conversion moves the array into a newly allocated `Arc`. + /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// let original: [i32; 3] = [1, 2, 3]; + /// let shared: Arc<[i32]> = Arc::from(original); + /// assert_eq!(&[1, 2, 3], &shared[..]); + /// ``` + #[inline] + fn from(v: [T; N]) -> Self { + // Casting Arc<[T; N]> -> Arc<[T]> requires unstable CoerceUnsized, so we convert via Box. + // Since the compiler knows the actual size and metadata, the intermediate allocation is + // optimized and generates the same code as when using CoerceUnsized and convert Arc<[T; N]> to Arc<[T]>. + // https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569 + let v: Box<[T]> = Box::<[T; N]>::from(v); + v.into() + } +} +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl From<&[T]> for Arc<[T]> { + /// Allocates a reference-counted slice and fills it by cloning `v`'s items. + /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// let original: &[i32] = &[1, 2, 3]; + /// let shared: Arc<[i32]> = Arc::from(original); + /// assert_eq!(&[1, 2, 3], &shared[..]); + /// ``` + #[inline] + fn from(v: &[T]) -> Self { + unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) } + } +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl From<&str> for Arc { + /// Allocates a reference-counted `str` and copies `v` into it. + /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// let shared: Arc = Arc::from("eggplant"); + /// assert_eq!("eggplant", &shared[..]); + /// ``` + #[inline] + fn from(v: &str) -> Self { + let arc = Arc::<[u8]>::from(v.as_bytes()); + // SAFETY: `str` has the same layout as `[u8]`. + // https://doc.rust-lang.org/nightly/reference/type-layout.html#str-layout + unsafe { Self::from_raw(Arc::into_raw(arc) as *const str) } + } +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl From for Arc { + /// Allocates a reference-counted `str` and copies `v` into it. + /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// let unique: String = "eggplant".to_owned(); + /// let shared: Arc = Arc::from(unique); + /// assert_eq!("eggplant", &shared[..]); + /// ``` + #[inline] + fn from(v: String) -> Self { + Self::from(&v[..]) + } +} + +impl From> for Arc { + /// Move a boxed object to a new, reference-counted allocation. 
+ /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// let unique: Box = Box::from("eggplant"); + /// let shared: Arc = Arc::from(unique); + /// assert_eq!("eggplant", &shared[..]); + /// ``` + #[inline] + fn from(v: Box) -> Self { + Self::from_box(v) + } +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl From> for Arc<[T]> { + /// Allocates a reference-counted slice and moves `v`'s items into it. + /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// let unique: Vec = vec![1, 2, 3]; + /// let shared: Arc<[i32]> = Arc::from(unique); + /// assert_eq!(&[1, 2, 3], &shared[..]); + /// ``` + #[inline] + fn from(v: Vec) -> Self { + unsafe { + let len = v.len(); + let cap = v.capacity(); + let vec_ptr = mem::ManuallyDrop::new(v).as_mut_ptr(); + + let mut arc = Self::new_uninit_slice(len); + let data = Arc::get_mut_unchecked(&mut arc); + ptr::copy_nonoverlapping(vec_ptr, data.as_mut_ptr() as *mut T, len); + + // Create a `Vec` with length 0, to deallocate the buffer + // without dropping its contents or the allocator + let _ = Vec::from_raw_parts(vec_ptr, 0, cap); + + arc.assume_init() + } + } +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl<'a, B> From> for Arc +where + B: ?Sized + ToOwned, + Arc: From<&'a B> + From, +{ + /// Creates an atomically reference-counted pointer from a clone-on-write + /// pointer by copying its content. + /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// use std::borrow::Cow; + /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant"); + /// let shared: Arc = Arc::from(cow); + /// assert_eq!("eggplant", &shared[..]); + /// ``` + #[inline] + fn from(cow: Cow<'a, B>) -> Self { + match cow { + Cow::Borrowed(s) => Self::from(s), + Cow::Owned(s) => Self::from(s), + } + } +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl From> for Arc<[u8]> { + /// Converts an atomically reference-counted string slice into a byte slice. + /// + /// # Example + /// + /// ``` + /// use portable_atomic_util::Arc; + /// let string: Arc = Arc::from("eggplant"); + /// let bytes: Arc<[u8]> = Arc::from(string); + /// assert_eq!("eggplant".as_bytes(), bytes.as_ref()); + /// ``` + #[inline] + fn from(rc: Arc) -> Self { + // SAFETY: `str` has the same layout as `[u8]`. + // https://doc.rust-lang.org/nightly/reference/type-layout.html#str-layout + unsafe { Self::from_raw(Arc::into_raw(rc) as *const [u8]) } + } +} + +#[cfg(not(portable_atomic_no_min_const_generics))] +items! { +impl core::convert::TryFrom> for Arc<[T; N]> { + type Error = Arc<[T]>; + + fn try_from(boxed_slice: Arc<[T]>) -> Result { + if boxed_slice.len() == N { + let ptr = Arc::into_inner_non_null(boxed_slice); + Ok(unsafe { Self::from_inner(ptr.cast::>()) }) + } else { + Err(boxed_slice) + } + } +} +} + +#[cfg(not(portable_atomic_no_alloc_layout_extras))] +impl core::iter::FromIterator for Arc<[T]> { + /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`. + /// + /// # Performance characteristics + /// + /// ## The general case + /// + /// In the general case, collecting into `Arc<[T]>` is done by first + /// collecting into a `Vec`. 
That is, when writing the following:
+    ///
+    /// ```
+    /// use portable_atomic_util::Arc;
+    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// this behaves as if we wrote:
+    ///
+    /// ```
+    /// use portable_atomic_util::Arc;
+    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
+    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// This will allocate as many times as needed for constructing the `Vec`
+    /// and then it will allocate once for turning the `Vec` into the `Arc<[T]>`.
+    ///
+    /// ## Iterators of known length
+    ///
+    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+    /// a single allocation will be made for the `Arc<[T]>`. For example:
+    ///
+    /// ```
+    /// use portable_atomic_util::Arc;
+    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+    /// ```
+    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
+        iter.into_iter().collect::<Vec<T>>().into()
+    }
+}
+
+impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
+    fn borrow(&self) -> &T {
+        self
+    }
+}
+
+impl<T: ?Sized> AsRef<T> for Arc<T> {
+    fn as_ref(&self) -> &T {
+        self
+    }
+}
+
+impl<T: ?Sized> Unpin for Arc<T> {}
+
+/// Gets the pointer to data within the given `ArcInner`.
+///
+/// # Safety
+///
+/// `arc` must uphold the safety requirements for `.byte_add(data_offset)`.
+/// This is automatically satisfied if it is a pointer to a valid `ArcInner<T>`.
+unsafe fn data_ptr<T: ?Sized>(arc: *mut ArcInner<T>, data: &T) -> *mut T {
+    // SAFETY: the caller must uphold the safety contract.
+    unsafe {
+        let offset = data_offset::<T>(data);
+        strict::byte_add(arc, offset) as *mut T
+    }
+}
+
+/// Gets the offset within an `ArcInner` for the payload behind a pointer.
+fn data_offset<T: ?Sized>(ptr: &T) -> usize {
+    // Align the unsized value to the end of the ArcInner.
+    // Because RcBox is repr(C), it will always be the last field in memory.
+    data_offset_align(align_of_val::<T>(ptr))
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> usize {
+    let layout = Layout::new::<ArcInner<()>>();
+    layout.size() + padding_needed_for(layout, align)
+}
+
+/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
+/// but will deallocate it (without dropping the value) when dropped.
+///
+/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
+struct UniqueArcUninit<T: ?Sized> {
+    ptr: NonNull<ArcInner<T>>,
+    layout_for_value: Layout,
+}
+
+impl<T: ?Sized> UniqueArcUninit<T> {
+    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
+    fn new(for_value: &T) -> Self {
+        let layout = Layout::for_value(for_value);
+        let ptr = unsafe { Arc::allocate_for_value(for_value) };
+        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout }
+    }
+
+    /// Returns the pointer to be written into to initialize the [`Arc`].
+    fn data_ptr(&mut self) -> *mut T {
+        let offset = data_offset_align(self.layout_for_value.align());
+        unsafe { strict::byte_add(self.ptr.as_ptr(), offset) as *mut T }
+    }
+
+    /// Upgrade this into a normal [`Arc`].
+    ///
+    /// # Safety
+    ///
+    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
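+    ///
+    /// A sketch of the intended call sequence (this is the pattern [`Arc::make_mut`]
+    /// relies on; `value: &T` with `T: Clone` is assumed):
+    ///
+    /// ```ignore
+    /// let mut in_progress = UniqueArcUninit::new(value);
+    /// let arc: Arc<T> = unsafe {
+    ///     value.clone_to_uninit(in_progress.data_ptr()); // initialize the payload
+    ///     in_progress.into_arc() // hand ownership of the allocation to a real Arc
+    /// };
+    /// ```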
+ unsafe fn into_arc(self) -> Arc { + let this = ManuallyDrop::new(self); + let ptr = this.ptr.as_ptr(); + + // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible + // for having initialized the data. + unsafe { Arc::from_ptr(ptr) } + } +} + +impl Drop for UniqueArcUninit { + fn drop(&mut self) { + // SAFETY: + // * new() produced a pointer safe to deallocate. + // * We own the pointer unless into_arc() was called, which forgets us. + unsafe { + Global.deallocate( + self.ptr.cast::(), + arc_inner_layout_for_value_layout(self.layout_for_value), + ); + } + } +} + +#[cfg(not(portable_atomic_no_error_in_core))] +use core::error; +#[cfg(all(portable_atomic_no_error_in_core, feature = "std"))] +use std::error; +#[cfg(any(not(portable_atomic_no_error_in_core), feature = "std"))] +impl error::Error for Arc { + #[allow(deprecated)] + fn description(&self) -> &str { + error::Error::description(&**self) + } + #[allow(deprecated)] + fn cause(&self) -> Option<&dyn error::Error> { + error::Error::cause(&**self) + } + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + error::Error::source(&**self) + } +} + +#[cfg(feature = "std")] +mod std_impls { + use super::Arc; + + // TODO: Other trait implementations that are stable but we currently don't provide: + // - alloc::ffi + // - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-From%3C%26CStr%3E-for-Arc%3CCStr%3E + // - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-From%3CCString%3E-for-Arc%3CCStr%3E + // - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-Default-for-Arc%3CCStr%3E + // - Currently, we cannot implement these since CStr layout is not stable. + // - std::ffi + // - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3C%26OsStr%3E-for-Arc%3COsStr%3E + // - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3COsString%3E-for-Arc%3COsStr%3E + // - Currently, we cannot implement these since OsStr layout is not stable. + // - std::path + // - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3C%26Path%3E-for-Arc%3CPath%3E + // - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3CPathBuf%3E-for-Arc%3CPath%3E + // - Currently, we cannot implement these since Path layout is not stable. + + // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsFd-for-Arc%3CT%3E + // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsHandle-for-Arc%3CT%3E + // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsRawFd-for-Arc%3CT%3E + // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsSocket-for-Arc%3CT%3E + // Note: + // - T: ?Sized is currently only allowed on AsFd/AsHandle: https://github.com/rust-lang/rust/pull/114655#issuecomment-1977994288 + // - std doesn't implement AsRawHandle/AsRawSocket for Arc as of Rust 1.77. 
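+    // Usage sketch for the fd impls below (unix-only, and assumes the `std`
+    // feature; the bind address is arbitrary):
+    //
+    //     use portable_atomic_util::Arc;
+    //     use std::{net::UdpSocket, os::unix::io::AsFd};
+    //
+    //     let sock = Arc::new(UdpSocket::bind("127.0.0.1:0").unwrap());
+    //     let fd = sock.as_fd(); // BorrowedFd<'_> tied to the lifetime of `sock`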
+ #[cfg(not(portable_atomic_no_io_safety))] + #[cfg(unix)] + use std::os::unix::io as fd; + // - std::os::unix::io::AsRawFd and std::os::windows::io::{AsRawHandle, AsRawSocket} are available in all versions + // - std::os::wasi::prelude::AsRawFd requires 1.56 (https://github.com/rust-lang/rust/commit/e555003e6d6b6d71ce5509a6b6c7a15861208d6c) + // - std::os::unix::io::AsFd, std::os::wasi::prelude::AsFd, and std::os::windows::io::{AsHandle, AsSocket} require Rust 1.63 + // - std::os::wasi::io::AsFd requires Rust 1.65 (https://github.com/rust-lang/rust/pull/103308) + // - std::os::fd requires Rust 1.66 (https://github.com/rust-lang/rust/pull/98368) + // - std::os::hermit::io::AsFd requires Rust 1.69 (https://github.com/rust-lang/rust/commit/b5fb4f3d9b1b308d59cab24ef2f9bf23dad948aa) + // - std::os::fd for HermitOS requires Rust 1.81 (https://github.com/rust-lang/rust/pull/126346) + // - std::os::solid::io::AsFd is unstable (solid_ext, https://github.com/rust-lang/rust/pull/115159) + // Note: we don't implement unstable ones. + #[cfg(not(portable_atomic_no_io_safety))] + #[cfg(target_os = "hermit")] + use std::os::hermit::io as fd; + #[cfg(not(portable_atomic_no_io_safety))] + #[cfg(target_os = "wasi")] + use std::os::wasi::prelude as fd; + /// This impl allows implementing traits that require `AsRawFd` on Arc. + /// ``` + /// # #[cfg(target_os = "hermit")] + /// # use std::os::hermit::io::AsRawFd; + /// # #[cfg(target_os = "wasi")] + /// # use std::os::wasi::prelude::AsRawFd; + /// # #[cfg(unix)] + /// # use std::os::unix::io::AsRawFd; + /// use portable_atomic_util::Arc; + /// use std::net::UdpSocket; + /// + /// trait MyTrait: AsRawFd {} + /// impl MyTrait for Arc {} + /// ``` + // AsRawFd has been stable before io_safety, but this impl was added after io_safety: https://github.com/rust-lang/rust/pull/97437 + #[cfg(not(portable_atomic_no_io_safety))] + #[cfg(any(unix, target_os = "hermit", target_os = "wasi"))] + impl fd::AsRawFd for Arc { + #[inline] + fn as_raw_fd(&self) -> fd::RawFd { + (**self).as_raw_fd() + } + } + /// This impl allows implementing traits that require `AsFd` on Arc. + /// ``` + /// # #[cfg(target_os = "hermit")] + /// # use std::os::hermit::io::AsFd; + /// # #[cfg(target_os = "wasi")] + /// # use std::os::wasi::prelude::AsFd; + /// # #[cfg(unix)] + /// # use std::os::unix::io::AsFd; + /// use portable_atomic_util::Arc; + /// use std::net::UdpSocket; + /// + /// trait MyTrait: AsFd {} + /// impl MyTrait for Arc {} + /// ``` + #[cfg(not(portable_atomic_no_io_safety))] + #[cfg(any(unix, target_os = "hermit", target_os = "wasi"))] + impl fd::AsFd for Arc { + #[inline] + fn as_fd(&self) -> fd::BorrowedFd<'_> { + (**self).as_fd() + } + } + /// This impl allows implementing traits that require `AsHandle` on Arc. + /// ``` + /// # use std::os::windows::io::AsHandle; + /// use portable_atomic_util::Arc; + /// use std::fs::File; + /// + /// trait MyTrait: AsHandle {} + /// impl MyTrait for Arc {} + /// ``` + #[cfg(not(portable_atomic_no_io_safety))] + #[cfg(windows)] + impl std::os::windows::io::AsHandle for Arc { + #[inline] + fn as_handle(&self) -> std::os::windows::io::BorrowedHandle<'_> { + (**self).as_handle() + } + } + /// This impl allows implementing traits that require `AsSocket` on Arc. 
+ /// ``` + /// # use std::os::windows::io::AsSocket; + /// use portable_atomic_util::Arc; + /// use std::net::UdpSocket; + /// + /// trait MyTrait: AsSocket {} + /// impl MyTrait for Arc {} + /// ``` + #[cfg(not(portable_atomic_no_io_safety))] + #[cfg(windows)] + impl std::os::windows::io::AsSocket for Arc { + #[inline] + fn as_socket(&self) -> std::os::windows::io::BorrowedSocket<'_> { + (**self).as_socket() + } + } + + // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Read-for-Arc%3CFile%3E + // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Seek-for-Arc%3CFile%3E + // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Write-for-Arc%3CFile%3E + // Note: From discussions in https://github.com/rust-lang/rust/pull/94748 and relevant, + // TcpStream and UnixStream will likely have similar implementations in the future. + impl std::io::Read for Arc { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + (&**self).read(buf) + } + #[cfg(not(portable_atomic_no_io_vec))] + fn read_vectored( + &mut self, + bufs: &mut [std::io::IoSliceMut<'_>], + ) -> std::io::Result { + (&**self).read_vectored(bufs) + } + // fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> { + // (&**self).read_buf(cursor) + // } + // #[inline] + // fn is_read_vectored(&self) -> bool { + // (&**self).is_read_vectored() + // } + fn read_to_end(&mut self, buf: &mut alloc::vec::Vec) -> std::io::Result { + (&**self).read_to_end(buf) + } + fn read_to_string(&mut self, buf: &mut alloc::string::String) -> std::io::Result { + (&**self).read_to_string(buf) + } + } + impl std::io::Write for Arc { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + (&**self).write(buf) + } + #[cfg(not(portable_atomic_no_io_vec))] + fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result { + (&**self).write_vectored(bufs) + } + // #[inline] + // fn is_write_vectored(&self) -> bool { + // (&**self).is_write_vectored() + // } + #[inline] + fn flush(&mut self) -> std::io::Result<()> { + (&**self).flush() + } + } + impl std::io::Seek for Arc { + fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { + (&**self).seek(pos) + } + } +} + +use self::clone::CloneToUninit; +mod clone { + use core::ptr; + #[cfg(not(portable_atomic_no_maybe_uninit))] + use core::{ + mem::{self, MaybeUninit}, + slice, + }; + + // Based on unstable core::clone::CloneToUninit. + // This trait is private and cannot be implemented for types outside of `portable-atomic-util`. 
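+    // Contract sketch (for sized `T`): given `src: &T` and `dst` pointing at
+    // uninitialized, suitably aligned memory for a `T`, `clone_to_uninit`
+    // leaves `*dst` initialized with a clone of `*src`:
+    //
+    //     let src = String::from("hi");
+    //     let mut slot = core::mem::MaybeUninit::<String>::uninit();
+    //     // SAFETY: `slot` is writable, properly aligned, and uninitialized.
+    //     unsafe { src.clone_to_uninit(slot.as_mut_ptr()) };
+    //     assert_eq!(unsafe { slot.assume_init() }, "hi");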
+ #[doc(hidden)] // private API + pub unsafe trait CloneToUninit { + unsafe fn clone_to_uninit(&self, dst: *mut Self); + } + unsafe impl CloneToUninit for T { + #[inline] + unsafe fn clone_to_uninit(&self, dst: *mut Self) { + // SAFETY: we're calling a specialization with the same contract + unsafe { clone_one(self, dst) } + } + } + #[cfg(not(portable_atomic_no_maybe_uninit))] + unsafe impl CloneToUninit for [T] { + #[inline] + #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)] + unsafe fn clone_to_uninit(&self, dst: *mut Self) { + // SAFETY: we're calling a specialization with the same contract + unsafe { clone_slice(self, dst) } + } + } + #[cfg(not(portable_atomic_no_maybe_uninit))] + unsafe impl CloneToUninit for str { + #[inline] + #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)] + unsafe fn clone_to_uninit(&self, dst: *mut Self) { + // SAFETY: str is just a [u8] with UTF-8 invariant + unsafe { self.as_bytes().clone_to_uninit(dst as *mut [u8]) } + } + } + // Note: Currently, we cannot implement this for CStr/OsStr/Path since theirs layout is not stable. + + #[inline] + unsafe fn clone_one(src: &T, dst: *mut T) { + // SAFETY: The safety conditions of clone_to_uninit() are a superset of those of + // ptr::write(). + unsafe { + // We hope the optimizer will figure out to create the cloned value in-place, + // skipping ever storing it on the stack and the copy to the destination. + ptr::write(dst, src.clone()); + } + } + #[cfg(not(portable_atomic_no_maybe_uninit))] + #[inline] + #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)] + unsafe fn clone_slice(src: &[T], dst: *mut [T]) { + let len = src.len(); + + // SAFETY: The produced `&mut` is valid because: + // * The caller is obligated to provide a pointer which is valid for writes. + // * All bytes pointed to are in MaybeUninit, so we don't care about the memory's + // initialization status. + let uninit_ref = unsafe { &mut *(dst as *mut [MaybeUninit]) }; + + // This is the most likely mistake to make, so check it as a debug assertion. + debug_assert_eq!( + len, + uninit_ref.len(), + "clone_to_uninit() source and destination must have equal lengths", + ); + + // Copy the elements + let mut initializing = InitializingSlice::from_fully_uninit(uninit_ref); + for element_ref in src { + // If the clone() panics, `initializing` will take care of the cleanup. + initializing.push(element_ref.clone()); + } + // If we reach here, then the entire slice is initialized, and we've satisfied our + // responsibilities to the caller. Disarm the cleanup guard by forgetting it. + mem::forget(initializing); + } + + /// Ownership of a collection of values stored in a non-owned `[MaybeUninit]`, some of which + /// are not yet initialized. This is sort of like a `Vec` that doesn't own its allocation. + /// Its responsibility is to provide cleanup on unwind by dropping the values that *are* + /// initialized, unless disarmed by forgetting. + /// + /// This is a helper for `impl CloneToUninit for [T]`. + #[cfg(not(portable_atomic_no_maybe_uninit))] + struct InitializingSlice<'a, T> { + data: &'a mut [MaybeUninit], + /// Number of elements of `*self.data` that are initialized. 
+ initialized_len: usize, + } + #[cfg(not(portable_atomic_no_maybe_uninit))] + impl<'a, T> InitializingSlice<'a, T> { + #[inline] + fn from_fully_uninit(data: &'a mut [MaybeUninit]) -> Self { + Self { data, initialized_len: 0 } + } + /// Push a value onto the end of the initialized part of the slice. + /// + /// # Panics + /// + /// Panics if the slice is already fully initialized. + #[inline] + fn push(&mut self, value: T) { + self.data[self.initialized_len] = MaybeUninit::new(value); + self.initialized_len += 1; + } + } + #[cfg(not(portable_atomic_no_maybe_uninit))] + impl Drop for InitializingSlice<'_, T> { + #[cold] // will only be invoked on unwind + fn drop(&mut self) { + let initialized_slice = unsafe { + slice::from_raw_parts_mut(self.data.as_mut_ptr() as *mut T, self.initialized_len) + }; + // SAFETY: + // * the pointer is valid because it was made from a mutable reference + // * `initialized_len` counts the initialized elements as an invariant of this type, + // so each of the pointed-to elements is initialized and may be dropped. + unsafe { + ptr::drop_in_place::<[T]>(initialized_slice); + } + } + } +} + +// Based on unstable Layout::padding_needed_for. +#[must_use] +#[inline] +fn padding_needed_for(layout: Layout, align: usize) -> usize { + let len = layout.size(); + + // Rounded up value is: + // len_rounded_up = (len + align - 1) & !(align - 1); + // and then we return the padding difference: `len_rounded_up - len`. + // + // We use modular arithmetic throughout: + // + // 1. align is guaranteed to be > 0, so align - 1 is always + // valid. + // + // 2. `len + align - 1` can overflow by at most `align - 1`, + // so the &-mask with `!(align - 1)` will ensure that in the + // case of overflow, `len_rounded_up` will itself be 0. + // Thus the returned padding, when added to `len`, yields 0, + // which trivially satisfies the alignment `align`. + // + // (Of course, attempts to allocate blocks of memory whose + // size and padding overflow in the above manner should cause + // the allocator to yield an error anyway.) + + let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); + len_rounded_up.wrapping_sub(len) +} + +// Based on Layout::pad_to_align stabilized in Rust 1.44. +#[must_use] +#[inline] +fn pad_to_align(layout: Layout) -> Layout { + let pad = padding_needed_for(layout, layout.align()); + // This cannot overflow. Quoting from the invariant of Layout: + // > `size`, when rounded up to the nearest multiple of `align`, + // > must not overflow isize (i.e., the rounded value must be + // > less than or equal to `isize::MAX`) + let new_size = layout.size() + pad; + + // SAFETY: padded size is guaranteed to not exceed `isize::MAX`. + unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) } +} + +// Based on Layout::extend stabilized in Rust 1.44. +#[inline] +fn extend_layout(layout: Layout, next: Layout) -> Option<(Layout, usize)> { + let new_align = cmp::max(layout.align(), next.align()); + let pad = padding_needed_for(layout, next.align()); + + let offset = layout.size().checked_add(pad)?; + let new_size = offset.checked_add(next.size())?; + + // The safe constructor is called here to enforce the isize size limit. + let layout = Layout::from_size_align(new_size, new_align).ok()?; + Some((layout, offset)) +} + +#[cfg(feature = "std")] +use std::process::abort; +#[cfg(not(feature = "std"))] +#[cold] +fn abort() -> ! 
{
+    struct Abort;
+    impl Drop for Abort {
+        fn drop(&mut self) {
+            panic!();
+        }
+    }
+
+    let _abort = Abort;
+    panic!("abort")
+}
+
+fn is_dangling<T: ?Sized>(ptr: *const T) -> bool {
+    ptr as *const () as usize == usize::MAX
+}
+
+// Based on unstable alloc::alloc::Global.
+//
+// Note: unlike alloc::alloc::Global that returns NonNull<[u8]>,
+// this returns NonNull<u8>.
+struct Global;
+#[allow(clippy::unused_self)]
+impl Global {
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    fn allocate(self, layout: Layout) -> Option<NonNull<u8>> {
+        // Layout::dangling is unstable
+        #[must_use]
+        #[inline]
+        fn dangling(layout: Layout) -> NonNull<u8> {
+            // SAFETY: align is guaranteed to be non-zero
+            unsafe { NonNull::new_unchecked(strict::without_provenance_mut::<u8>(layout.align())) }
+        }
+
+        match layout.size() {
+            0 => Some(dangling(layout)),
+            // SAFETY: `layout` is non-zero in size,
+            _size => unsafe {
+                let raw_ptr = alloc::alloc::alloc(layout);
+                NonNull::new(raw_ptr)
+            },
+        }
+    }
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+        if layout.size() != 0 {
+            // SAFETY: `layout` is non-zero in size,
+            // other conditions must be upheld by the caller
+            unsafe { alloc::alloc::dealloc(ptr.as_ptr(), layout) }
+        }
+    }
+}
+
+// TODO: use stabilized core::ptr strict_provenance helpers https://github.com/rust-lang/rust/pull/130350
+mod strict {
+    #[inline(always)]
+    #[must_use]
+    pub(super) const fn without_provenance_mut<T>(addr: usize) -> *mut T {
+        // An int-to-pointer transmute currently has exactly the intended semantics: it creates a
+        // pointer without provenance. Note that this is *not* a stable guarantee about transmute
+        // semantics, it relies on sysroot crates having special status.
+        // SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that
+        // pointer).
+        #[cfg(miri)]
+        unsafe {
+            core::mem::transmute(addr)
+        }
+        // const transmute requires Rust 1.56.
+        #[cfg(not(miri))]
+        {
+            addr as *mut T
+        }
+    }
+
+    /// Creates a new pointer with the metadata of `other`.
+    #[inline]
+    #[must_use]
+    pub(super) fn with_metadata_of<T, U: ?Sized>(this: *mut T, mut other: *mut U) -> *mut U {
+        let target = &mut other as *mut *mut U as *mut *mut u8;
+
+        // SAFETY: In case of a thin pointer, this operation is identical
+        // to a simple assignment. In case of a fat pointer, with the current
+        // fat pointer layout implementation, the first field of such a
+        // pointer is always the data pointer, which is likewise assigned.
+        unsafe { *target = this as *mut u8 };
+        other
+    }
+
+    // Based on <pointer>::byte_add stabilized in Rust 1.75.
+    #[inline]
+    #[must_use]
+    pub(super) unsafe fn byte_add<T: ?Sized>(ptr: *mut T, count: usize) -> *mut T {
+        // SAFETY: the caller must uphold the safety contract for `add`.
+        unsafe { with_metadata_of((ptr as *mut u8).add(count), ptr) }
+    }
+
+    // Based on <pointer>::byte_sub stabilized in Rust 1.75.
+    #[inline]
+    #[must_use]
+    pub(super) unsafe fn byte_sub<T: ?Sized>(ptr: *mut T, count: usize) -> *mut T {
+        // SAFETY: the caller must uphold the safety contract for `sub`.
+        unsafe { with_metadata_of((ptr as *mut u8).sub(count), ptr) }
+    }
+}
diff --git a/external/vendor/portable-atomic-util/src/lib.rs b/external/vendor/portable-atomic-util/src/lib.rs
new file mode 100644
index 0000000000..64d9ab5992
--- /dev/null
+++ b/external/vendor/portable-atomic-util/src/lib.rs
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+/*!
+ +Synchronization primitives built with [portable-atomic]. + +- Provide `Arc`. (optional, requires the `std` or `alloc` feature) +- Provide `task::Wake`. (optional, requires the `std` or `alloc` feature) + + +See [#1] for other primitives being considered for addition to this crate. + +## Optional features + +- **`std`**
+ Use `std`. + + Note: + - This implicitly enables the `alloc` feature. + +- **`alloc`**
+  Use `alloc`.
+
+  Note:
+  - The MSRV when this feature is enabled and the `std` feature is *not* enabled is Rust 1.36, in which the `alloc` crate was stabilized.
+
+[portable-atomic]: https://github.com/taiki-e/portable-atomic
+[#1]: https://github.com/taiki-e/portable-atomic/issues/1
+
+## Optional cfg
+
+One of the ways to enable a cfg is to set [rustflags in the cargo config](https://doc.rust-lang.org/cargo/reference/config.html#targettriplerustflags):
+
+```toml
+# .cargo/config.toml
+[target.<triple>]
+rustflags = ["--cfg", "portable_atomic_unstable_coerce_unsized"]
+```
+
+Or set the environment variable:
+
+```sh
+RUSTFLAGS="--cfg portable_atomic_unstable_coerce_unsized" cargo ...
+```
+
+- **`--cfg portable_atomic_unstable_coerce_unsized`**
+  Support coercing of `Arc<T>` to `Arc<U>` as in `std::sync::Arc`.
+
+  This cfg requires Rust nightly because this coercion requires the [unstable `CoerceUnsized` trait](https://doc.rust-lang.org/nightly/core/ops/trait.CoerceUnsized.html).
+
+  See [this issue comment](https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569) for another known workaround.
+
+  **Note:** This cfg is unstable and outside of the normal semver guarantees; minor or patch versions of portable-atomic-util may make breaking changes to it at any time.
+
+*/
+
+#![no_std]
+#![doc(test(
+    no_crate_inject,
+    attr(
+        deny(warnings, rust_2018_idioms, single_use_lifetimes),
+        allow(dead_code, unused_variables)
+    )
+))]
+#![cfg_attr(not(portable_atomic_no_unsafe_op_in_unsafe_fn), warn(unsafe_op_in_unsafe_fn))] // unsafe_op_in_unsafe_fn requires Rust 1.52
+#![cfg_attr(portable_atomic_no_unsafe_op_in_unsafe_fn, allow(unused_unsafe))]
+#![warn(
+    // Lints that may help when writing public library.
+    missing_debug_implementations,
+    missing_docs,
+    clippy::alloc_instead_of_core,
+    clippy::exhaustive_enums,
+    clippy::exhaustive_structs,
+    clippy::impl_trait_in_params,
+    // clippy::missing_inline_in_public_items,
+    clippy::std_instead_of_alloc,
+    clippy::std_instead_of_core,
+)]
+#![allow(clippy::inline_always)]
+// docs.rs only (cfg is enabled by docs.rs, not build script)
+#![cfg_attr(docsrs, feature(doc_cfg))]
+// Enable custom unsized coercions if the user explicitly opts in to the unstable cfg
+#![cfg_attr(portable_atomic_unstable_coerce_unsized, feature(coerce_unsized, unsize))]
+
+#[cfg(all(feature = "alloc", not(portable_atomic_no_alloc)))]
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate std;
+#[cfg(all(feature = "std", portable_atomic_no_alloc))]
+extern crate std as alloc;
+
+#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))]
+#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))]
+mod arc;
+#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))]
+pub use self::arc::{Arc, Weak};
+
+#[cfg(not(portable_atomic_no_futures_api))]
+#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))]
+#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))]
+pub mod task;
diff --git a/external/vendor/portable-atomic-util/src/task.rs b/external/vendor/portable-atomic-util/src/task.rs
new file mode 100644
index 0000000000..57c0a5f0f7
--- /dev/null
+++ b/external/vendor/portable-atomic-util/src/task.rs
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Types and Traits for working with asynchronous tasks.
+
+// This module is based on alloc::task::Wake.
+//
+// The code has been adjusted to work with stable Rust.
+//
+// Source: https://github.com/rust-lang/rust/blob/1.80.0/library/alloc/src/task.rs.
+//
+// Copyright & License of the original code:
+// - https://github.com/rust-lang/rust/blob/1.80.0/COPYRIGHT
+// - https://github.com/rust-lang/rust/blob/1.80.0/LICENSE-APACHE
+// - https://github.com/rust-lang/rust/blob/1.80.0/LICENSE-MIT
+
+use core::{
+    mem::ManuallyDrop,
+    task::{RawWaker, RawWakerVTable, Waker},
+};
+
+use crate::Arc;
+
+/// The implementation of waking a task on an executor.
+///
+/// This is an equivalent to [`std::task::Wake`], but using [`portable_atomic_util::Arc`](crate::Arc)
+/// as a reference-counted pointer. See the documentation for [`std::task::Wake`] for more details.
+///
+/// **Note:** Unlike `std::task::Wake`, all methods take `this:` instead of `self:`.
+/// This is because using `portable_atomic_util::Arc` as a receiver requires the
+/// [unstable `arbitrary_self_types` feature](https://github.com/rust-lang/rust/issues/44874).
+///
+/// # Examples
+///
+/// A basic `block_on` function that takes a future and runs it to completion on
+/// the current thread.
+///
+/// **Note:** This example trades correctness for simplicity. In order to prevent
+/// deadlocks, production-grade implementations will also need to handle
+/// intermediate calls to `thread::unpark` as well as nested invocations.
+///
+/// ```
+/// use portable_atomic_util::{task::Wake, Arc};
+/// use std::{
+///     future::Future,
+///     task::{Context, Poll},
+///     thread::{self, Thread},
+/// };
+///
+/// /// A waker that wakes up the current thread when called.
+/// struct ThreadWaker(Thread);
+///
+/// impl Wake for ThreadWaker {
+///     fn wake(this: Arc<Self>) {
+///         this.0.unpark();
+///     }
+/// }
+///
+/// /// Run a future to completion on the current thread.
+/// fn block_on<T>(fut: impl Future<Output = T>) -> T {
+///     // Pin the future so it can be polled.
+///     let mut fut = Box::pin(fut);
+///
+///     // Create a new context to be passed to the future.
+///     let t = thread::current();
+///     let waker = Arc::new(ThreadWaker(t)).into();
+///     let mut cx = Context::from_waker(&waker);
+///
+///     // Run the future to completion.
+///     loop {
+///         match fut.as_mut().poll(&mut cx) {
+///             Poll::Ready(res) => return res,
+///             Poll::Pending => thread::park(),
+///         }
+///     }
+/// }
+///
+/// block_on(async {
+///     println!("Hi from inside a future!");
+/// });
+/// ```
+pub trait Wake {
+    /// Wake this task.
+    fn wake(this: Arc<Self>);
+
+    /// Wake this task without consuming the waker.
+    ///
+    /// If an executor supports a cheaper way to wake without consuming the
+    /// waker, it should override this method. By default, it clones the
+    /// [`Arc`] and calls [`wake`] on the clone.
+    ///
+    /// [`wake`]: Wake::wake
+    fn wake_by_ref(this: &Arc<Self>) {
+        Self::wake(this.clone());
+    }
+}
+
+impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for Waker {
+    /// Use a `Wake`-able type as a `Waker`.
+    ///
+    /// No heap allocations or atomic operations are used for this conversion.
+    fn from(waker: Arc<W>) -> Self {
+        // SAFETY: This is safe because raw_waker safely constructs
+        // a RawWaker from Arc<W>.
+        unsafe { Self::from_raw(raw_waker(waker)) }
+    }
+}
+
+impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for RawWaker {
+    /// Use a `Wake`-able type as a `RawWaker`.
+    ///
+    /// No heap allocations or atomic operations are used for this conversion.
+    fn from(waker: Arc<W>) -> Self {
+        raw_waker(waker)
+    }
+}
+
+// NB: This private function for constructing a RawWaker is used, rather than
+// inlining this into the `From<Arc<W>> for RawWaker` impl, to ensure that
+// the safety of `From<Arc<W>> for Waker` does not depend on the correct
+// trait dispatch - instead both impls call this function directly and
+// explicitly.
+#[inline(always)]
+fn raw_waker<W: Wake + Send + Sync + 'static>(waker: Arc<W>) -> RawWaker {
+    // Increment the reference count of the arc to clone it.
+    //
+    // The #[inline(always)] is to ensure that raw_waker and clone_waker are
+    // always generated in the same code generation unit as one another, and
+    // therefore that the structurally identical const-promoted RawWakerVTable
+    // within both functions is deduplicated at LLVM IR code generation time.
+    // This allows optimizing Waker::will_wake to a single pointer comparison of
+    // the vtable pointers, rather than comparing all four function pointers
+    // within the vtables.
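+    // A consequence of that deduplication (a sketch): wakers built from clones
+    // of the same `Arc` share both the data pointer and the vtable, so
+    // `Waker::will_wake` can report them as waking the same task:
+    //
+    //     struct Noop;
+    //     impl Wake for Noop {
+    //         fn wake(_: Arc<Self>) {}
+    //     }
+    //     let a = Arc::new(Noop);
+    //     let w1: Waker = a.clone().into();
+    //     let w2: Waker = a.into();
+    //     assert!(w1.will_wake(&w2));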
+ #[inline(always)] + unsafe fn clone_waker(waker: *const ()) -> RawWaker { + // SAFETY: the caller must uphold the safety contract. + unsafe { Arc::increment_strong_count(waker as *const W) }; + RawWaker::new( + waker, + &RawWakerVTable::new(clone_waker::, wake::, wake_by_ref::, drop_waker::), + ) + } + + // Wake by value, moving the Arc into the Wake::wake function + unsafe fn wake(waker: *const ()) { + // SAFETY: the caller must uphold the safety contract. + let waker = unsafe { Arc::from_raw(waker as *const W) }; + ::wake(waker); + } + + // Wake by reference, wrap the waker in ManuallyDrop to avoid dropping it + unsafe fn wake_by_ref(waker: *const ()) { + // SAFETY: the caller must uphold the safety contract. + let waker = unsafe { ManuallyDrop::new(Arc::from_raw(waker as *const W)) }; + ::wake_by_ref(&waker); + } + + // Decrement the reference count of the Arc on drop + unsafe fn drop_waker(waker: *const ()) { + // SAFETY: the caller must uphold the safety contract. + unsafe { Arc::decrement_strong_count(waker as *const W) }; + } + + RawWaker::new( + Arc::into_raw(waker) as *const (), + &RawWakerVTable::new(clone_waker::, wake::, wake_by_ref::, drop_waker::), + ) +} diff --git a/external/vendor/portable-atomic-util/tests/arc.rs b/external/vendor/portable-atomic-util/tests/arc.rs new file mode 100644 index 0000000000..d9f152cb4d --- /dev/null +++ b/external/vendor/portable-atomic-util/tests/arc.rs @@ -0,0 +1,784 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +#![cfg(any(feature = "std", feature = "alloc"))] +#![allow(clippy::undocumented_unsafe_blocks)] + +use std::{borrow::Cow, panic}; + +use portable_atomic_util::{Arc, Weak}; + +#[derive(Debug, PartialEq)] +#[repr(align(128))] +struct Aligned(u32); + +// https://github.com/taiki-e/portable-atomic/issues/37 +#[test] +fn over_aligned() { + let value = Arc::new(Aligned(128)); + let ptr = Arc::into_raw(value); + // SAFETY: `ptr` should always be a valid `Aligned`. + assert_eq!(unsafe { (*ptr).0 }, 128); + // SAFETY: `ptr` is a valid reference to an `Arc`. 
diff --git a/external/vendor/portable-atomic-util/tests/arc.rs b/external/vendor/portable-atomic-util/tests/arc.rs
new file mode 100644
index 0000000000..d9f152cb4d
--- /dev/null
+++ b/external/vendor/portable-atomic-util/tests/arc.rs
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#![cfg(any(feature = "std", feature = "alloc"))]
+#![allow(clippy::undocumented_unsafe_blocks)]
+
+use std::{borrow::Cow, panic};
+
+use portable_atomic_util::{Arc, Weak};
+
+#[derive(Debug, PartialEq)]
+#[repr(align(128))]
+struct Aligned(u32);
+
+// https://github.com/taiki-e/portable-atomic/issues/37
+#[test]
+fn over_aligned() {
+    let value = Arc::new(Aligned(128));
+    let ptr = Arc::into_raw(value);
+    // SAFETY: `ptr` should always be a valid `Aligned`.
+    assert_eq!(unsafe { (*ptr).0 }, 128);
+    // SAFETY: `ptr` is a valid reference to an `Arc`.
+    let value = unsafe { Arc::from_raw(ptr) };
+    assert_eq!(value.0, 128);
+}
+
+#[test]
+fn default() {
+    let v = Arc::<[Aligned]>::default();
+    assert_eq!(v[..], []);
+    let v = Arc::<[()]>::default();
+    assert_eq!(v[..], []);
+    let v = Arc::<str>::default();
+    assert_eq!(&v[..], "");
+}
+
+#[test]
+fn cow_from() {
+    let o = Cow::Owned("abc".to_owned());
+    let b = Cow::Borrowed("def");
+    let o: Arc<str> = Arc::from(o);
+    let b: Arc<str> = Arc::from(b);
+    assert_eq!(&*o, "abc");
+    assert_eq!(&*b, "def");
+}
+
+#[test]
+fn make_mut_unsized() {
+    let mut v: Arc<[i32]> = Arc::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+    Arc::make_mut(&mut v)[0] += 10;
+    assert_eq!(&*v, [11, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+    let v1 = Arc::clone(&v);
+    let v2 = Arc::make_mut(&mut v);
+    v2[1] += 10;
+    assert_eq!(&*v1, [11, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+    assert_eq!(&*v2, [11, 12, 3, 4, 5, 6, 7, 8, 9, 10]);
+    assert_eq!(&*v, [11, 12, 3, 4, 5, 6, 7, 8, 9, 10]);
+    drop(v1);
+    let w = Arc::downgrade(&v);
+    Arc::make_mut(&mut v)[2] += 10;
+    assert_eq!(&*v, [11, 12, 13, 4, 5, 6, 7, 8, 9, 10]);
+    assert!(w.upgrade().is_none());
+}
+
+#[test]
+fn make_mut_clone_panic() {
+    struct C(#[allow(dead_code)] Box<i32>);
+    impl Clone for C {
+        fn clone(&self) -> Self {
+            panic!()
+        }
+    }
+    let mut v: Arc<[C]> = Arc::from([C(Box::new(1)), C(Box::new(2))]);
+    let _v = Arc::make_mut(&mut v);
+    let v1 = Arc::clone(&v);
+    if !is_panic_abort() {
+        panic::catch_unwind(panic::AssertUnwindSafe(|| {
+            let _v = Arc::make_mut(&mut v);
+        }))
+        .unwrap_err();
+    }
+    drop(v1);
+}
+
+#[test]
+fn weak_dangling() {
+    let w = Weak::<usize>::new();
+    let p = Weak::into_raw(w);
+    let w = unsafe { Weak::from_raw(p) };
+    let w2 = Weak::clone(&w);
+    assert!(w.upgrade().is_none());
+    assert!(w2.upgrade().is_none());
+}
+
+// For -C panic=abort -Z panic_abort_tests: https://github.com/rust-lang/rust/issues/67650
+fn is_panic_abort() -> bool {
+    build_context::PANIC.contains("abort")
+}
+
+// https://github.com/rust-lang/rust/blob/1.80.0/library/alloc/src/sync/tests.rs
+#[allow(clippy::many_single_char_names)]
+mod alloc_tests {
+    use std::{
+        convert::TryInto,
+        sync::{mpsc::channel, Mutex},
+        thread,
+    };
+
+    use portable_atomic::{
+        AtomicBool, AtomicUsize,
+        Ordering::{Acquire, SeqCst},
+    };
+    use portable_atomic_util::{Arc, Weak};
+
+    struct Canary(*mut AtomicUsize);
+
+    impl Drop for Canary {
+        fn drop(&mut self) {
+            unsafe {
+                match *self {
+                    Canary(c) => {
+                        (*c).fetch_add(1, SeqCst);
+                    }
+                }
+            }
+        }
+    }
+
+    #[test]
+    #[cfg_attr(target_os = "emscripten", ignore)]
+    fn manually_share_arc() {
+        let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+        let arc_v = Arc::new(v);
+
+        let (tx, rx) = channel();
+
+        let _t = thread::spawn(move || {
+            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
+            assert_eq!((*arc_v)[3], 4);
+        });
+
+        tx.send(arc_v.clone()).unwrap();
+
+        assert_eq!((*arc_v)[2], 3);
+        assert_eq!((*arc_v)[4], 5);
+    }
+
+    #[test]
+    fn test_arc_get_mut() {
+        let mut x = Arc::new(3);
+        *Arc::get_mut(&mut x).unwrap() = 4;
+        assert_eq!(*x, 4);
+        let y = x.clone();
+        assert!(Arc::get_mut(&mut x).is_none());
+        drop(y);
+        assert!(Arc::get_mut(&mut x).is_some());
+        let _w = Arc::downgrade(&x);
+        assert!(Arc::get_mut(&mut x).is_none());
+    }
+
+    #[test]
+    fn weak_counts() {
+        assert_eq!(Weak::weak_count(&Weak::<u64>::new()), 0);
+        assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
+
+        let a = Arc::new(0);
+        let w = Arc::downgrade(&a);
+        assert_eq!(Weak::strong_count(&w), 1);
+        assert_eq!(Weak::weak_count(&w), 1);
+        let w2 = w.clone();
+        assert_eq!(Weak::strong_count(&w), 1);
+        assert_eq!(Weak::weak_count(&w), 2);
+        assert_eq!(Weak::strong_count(&w2), 1);
+        assert_eq!(Weak::weak_count(&w2), 2);
+        drop(w);
+        assert_eq!(Weak::strong_count(&w2), 1);
+        assert_eq!(Weak::weak_count(&w2), 1);
+        let a2 = a.clone();
+        assert_eq!(Weak::strong_count(&w2), 2);
+        assert_eq!(Weak::weak_count(&w2), 1);
+        drop(a2);
+        drop(a);
+        assert_eq!(Weak::strong_count(&w2), 0);
+        assert_eq!(Weak::weak_count(&w2), 0);
+        drop(w2);
+    }
+
+    #[test]
+    fn try_unwrap() {
+        let x = Arc::new(3);
+        assert_eq!(Arc::try_unwrap(x), Ok(3));
+        let x = Arc::new(4);
+        let _y = x.clone();
+        assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
+        let x = Arc::new(5);
+        let _w = Arc::downgrade(&x);
+        assert_eq!(Arc::try_unwrap(x), Ok(5));
+    }
+
+    #[test]
+    fn into_inner() {
+        for _ in 0..100
+        // ^ Increase chances of hitting potential race conditions
+        {
+            let x = Arc::new(3);
+            let y = Arc::clone(&x);
+            let r_thread = std::thread::spawn(|| Arc::into_inner(x));
+            let s_thread = std::thread::spawn(|| Arc::into_inner(y));
+            let r = r_thread.join().expect("r_thread panicked");
+            let s = s_thread.join().expect("s_thread panicked");
+            assert!(
+                matches!((r, s), (None, Some(3)) | (Some(3), None)),
+                "assertion failed: unexpected result `{:?}`\
+                 \n expected `(None, Some(3))` or `(Some(3), None)`",
+                (r, s),
+            );
+        }
+
+        let x = Arc::new(3);
+        assert_eq!(Arc::into_inner(x), Some(3));
+
+        let x = Arc::new(4);
+        let y = Arc::clone(&x);
+        assert_eq!(Arc::into_inner(x), None);
+        assert_eq!(Arc::into_inner(y), Some(4));
+
+        let x = Arc::new(5);
+        let _w = Arc::downgrade(&x);
+        assert_eq!(Arc::into_inner(x), Some(5));
+    }
+
+    #[test]
+    fn into_from_raw() {
+        let x = Arc::new(Box::new("hello"));
+        let y = x.clone();
+
+        let x_ptr = Arc::into_raw(x);
+        drop(y);
+        unsafe {
+            assert_eq!(**x_ptr, "hello");
+
+            let x = Arc::from_raw(x_ptr);
+            assert_eq!(**x, "hello");
+
+            assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
+        }
+    }
+
+    #[test]
+    fn test_into_from_raw_unsized() {
+        use std::fmt::Display;
+
+        let arc: Arc<str> = Arc::from("foo");
+
+        let ptr = Arc::into_raw(arc.clone());
+        let arc2 = unsafe { Arc::from_raw(ptr) };
+
+        assert_eq!(unsafe { &*ptr }, "foo");
+        assert_eq!(arc, arc2);
+
+        #[cfg(portable_atomic_unstable_coerce_unsized)]
+        let arc: Arc<dyn Display> = Arc::new(123);
+        // TODO: This is a workaround in case CoerceUnsized is not available - remove this once it is no longer needed
+        #[cfg(not(portable_atomic_unstable_coerce_unsized))]
+        let arc: Arc<dyn Display> = Arc::from(Box::new(123) as Box<dyn Display>);
+
+        let ptr = Arc::into_raw(arc.clone());
+        let arc2 = unsafe { Arc::from_raw(ptr) };
+
+        assert_eq!(unsafe { &*ptr }.to_string(), "123");
+        assert_eq!(arc2.to_string(), "123");
+    }
+
+    #[test]
+    fn into_from_weak_raw() {
+        let x = Arc::new(Box::new("hello"));
+        let y = Arc::downgrade(&x);
+
+        let y_ptr = Weak::into_raw(y);
+        unsafe {
+            assert_eq!(**y_ptr, "hello");
+
+            let y = Weak::from_raw(y_ptr);
+            let y_up = Weak::upgrade(&y).unwrap();
+            assert_eq!(**y_up, "hello");
+            drop(y_up);
+
+            assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
+        }
+    }
+
+    // TODO: See Weak::from_raw
+    // #[test]
+    // fn test_into_from_weak_raw_unsized() {
+    //     use std::fmt::Display;
+
+    //     let arc: Arc<str> = Arc::from("foo");
+    //     let weak: Weak<str> = Arc::downgrade(&arc);
+
+    //     let ptr = Weak::into_raw(weak.clone());
+    //     let weak2 = unsafe { Weak::from_raw(ptr) };
+
+    //     assert_eq!(unsafe { &*ptr }, "foo");
+    //     assert!(weak.ptr_eq(&weak2));
+
+    //     // TODO: CoerceUnsized is needed to cast to Arc<dyn Display>
+    //     // (may be possible to support this with portable_atomic_unstable_coerce_unsized cfg option)
+    //     // let arc: Arc<dyn Display> = Arc::new(123);
+    //     let arc: Arc<dyn Display> = Arc::from(Box::new(123) as Box<dyn Display>);
+    //     let weak: Weak<dyn Display> = Arc::downgrade(&arc);
+
+    //     let ptr = Weak::into_raw(weak.clone());
+    //     let weak2 = unsafe { Weak::from_raw(ptr) };
+
+    //     assert_eq!(unsafe { &*ptr }.to_string(), "123");
+    //     assert!(weak.ptr_eq(&weak2));
+    // }
+
+    #[test]
+    fn test_cow_arc_clone_make_mut() {
+        let mut cow0 = Arc::new(75);
+        let mut cow1 = cow0.clone();
+        let mut cow2 = cow1.clone();
+
+        assert!(75 == *Arc::make_mut(&mut cow0));
+        assert!(75 == *Arc::make_mut(&mut cow1));
+        assert!(75 == *Arc::make_mut(&mut cow2));
+
+        *Arc::make_mut(&mut cow0) += 1;
+        *Arc::make_mut(&mut cow1) += 2;
+        *Arc::make_mut(&mut cow2) += 3;
+
+        assert!(76 == *cow0);
+        assert!(77 == *cow1);
+        assert!(78 == *cow2);
+
+        // none should point to the same backing memory
+        assert!(*cow0 != *cow1);
+        assert!(*cow0 != *cow2);
+        assert!(*cow1 != *cow2);
+    }
+
+    #[test]
+    fn test_cow_arc_clone_unique2() {
+        let mut cow0 = Arc::new(75);
+        let cow1 = cow0.clone();
+        let cow2 = cow1.clone();
+
+        assert!(75 == *cow0);
+        assert!(75 == *cow1);
+        assert!(75 == *cow2);
+
+        *Arc::make_mut(&mut cow0) += 1;
+        assert!(76 == *cow0);
+        assert!(75 == *cow1);
+        assert!(75 == *cow2);
+
+        // cow1 and cow2 should share the same contents
+        // cow0 should have a unique reference
+        assert!(*cow0 != *cow1);
+        assert!(*cow0 != *cow2);
+        assert!(*cow1 == *cow2);
+    }
+
+    #[test]
+    fn test_cow_arc_clone_weak() {
+        let mut cow0 = Arc::new(75);
+        let cow1_weak = Arc::downgrade(&cow0);
+
+        assert!(75 == *cow0);
+        assert!(75 == *cow1_weak.upgrade().unwrap());
+
+        *Arc::make_mut(&mut cow0) += 1;
+
+        assert!(76 == *cow0);
+        assert!(cow1_weak.upgrade().is_none());
+    }
+
+    #[test]
+    fn test_live() {
+        let x = Arc::new(5);
+        let y = Arc::downgrade(&x);
+        assert!(y.upgrade().is_some());
+    }
+
+    #[test]
+    fn test_dead() {
+        let x = Arc::new(5);
+        let y = Arc::downgrade(&x);
+        drop(x);
+        assert!(y.upgrade().is_none());
+    }
+
+    #[test]
+    fn weak_self_cyclic() {
+        struct Cycle {
+            x: Mutex<Option<Weak<Cycle>>>,
+        }
+
+        let a = Arc::new(Cycle { x: Mutex::new(None) });
+        let b = Arc::downgrade(&a.clone());
+        *a.x.lock().unwrap() = Some(b);
+
+        // hopefully we don't double-free (or leak)...
+    }
+
+    #[test]
+    fn drop_arc() {
+        let mut canary = AtomicUsize::new(0);
+        let x = Arc::new(Canary(&mut canary as *mut AtomicUsize));
+        drop(x);
+        assert!(canary.load(Acquire) == 1);
+    }
+
+    #[test]
+    fn drop_arc_weak() {
+        let mut canary = AtomicUsize::new(0);
+        let arc = Arc::new(Canary(&mut canary as *mut AtomicUsize));
+        let arc_weak = Arc::downgrade(&arc);
+        assert!(canary.load(Acquire) == 0);
+        drop(arc);
+        assert!(canary.load(Acquire) == 1);
+        drop(arc_weak);
+    }
+
+    #[test]
+    fn test_strong_count() {
+        let a = Arc::new(0);
+        assert!(Arc::strong_count(&a) == 1);
+        let w = Arc::downgrade(&a);
+        assert!(Arc::strong_count(&a) == 1);
+        let b = w.upgrade().expect("");
+        assert!(Arc::strong_count(&b) == 2);
+        assert!(Arc::strong_count(&a) == 2);
+        drop(w);
+        drop(a);
+        assert!(Arc::strong_count(&b) == 1);
+        let c = b.clone();
+        assert!(Arc::strong_count(&b) == 2);
+        assert!(Arc::strong_count(&c) == 2);
+    }
+
+    #[test]
+    fn test_weak_count() {
+        let a = Arc::new(0);
+        assert!(Arc::strong_count(&a) == 1);
+        assert!(Arc::weak_count(&a) == 0);
+        let w = Arc::downgrade(&a);
+        assert!(Arc::strong_count(&a) == 1);
+        assert!(Arc::weak_count(&a) == 1);
+        let x = w.clone();
+        assert!(Arc::weak_count(&a) == 2);
+        drop(w);
+        drop(x);
+        assert!(Arc::strong_count(&a) == 1);
+        assert!(Arc::weak_count(&a) == 0);
+        let c = a.clone();
+        assert!(Arc::strong_count(&a) == 2);
+        assert!(Arc::weak_count(&a) == 0);
+        let d = Arc::downgrade(&c);
+        assert!(Arc::weak_count(&c) == 1);
+        assert!(Arc::strong_count(&c) == 2);
+
+        drop(a);
+        drop(c);
+        drop(d);
+    }
+
+    #[test]
+    fn show_arc() {
+        let a = Arc::new(5);
+        assert_eq!(format!("{:?}", a), "5");
+    }
+
+    // Make sure deriving works with Arc<T>
+    #[allow(dead_code)]
+    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
+    struct Foo {
+        inner: Arc<i32>,
+    }
+
+    #[test]
+    fn test_unsized() {
+        #[cfg(portable_atomic_unstable_coerce_unsized)]
+        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
+        // TODO: This is a workaround in case CoerceUnsized is not available - remove this once it is no longer needed
+        #[cfg(not(portable_atomic_unstable_coerce_unsized))]
+        let x: Arc<[i32]> = Arc::from(Box::new([1, 2, 3]) as Box<[i32]>);
+        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
+        let y = Arc::downgrade(&x.clone());
+        drop(x);
+        assert!(y.upgrade().is_none());
+    }
+
+    #[test]
+    fn test_maybe_thin_unsized() {
+        // If/when custom thin DSTs exist, this test should be updated to use one
+        use std::ffi::{CStr, CString};
+
+        let x: Arc<CStr> = Arc::from(CString::new("swordfish").unwrap().into_boxed_c_str());
+        assert_eq!(format!("{:?}", x), "\"swordfish\"");
+        let y: Weak<CStr> = Arc::downgrade(&x);
+        drop(x);
+
+        // At this point, the weak points to a dropped DST
+        assert!(y.upgrade().is_none());
+        // But we still need to be able to get the alloc layout to drop.
+        // CStr has no drop glue, but custom DSTs might, and need to work.
+        drop(y);
+    }
+
+    #[test]
+    fn test_from_owned() {
+        let foo = 123;
+        let foo_arc = Arc::from(foo);
+        assert!(123 == *foo_arc);
+    }
+
+    #[test]
+    fn test_new_weak() {
+        let foo: Weak<usize> = Weak::new();
+        assert!(foo.upgrade().is_none());
+    }
+
+    #[test]
+    fn test_ptr_eq() {
+        let five = Arc::new(5);
+        let same_five = five.clone();
+        let other_five = Arc::new(5);
+
+        assert!(Arc::ptr_eq(&five, &same_five));
+        assert!(!Arc::ptr_eq(&five, &other_five));
+    }
+
+    #[test]
+    #[cfg_attr(target_os = "emscripten", ignore)]
+    fn test_weak_count_locked() {
+        let mut a = Arc::new(AtomicBool::new(false));
+        let a2 = a.clone();
+        let t = thread::spawn(move || {
+            // Miri is too slow
+            let count = if cfg!(miri) { 1_000 } else { 1_000_000 };
+            for _i in 0..count {
+                Arc::get_mut(&mut a);
+            }
+            a.store(true, SeqCst);
+        });
+
+        while !a2.load(SeqCst) {
+            let n = Arc::weak_count(&a2);
+            assert!(n < 2, "bad weak count: {}", n);
+            #[cfg(miri)] // Miri's scheduler does not guarantee liveness, and thus needs this hint.
+            std::hint::spin_loop();
+        }
+        t.join().unwrap();
+    }
+
+    #[test]
+    fn test_from_str() {
+        let r: Arc<str> = Arc::from("foo");
+
+        assert_eq!(&r[..], "foo");
+    }
+
+    #[test]
+    fn test_copy_from_slice() {
+        let s: &[u32] = &[1, 2, 3];
+        let r: Arc<[u32]> = Arc::from(s);
+
+        assert_eq!(&r[..], [1, 2, 3]);
+    }
+
+    #[test]
+    fn test_clone_from_slice() {
+        #[derive(Clone, Debug, Eq, PartialEq)]
+        struct X(u32);
+
+        let s: &[X] = &[X(1), X(2), X(3)];
+        let r: Arc<[X]> = Arc::from(s);
+
+        assert_eq!(&r[..], s);
+    }
+
+    #[test]
+    #[should_panic = "test_clone_from_slice_panic"]
+    fn test_clone_from_slice_panic() {
+        struct Fail(u32, String);
+
+        impl Clone for Fail {
+            fn clone(&self) -> Fail {
+                if self.0 == 2 {
+                    panic!("test_clone_from_slice_panic");
+                }
+                Fail(self.0, self.1.clone())
+            }
+        }
+
+        let s: &[Fail] =
+            &[Fail(0, "foo".to_owned()), Fail(1, "bar".to_owned()), Fail(2, "baz".to_owned())];
+
+        // Should panic, but not cause memory corruption
+        let _r: Arc<[Fail]> = Arc::from(s);
+    }
+
+    #[test]
+    fn test_from_box() {
+        let b: Box<u32> = Box::new(123);
+        let r: Arc<u32> = Arc::from(b);
+
+        assert_eq!(*r, 123);
+    }
+
+    #[test]
+    fn test_from_box_str() {
+        let s = String::from("foo").into_boxed_str();
+        let r: Arc<str> = Arc::from(s);
+
+        assert_eq!(&r[..], "foo");
+    }
+
+    #[test]
+    fn test_from_box_slice() {
+        let s = vec![1, 2, 3].into_boxed_slice();
+        let r: Arc<[u32]> = Arc::from(s);
+
+        assert_eq!(&r[..], [1, 2, 3]);
+    }
+
+    #[test]
+    fn test_from_box_trait() {
+        use std::fmt::Display;
+
+        let b: Box<dyn Display> = Box::new(123);
+        let r: Arc<dyn Display> = Arc::from(b);
+
+        assert_eq!(r.to_string(), "123");
+    }
+
+    #[test]
+    fn test_from_box_trait_zero_sized() {
+        use std::fmt::Debug;
+
+        let b: Box<dyn Debug> = Box::new(());
+        let r: Arc<dyn Debug> = Arc::from(b);
+
+        assert_eq!(format!("{:?}", r), "()");
+    }
+
+    #[test]
+    fn test_from_vec() {
+        let v = vec![1, 2, 3];
+        let r: Arc<[u32]> = Arc::from(v);
+
+        assert_eq!(&r[..], [1, 2, 3]);
+    }
+
+    #[test]
+    fn test_downcast() {
+        use std::any::Any;
+
+        #[cfg(portable_atomic_unstable_coerce_unsized)]
+        let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::MAX);
+        // TODO: This is a workaround in case CoerceUnsized is not available - remove this once it is no longer needed
+        #[cfg(not(portable_atomic_unstable_coerce_unsized))]
+        let r1: Arc<dyn Any + Send + Sync> =
+            Arc::from(Box::new(i32::MAX) as Box<dyn Any + Send + Sync>);
+
+        #[cfg(portable_atomic_unstable_coerce_unsized)]
+        let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");
+        // TODO: This is a workaround in case CoerceUnsized is not available - remove this once it is no longer needed
+        #[cfg(not(portable_atomic_unstable_coerce_unsized))]
+        let r2: Arc<dyn Any + Send + Sync> =
+            Arc::from(Box::new("abc") as Box<dyn Any + Send + Sync>);
+
+        assert!(r1.clone().downcast::<u32>().is_err());
+
+        let r1i32 = r1.downcast::<i32>();
+        assert!(r1i32.is_ok());
+        assert_eq!(r1i32.unwrap(), Arc::new(i32::MAX));
+
+        assert!(r2.clone().downcast::<i32>().is_err());
+
+        let r2str = r2.downcast::<&'static str>();
+        assert!(r2str.is_ok());
+        assert_eq!(r2str.unwrap(), Arc::new("abc"));
+    }
+
+    #[test]
+    fn test_array_from_slice() {
+        let v = vec![1, 2, 3];
+        let r: Arc<[u32]> = Arc::from(v);
+
+        let a: Result<Arc<[u32; 3]>, _> = r.clone().try_into();
+        assert!(a.is_ok());
+
+        let a: Result<Arc<[u32; 2]>, _> = r.clone().try_into();
+        assert!(a.is_err());
+    }
+
+    #[test]
+    fn test_arc_cyclic_with_zero_refs() {
+        struct ZeroRefs {
+            inner: Weak<ZeroRefs>,
+        }
+        let zero_refs = Arc::new_cyclic(|inner| {
+            assert_eq!(inner.strong_count(), 0);
+            assert!(inner.upgrade().is_none());
+            ZeroRefs { inner: Weak::new() }
+        });
+
+        assert_eq!(Arc::strong_count(&zero_refs), 1);
+        assert_eq!(Arc::weak_count(&zero_refs), 0);
+        assert_eq!(zero_refs.inner.strong_count(), 0);
+        assert_eq!(zero_refs.inner.weak_count(), 0);
+    }
+
+    #[test]
+    fn test_arc_new_cyclic_one_ref() {
+        struct OneRef {
+            inner: Weak<OneRef>,
+        }
+        let one_ref = Arc::new_cyclic(|inner| {
+            assert_eq!(inner.strong_count(), 0);
+            assert!(inner.upgrade().is_none());
+            OneRef { inner: inner.clone() }
+        });
+
+        assert_eq!(Arc::strong_count(&one_ref), 1);
+        assert_eq!(Arc::weak_count(&one_ref), 1);
+
+        let one_ref2 = Weak::upgrade(&one_ref.inner).unwrap();
+        assert!(Arc::ptr_eq(&one_ref, &one_ref2));
+
+        assert_eq!(Arc::strong_count(&one_ref), 2);
+        assert_eq!(Arc::weak_count(&one_ref), 1);
+    }
+
+    #[test]
+    fn test_arc_cyclic_two_refs() {
+        struct TwoRefs {
+            inner1: Weak<TwoRefs>,
+            inner2: Weak<TwoRefs>,
+        }
+        let two_refs = Arc::new_cyclic(|inner| {
+            assert_eq!(inner.strong_count(), 0);
+            assert!(inner.upgrade().is_none());
+
+            let inner1 = inner.clone();
+            let inner2 = inner1.clone();
+
+            TwoRefs { inner1, inner2 }
+        });
+
+        assert_eq!(Arc::strong_count(&two_refs), 1);
+        assert_eq!(Arc::weak_count(&two_refs), 2);
+
+        let two_refs1 = Weak::upgrade(&two_refs.inner1).unwrap();
+        assert!(Arc::ptr_eq(&two_refs, &two_refs1));
+
+        let two_refs2 = Weak::upgrade(&two_refs.inner2).unwrap();
+        assert!(Arc::ptr_eq(&two_refs, &two_refs2));
+
+        assert_eq!(Arc::strong_count(&two_refs), 3);
+        assert_eq!(Arc::weak_count(&two_refs), 2);
+    }
+
+    /// Test for Arc::drop bug (https://github.com/rust-lang/rust/issues/55005)
+    #[test]
+    #[cfg(miri)] // relies on Stacked Borrows in Miri
+    fn arc_drop_dereferenceable_race() {
+        // The bug seems to take up to 700 iterations to reproduce with most seeds (tested 0-9).
+        for _ in 0..750 {
+            let arc_1 = Arc::new(());
+            let arc_2 = arc_1.clone();
+            let thread = thread::spawn(|| drop(arc_2));
+            // Spin a bit; makes the race more likely to appear
+            let mut i = 0;
+            while i < 256 {
+                i += 1;
+            }
+            drop(arc_1);
+            thread.join().unwrap();
+        }
+    }
+}
diff --git a/external/vendor/portable-atomic-util/version.rs b/external/vendor/portable-atomic-util/version.rs
new file mode 100644
index 0000000000..4e8c38cb44
--- /dev/null
+++ b/external/vendor/portable-atomic-util/version.rs
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use std::{env, iter, process::Command, str};
+
+pub(crate) fn rustc_version() -> Option<Version> {
+    let rustc = env::var_os("RUSTC")?;
+    let rustc_wrapper = if env::var_os("CARGO_ENCODED_RUSTFLAGS").is_some() {
+        env::var_os("RUSTC_WRAPPER").filter(|v| !v.is_empty())
+    } else {
+        // Cargo sets environment variables for wrappers correctly only since https://github.com/rust-lang/cargo/pull/9601.
+        None
+    };
+    // Do not apply RUSTC_WORKSPACE_WRAPPER: https://github.com/cuviper/autocfg/issues/58#issuecomment-2067625980
+    let mut rustc = rustc_wrapper.into_iter().chain(iter::once(rustc));
+    let mut cmd = Command::new(rustc.next().unwrap());
+    cmd.args(rustc);
+    // Use verbose version output because the packagers add extra strings to the normal version output.
+    // Do not use long flags (--version --verbose) because clippy-driver doesn't handle them properly.
+    // -vV also matches what cargo internally uses: https://github.com/rust-lang/cargo/blob/0.80.0/src/cargo/util/rustc.rs#L65
+    let output = cmd.arg("-vV").output().ok()?;
+    let verbose_version = str::from_utf8(&output.stdout).ok()?;
+    Version::parse(verbose_version)
+}
+
+#[cfg_attr(test, derive(Debug, PartialEq))]
+pub(crate) struct Version {
+    pub(crate) minor: u32,
+    pub(crate) nightly: bool,
+    commit_date: Date,
+    pub(crate) llvm: u32,
+}
+
+impl Version {
+    // The known latest stable version. If we are unable to determine
+    // the rustc version, we assume this is the current version.
+    // It is no problem if this is older than the actual latest stable.
+    // LLVM version is assumed to be the minimum external LLVM version:
+    // https://github.com/rust-lang/rust/blob/1.82.0/src/bootstrap/src/core/build_steps/llvm.rs#L586
+    pub(crate) const LATEST: Self = Self::stable(82, 17);
+
+    pub(crate) const fn stable(rustc_minor: u32, llvm_major: u32) -> Self {
+        Self { minor: rustc_minor, nightly: false, commit_date: Date::UNKNOWN, llvm: llvm_major }
+    }
+
+    pub(crate) fn probe(&self, minor: u32, year: u16, month: u8, day: u8) -> bool {
+        if self.nightly {
+            self.minor > minor
+                || self.minor == minor && self.commit_date >= Date::new(year, month, day)
+        } else {
+            self.minor >= minor
+        }
+    }
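Illustrative aside (not part of the vendored file): probe() treats the channels differently, so a stable 1.82 toolchain passes a probe for minor 82 regardless of date, while a nightly reporting 1.82 must also have been built on or after the given commit date.

    // Hypothetical check against the rules above: "does this toolchain have a
    // feature that landed in 1.82, available on nightly since 2024-07-01?"
    fn has_feature(v: &Version) -> bool {
        v.probe(82, 2024, 7, 1)
    }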
+    #[cfg(test)]
+    pub(crate) fn commit_date(&self) -> &Date {
+        &self.commit_date
+    }
+
+    pub(crate) fn parse(verbose_version: &str) -> Option<Self> {
+        let mut release = verbose_version
+            .lines()
+            .find(|line| line.starts_with("release: "))
+            .map(|line| &line["release: ".len()..])?
+            .splitn(2, '-');
+        let version = release.next().unwrap();
+        let channel = release.next().unwrap_or_default();
+        let mut digits = version.splitn(3, '.');
+        let major = digits.next()?;
+        if major != "1" {
+            return None;
+        }
+        let minor = digits.next()?.parse::<u32>().ok()?;
+        let _patch = digits.next().unwrap_or("0").parse::<u32>().ok()?;
+        let nightly = match env::var_os("RUSTC_BOOTSTRAP") {
+            // When -1 is passed rustc works like stable, e.g., cfg(target_feature = "unstable_target_feature") will never be set. https://github.com/rust-lang/rust/pull/132993
+            Some(ref v) if v == "-1" => false,
+            _ => channel == "nightly" || channel == "dev",
+        };
+
+        // Note that rustc 1.49-1.50 (and 1.13 or older) don't print LLVM version.
+        let llvm_major = (|| {
+            let version = verbose_version
+                .lines()
+                .find(|line| line.starts_with("LLVM version: "))
+                .map(|line| &line["LLVM version: ".len()..])?;
+            let mut digits = version.splitn(3, '.');
+            let major = digits.next()?.parse::<u32>().ok()?;
+            let _minor = digits.next()?.parse::<u32>().ok()?;
+            let _patch = digits.next().unwrap_or("0").parse::<u32>().ok()?;
+            Some(major)
+        })()
+        .unwrap_or(0);
+
+        // We don't refer to the commit date on stable/beta.
+        if nightly {
+            let commit_date = (|| {
+                let mut commit_date = verbose_version
+                    .lines()
+                    .find(|line| line.starts_with("commit-date: "))
+                    .map(|line| &line["commit-date: ".len()..])?
+                    .splitn(3, '-');
+                let year = commit_date.next()?.parse::<u16>().ok()?;
+                let month = commit_date.next()?.parse::<u8>().ok()?;
+                let day = commit_date.next()?.parse::<u8>().ok()?;
+                if month > 12 || day > 31 {
+                    return None;
+                }
+                Some(Date::new(year, month, day))
+            })();
+            Some(Self {
+                minor,
+                nightly,
+                commit_date: commit_date.unwrap_or(Date::UNKNOWN),
+                llvm: llvm_major,
+            })
+        } else {
+            Some(Self::stable(minor, llvm_major))
+        }
+    }
+}
+
+#[derive(PartialEq, PartialOrd)]
+#[cfg_attr(test, derive(Debug))]
+pub(crate) struct Date {
+    pub(crate) year: u16,
+    pub(crate) month: u8,
+    pub(crate) day: u8,
+}
+
+impl Date {
+    const UNKNOWN: Self = Self::new(0, 0, 0);
+
+    const fn new(year: u16, month: u8, day: u8) -> Self {
+        Self { year, month, day }
+    }
+}
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 134c20a85f..c1723d92a4 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -15,7 +15,6 @@
 # limitations under the License.
 
 set(DBB-FIRMWARE-SOURCES
-  ${CMAKE_SOURCE_DIR}/src/firmware_main_loop.c
   ${CMAKE_SOURCE_DIR}/src/delay.c
   ${CMAKE_SOURCE_DIR}/src/keystore.c
  ${CMAKE_SOURCE_DIR}/src/random.c
diff --git a/src/bootloader/startup.c b/src/bootloader/startup.c
index 830da3d972..b476b33bf1 100644
--- a/src/bootloader/startup.c
+++ b/src/bootloader/startup.c
@@ -112,9 +112,9 @@ int main(void)
 
     // Set device name, the MCU and BLE chip will probably not have the same name after a reset of
     // only the MCU.
- char buf[MEMORY_DEVICE_NAME_MAX_LEN] = {0}; + char buf[MEMORY_DEVICE_NAME_MAX_LEN + 1] = {0}; memory_random_name(buf); - da14531_set_name(buf, strlen(buf), &uart_write_queue); + da14531_set_name(buf, &uart_write_queue); // Ask for the current conection state da14531_get_connection_state(&uart_write_queue); diff --git a/src/da14531/da14531.c b/src/da14531/da14531.c index b74157edb1..f7557bc0b5 100644 --- a/src/da14531/da14531.c +++ b/src/da14531/da14531.c @@ -72,8 +72,9 @@ void da14531_set_product( } } -void da14531_set_name(const char* name, size_t name_len, struct ringbuffer* uart_out) +void da14531_set_name(const char* name, struct ringbuffer* uart_out) { + size_t name_len = strlen(name); uint8_t payload[64] = {0}; payload[0] = CTRL_CMD_DEVICE_NAME; memcpy(&payload[1], name, MIN(name_len, sizeof(payload) - 1)); diff --git a/src/da14531/da14531.h b/src/da14531/da14531.h index 5829041687..c17afd8e2c 100644 --- a/src/da14531/da14531.h +++ b/src/da14531/da14531.h @@ -52,7 +52,7 @@ void da14531_set_product( volatile uint16_t product_len, struct ringbuffer* uart_out); -void da14531_set_name(const char* name, size_t name_len, struct ringbuffer* uart_out); +void da14531_set_name(const char* name, struct ringbuffer* uart_out); void da14531_get_connection_state(struct ringbuffer* uart_out); diff --git a/src/da14531/da14531_handler.c b/src/da14531/da14531_handler.c index 90d240372e..57869623f7 100644 --- a/src/da14531/da14531_handler.c +++ b/src/da14531/da14531_handler.c @@ -82,7 +82,7 @@ bool da14531_handler_bond_db_set(void) } #endif -static void _ctrl_handler(struct da14531_ctrl_frame* frame, struct ringbuffer* queue) +static void _ctrl_handler(const struct da14531_ctrl_frame* frame, struct ringbuffer* queue) { switch (frame->cmd) { case CTRL_CMD_DEVICE_NAME: { @@ -158,7 +158,7 @@ static void _ctrl_handler(struct da14531_ctrl_frame* frame, struct ringbuffer* q &frame->cmd_data[0], sizeof(_ble_pairing_callback_data.key)); _ble_pairing_callback_data.queue = queue; - uint32_t pairing_code_int = (*(uint32_t*)&frame->cmd_data[0]) % 1000000; + uint32_t pairing_code_int = (*(const uint32_t*)&frame->cmd_data[0]) % 1000000; char pairing_code[7] = {0}; snprintf(pairing_code, sizeof(pairing_code), "%06lu", (long unsigned int)pairing_code_int); // util_log("da14531: show/confirm pairing code: %s", pairing_code); @@ -275,21 +275,21 @@ static void _ctrl_handler(struct da14531_ctrl_frame* frame, struct ringbuffer* q } } -static void _hww_handler(struct da14531_protocol_frame* frame, struct ringbuffer* queue) +static void _hww_handler(const struct da14531_protocol_frame* frame, struct ringbuffer* queue) { // util_log(" in: %s", util_dbg_hex(frame->payload, 64)); (void)queue; ASSERT(frame->payload_length == 64); - usb_packet_process((USB_FRAME*)&frame->payload[0]); + usb_packet_process((const USB_FRAME*)&frame->payload[0]); } // Handler must not use the frame pointer after it has returned -void da14531_handler(struct da14531_protocol_frame* frame, struct ringbuffer* queue) +void da14531_handler(const struct da14531_protocol_frame* frame, struct ringbuffer* queue) { // util_log("handler called"); switch (frame->type) { case DA14531_PROTOCOL_PACKET_TYPE_CTRL_DATA: - _ctrl_handler((struct da14531_ctrl_frame*)frame, queue); + _ctrl_handler((const struct da14531_ctrl_frame*)frame, queue); break; case DA14531_PROTOCOL_PACKET_TYPE_BLE_DATA: _hww_handler(frame, queue); diff --git a/src/da14531/da14531_handler.h b/src/da14531/da14531_handler.h index c39535e5d8..a36480e43b 100644 --- a/src/da14531/da14531_handler.h +++ 
b/src/da14531/da14531_handler.h
@@ -26,6 +26,6 @@ extern uint16_t da14531_handler_current_product_len;
 bool da14531_handler_bond_db_set(void);
 #endif
 
-void da14531_handler(struct da14531_protocol_frame* frame, struct ringbuffer* queue);
+void da14531_handler(const struct da14531_protocol_frame* frame, struct ringbuffer* queue);
 
 #endif
diff --git a/src/delay.c b/src/delay.c
index 0b7edeb6a3..6ec4b737bc 100644
--- a/src/delay.c
+++ b/src/delay.c
@@ -25,6 +25,8 @@
 struct task {
     struct timer_task timer;
     volatile bool done;
+    delay_callback_t cb;
+    void* user_data;
 };
 
 static struct task _tasks[10] = {0};
@@ -34,11 +36,16 @@ static void _hal_timer_cb(const struct timer_task* const timer)
     for (size_t i = 0; i < COUNT_OF(_tasks); i++) {
         if (&_tasks[i].timer == timer) {
             _tasks[i].done = true;
+            if (_tasks[i].cb) {
+                _tasks[i].cb(_tasks[i].user_data);
+                // Only call callback once
+                _tasks[i].cb = NULL;
+            }
         }
     }
 }
 
-void delay_init_ms(delay_t* self, uint32_t ms)
+void delay_init_ms(delay_t* self, uint32_t ms, delay_callback_t cb, void* user_data)
 {
     // find an unused slot in tasks
     size_t i;
@@ -56,6 +63,8 @@ void delay_init_ms(delay_t* self, uint32_t ms)
     } else {
         _tasks[i].done = false;
         memset(&_tasks[i], 0, sizeof(struct task));
+        _tasks[i].cb = cb;
+        _tasks[i].user_data = user_data;
         _tasks[i].timer.interval = ms;
         _tasks[i].timer.cb = _hal_timer_cb;
         _tasks[i].timer.mode = TIMER_TASK_ONE_SHOT;
diff --git a/src/delay.h b/src/delay.h
index ee7e6c3064..3b99efb924 100644
--- a/src/delay.h
+++ b/src/delay.h
@@ -22,9 +22,11 @@ typedef struct {
     size_t id;
 } delay_t;
 
+typedef void (*delay_callback_t)(void*);
+
 // Creates a non-blocking delay. Check with delay_is_elapsed if it has elapsed.
 // Limited to 10 concurrent delays, will abort if it fails to allocate one
-void delay_init_ms(delay_t* self, uint32_t ms);
+void delay_init_ms(delay_t* self, uint32_t ms, delay_callback_t cb, void* user_data);
 
 // returns true if time has passed. After it has returned true once it must not be called again
 bool delay_is_elapsed(const delay_t* self);
diff --git a/src/firmware.c b/src/firmware.c
index 3c909870b0..ba25d51bc5 100644
--- a/src/firmware.c
+++ b/src/firmware.c
@@ -15,7 +15,6 @@
 #include "common_main.h"
 #include "da14531/da14531_protocol.h"
 #include "driver_init.h"
-#include "firmware_main_loop.h"
 #include "hardfault.h"
 #include "memory/bitbox02_smarteeprom.h"
 #include "memory/memory_shared.h"
@@ -27,6 +26,7 @@
 #include "usb/usb_processing.h"
 #include
 #include
+#include
 #include
 
 #if APP_U2F == 1
@@ -55,6 +55,6 @@ int main(void)
 #if APP_U2F == 1
     u2f_device_setup();
 #endif
-    firmware_main_loop();
+    rust_main_loop();
     return 0;
 }
diff --git a/src/firmware_main_loop.c b/src/firmware_main_loop.c
deleted file mode 100644
index 2fca2b28a0..0000000000
--- a/src/firmware_main_loop.c
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2019 Shift Cryptosecurity AG
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "firmware_main_loop.h" - -#include "communication_mode.h" -#include "da14531/da14531.h" -#include "da14531/da14531_handler.h" -#include "da14531/da14531_protocol.h" -#include "driver_init.h" -#include "hardfault.h" -#include "hid_hww.h" -#include "hww.h" -#include "memory/memory.h" -#include "memory/memory_shared.h" -#include "touch/gestures.h" -#include "uart.h" -#include "ui/screen_process.h" -#include "ui/screen_stack.h" -#include "usb/class/hid/hww/hid_hww.h" -#include "usb/usb.h" -#include "usb/usb_frame.h" -#include "usb/usb_processing.h" -#include -#include -#include -#include -#if APP_U2F == 1 - #include "u2f.h" - #include "u2f/u2f_packet.h" - #include "usb/class/hid/u2f/hid_u2f.h" -#endif - -// Must be power of 2 -#define UART_OUT_BUF_LEN 2048 - -static void _orientation_screen_poll(struct ringbuffer* uart_write_queue) -{ - static bool orientation_set = false; - bool _orientation; - if (!orientation_set && rust_workflow_orientation_screen_poll(&_orientation)) { - orientation_set = true; - // hww handler in usb_process must be setup before we can allow ble connections - if (memory_get_platform() == MEMORY_PLATFORM_BITBOX02_PLUS) { - size_t len; - da14531_handler_current_product = (const uint8_t*)platform_product(&len); - da14531_handler_current_product_len = len; - util_log("%s %d", da14531_handler_current_product, da14531_handler_current_product_len); - da14531_set_product( - da14531_handler_current_product, - da14531_handler_current_product_len, - uart_write_queue); - } - usb_start(); - } -} - -void firmware_main_loop(void) -{ - // Set the size of uart_read_buf to the size of the ringbuffer in the UART driver so we can read - // out all bytes - uint8_t uart_read_buf[USART_0_BUFFER_SIZE] = {0}; - uint16_t uart_read_buf_len = 0; - - struct ringbuffer uart_write_queue; - uint8_t uart_write_buf[UART_OUT_BUF_LEN]; - ringbuffer_init(&uart_write_queue, &uart_write_buf, UART_OUT_BUF_LEN); - - /// If the bootloader has booted the BLE chip, the BLE chip isn't aware of the name according to - /// the fw. Send it over. - char buf[MEMORY_DEVICE_NAME_MAX_LEN] = {0}; - memory_get_device_name(buf); - da14531_set_name(buf, strlen(buf), &uart_write_queue); - - // This starts the async orientation screen workflow, which is processed by the loop below. - rust_workflow_spawn_orientation_screen(); - - const uint8_t* hww_data = NULL; - uint8_t hww_frame[USB_REPORT_SIZE] = {0}; - -#if APP_U2F == 1 - u2f_packet_init(); - const uint8_t* u2f_data = NULL; - uint8_t u2f_frame[USB_REPORT_SIZE] = {0}; -#endif - - if (!memory_ble_enabled()) { - communication_mode_ble_disable(); - } - - while (1) { - // Do UART I/O - if (communication_mode_ble_enabled()) { - if (uart_read_buf_len < sizeof(uart_read_buf) || - ringbuffer_num(&uart_write_queue) > 0) { - uart_poll( - &uart_read_buf[0], - sizeof(uart_read_buf), - &uart_read_buf_len, - &uart_write_queue); - } - } - - // Check if there is outgoing data - if (!hww_data) { - hww_data = queue_pull(queue_hww_queue()); - } -#if APP_U2F == 1 - // Generate timeout packets - uint32_t timeout_cid; - while (u2f_packet_timeout_get(&timeout_cid)) { - u2f_packet_timeout(timeout_cid); - } - if (!u2f_data) { - u2f_data = queue_pull(queue_u2f_queue()); - // If USB stack was locked and there is no more messages to send out, time to - // unlock it. 
- if (!u2f_data && usb_processing_locked(usb_processing_u2f())) { - usb_processing_unlock(); - } - } -#endif - // Do USB Input - if (!hww_data && hid_hww_read(&hww_frame[0])) { - if (usb_packet_process((const USB_FRAME*)hww_frame)) { - if (communication_mode_ble_enabled()) { - // Enqueue a power down command to the da14531 - da14531_power_down(&uart_write_queue); - // Flush out the power down command. This will be the last UART communication we - // do. - while (ringbuffer_num(&uart_write_queue) > 0) { - uart_poll(NULL, 0, NULL, &uart_write_queue); - } - communication_mode_ble_disable(); - } - } else { - util_log("usb_packet_process: invalid"); - } - } -#if APP_U2F == 1 - if (!u2f_data && hid_u2f_read(&u2f_frame[0])) { - util_log("u2f data %s", util_dbg_hex((void*)u2f_frame, 16)); - u2f_packet_process((const USB_FRAME*)u2f_frame); - } -#endif - - // Do UART Output - if (communication_mode_ble_enabled()) { - struct da14531_protocol_frame* frame = da14531_protocol_poll( - &uart_read_buf[0], &uart_read_buf_len, &hww_data, &uart_write_queue); - - if (frame) { - da14531_handler(frame, &uart_write_queue); - } - } - - // Do USB Output - if (!communication_mode_ble_enabled() && hww_data) { - if (hid_hww_write_poll(hww_data)) { - hww_data = NULL; - } - } -#if APP_U2F == 1 - if (u2f_data) { - if (hid_u2f_write_poll(u2f_data)) { - util_log("u2f wrote %s", util_dbg_hex(u2f_data, 16)); - u2f_data = NULL; - } - } -#endif - - /* First, process all the incoming USB traffic. */ - usb_processing_process(usb_processing_hww()); -#if APP_U2F == 1 - usb_processing_process(usb_processing_u2f()); -#endif - /* - * If USB has generated events at the application level, - * process them now. - */ - hww_process(); -#if APP_U2F == 1 - u2f_process(); -#endif - - screen_process(); - /* And finally, run the high-level event processing. 
*/ - - rust_workflow_spin(); - rust_async_usb_spin(); - - _orientation_screen_poll(&uart_write_queue); - } -} diff --git a/src/memory/memory_shared.c b/src/memory/memory_shared.c index e355d8b40d..e294a94bba 100644 --- a/src/memory/memory_shared.c +++ b/src/memory/memory_shared.c @@ -182,7 +182,7 @@ int16_t memory_get_ble_bond_db(uint8_t* data) return len; } -bool memory_set_ble_bond_db(uint8_t* data, int16_t data_len) +bool memory_set_ble_bond_db(const uint8_t* data, int16_t data_len) { ASSERT(data_len <= MEMORY_BLE_BOND_DB_LEN); if (data_len > MEMORY_BLE_BOND_DB_LEN) { diff --git a/src/memory/memory_shared.h b/src/memory/memory_shared.h index 7099c0e76f..a5135a514e 100644 --- a/src/memory/memory_shared.h +++ b/src/memory/memory_shared.h @@ -156,7 +156,7 @@ void memory_get_ble_irk(uint8_t* data); void memory_get_ble_identity_address(uint8_t* data); // data_len can be at most MEMORY_BLE_BOND_DB_LEN -bool memory_set_ble_bond_db(uint8_t* data, int16_t data_len); +bool memory_set_ble_bond_db(const uint8_t* data, int16_t data_len); typedef struct { uint8_t allowed_firmware_hash[32]; diff --git a/src/rust/Cargo.lock b/src/rust/Cargo.lock index 68821cd1c8..af6bd5f9ed 100644 --- a/src/rust/Cargo.lock +++ b/src/rust/Cargo.lock @@ -24,6 +24,26 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-channel" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", + "portable-atomic", + "portable-atomic-util", +] + +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "autocfg" version = "1.0.1" @@ -112,6 +132,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "bitbox02-executor" +version = "0.1.0" +dependencies = [ + "async-task", + "concurrent-queue", + "critical-section", + "futures-lite", + "pin-project-lite", +] + [[package]] name = "bitbox02-noise" version = "0.1.0" @@ -125,11 +156,13 @@ dependencies = [ name = "bitbox02-rust" version = "0.1.0" dependencies = [ + "async-channel", "binascii", "bip32-ed25519", "bip39", "bitbox-aes", "bitbox02", + "bitbox02-executor", "bitbox02-noise", "bitcoin", "bitcoin_hashes", @@ -139,6 +172,7 @@ dependencies = [ "ed25519-dalek", "erc20_params", "futures-lite", + "grounded", "hex", "hex_lit", "hmac", @@ -312,6 +346,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", + "portable-atomic", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -361,6 +405,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -516,6 +566,28 @@ dependencies = [ "hex", ] 
+[[package]] +name = "event-listener" +version = "5.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" +dependencies = [ + "concurrent-queue", + "pin-project-lite", + "portable-atomic", + "portable-atomic-util", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +dependencies = [ + "event-listener", + "pin-project-lite", +] + [[package]] name = "ff" version = "0.13.0" @@ -559,6 +631,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "grounded" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "917d82402c7eb9755fdd87d52117701dae9e413a6abb309fac2a13af693b6080" +dependencies = [ + "portable-atomic", +] + [[package]] name = "group" version = "0.13.0" @@ -782,6 +863,18 @@ name = "portable-atomic" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" +dependencies = [ + "critical-section", +] + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] [[package]] name = "primeorder" diff --git a/src/rust/Cargo.toml b/src/rust/Cargo.toml index cbf333c10c..f0e7e48cec 100644 --- a/src/rust/Cargo.toml +++ b/src/rust/Cargo.toml @@ -24,6 +24,7 @@ members = [ "erc20_params", "streaming-silent-payments", "bitbox-aes", + "bitbox02-executor", ] resolver = "2" @@ -52,6 +53,7 @@ keccak = { version = "0.1.4", default-features = false, features = ["no_unroll"] zeroize = "1.7.0" futures-lite = { version = "2.6.1", default-features = false } hex_lit = { version = "0.1.1", default-features = false } +critical-section = {version = "1.2"} [patch.crates-io] rtt-target = { git = "https://github.com/probe-rs/rtt-target.git", rev = "117d9519a5d3b1f4bc024bc05f9e3c5dec0a57f5" } diff --git a/src/rust/bitbox02-executor/Cargo.toml b/src/rust/bitbox02-executor/Cargo.toml new file mode 100644 index 0000000000..efbfe2c96f --- /dev/null +++ b/src/rust/bitbox02-executor/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "bitbox02-executor" +version = "0.1.0" +edition = "2024" + +[dependencies] +async-task = {version="4.7.1", default-features=false} +concurrent-queue = {version="2.5.0", default-features=false} +critical-section = "1.2.0" +futures-lite = {workspace = true} +pin-project-lite = "0.2.16" diff --git a/src/rust/bitbox02-executor/src/lib.rs b/src/rust/bitbox02-executor/src/lib.rs new file mode 100644 index 0000000000..bad8abba10 --- /dev/null +++ b/src/rust/bitbox02-executor/src/lib.rs @@ -0,0 +1,64 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#![no_std]
+use async_task::{Builder, Runnable, Task};
+use concurrent_queue::ConcurrentQueue;
+
+pub struct Executor {
+    queue: ConcurrentQueue<Runnable>,
+}
+
+impl Executor {
+    pub const fn new() -> Executor {
+        Executor {
+            queue: ConcurrentQueue::unbounded(),
+        }
+    }
+
+    /// Attempts to run a task if at least one is scheduled
+    ///
+    /// Running a scheduled task means simply polling its future once
+    pub fn try_tick(&self) -> bool {
+        match self.queue.pop() {
+            Err(_) => false,
+            Ok(runnable) => {
+                runnable.run();
+                true
+            }
+        }
+    }
+
+    /// Spawns a task onto the executor.
+    pub fn spawn<T>(&'static self, future: impl Future<Output = T> + 'static) -> Task<T> {
+        // `schedule` is the function eventually being called when `Waker.wake()` is called. The
+        // function schedules the task by placing the task's Runnable into the executor's queue.
+        let schedule = move |runnable| self.queue.push(runnable).unwrap();
+
+        // SAFETY
+        // 1. `future` doesn't need to be `Send` because the firmware is single threaded
+        // 2. `schedule` doesn't need to be `Send` and `Sync` because the firmware is single threaded
+        let (runnable, task) = unsafe { Builder::new().spawn_unchecked(|()| future, schedule) };
+
+        // Schedule the task once to get started
+        runnable.schedule();
+        task
+    }
+}
+
+impl Default for Executor {
+    fn default() -> Executor {
+        Executor::new()
+    }
+}
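Taken together, spawn() schedules a freshly created task exactly once, every later wake-up re-queues its Runnable through `schedule`, and try_tick() polls one queued task per call, so the executor has to be driven from a polling loop. A minimal sketch of that contract (illustrative only; the real driver is the firmware main loop added later in this diff):

    // Assumes a single-threaded environment, matching the SAFETY argument above.
    static EXECUTOR: bitbox02_executor::Executor = bitbox02_executor::Executor::new();

    fn drive() {
        // Detach so the task runs to completion without an owner handle.
        EXECUTOR.spawn(async { /* cooperative, poll-based work */ }).detach();
        // Each tick pops one Runnable and polls its future once; a pending
        // future re-schedules itself via its Waker when it can make progress.
        while EXECUTOR.try_tick() {}
    }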
diff --git a/src/rust/bitbox02-rust-c/src/lib.rs b/src/rust/bitbox02-rust-c/src/lib.rs
index 08a2cce8ef..8ee196487d 100644
--- a/src/rust/bitbox02-rust-c/src/lib.rs
+++ b/src/rust/bitbox02-rust-c/src/lib.rs
@@ -25,8 +25,6 @@ mod alloc;
 pub mod async_usb;
 #[cfg(feature = "firmware")]
 mod der;
-#[cfg(feature = "firmware")]
-pub mod workflow;
 
 // Expose C interface defined in bitbox_aes
 #[cfg(feature = "firmware")]
diff --git a/src/rust/bitbox02-rust-c/src/workflow.rs b/src/rust/bitbox02-rust-c/src/workflow.rs
deleted file mode 100644
index 22952dae54..0000000000
--- a/src/rust/bitbox02-rust-c/src/workflow.rs
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2020 Shift Cryptosecurity AG
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! This module is a quick workaround to use async workflows from U2F/FIDO2, where the root of the
-//! usb message proessing is not ported to Rust. If that happens, the `async_usb` module can be
-//! used and this can be deleted.
-
-// TODO: figure out how to deal with the static muts below.
-// https://doc.rust-lang.org/nightly/edition-guide/rust-2024/static-mut-references.html
-#![allow(static_mut_refs)]
-#![allow(clippy::missing_safety_doc)]
-
-extern crate alloc;
-
-use alloc::boxed::Box;
-use alloc::string::String;
-use bitbox02_rust::workflow::{confirm, orientation_screen};
-use core::task::Poll;
-use util::bb02_async::{Task, spin};
-
-enum TaskState<'a, O> {
-    Nothing,
-    Running(Task<'a, O>),
-    ResultAvailable(O),
-}
-
-static mut UNLOCK_STATE: TaskState<'static, Result<(), ()>> = TaskState::Nothing;
-
-static mut CONFIRM_TITLE: Option<String> = None;
-static mut CONFIRM_BODY: Option<String> = None;
-static mut CONFIRM_PARAMS: Option<confirm::Params<'static>> = None;
-static mut CONFIRM_STATE: TaskState<'static, Result<(), confirm::UserAbort>> = TaskState::Nothing;
-static mut BITBOX02_HAL: bitbox02_rust::hal::BitBox02Hal = bitbox02_rust::hal::BitBox02Hal::new();
-
-static mut ORIENTATION_SCREEN_STATE: TaskState<'static, bool> = TaskState::Nothing;
-
-#[unsafe(no_mangle)]
-pub unsafe extern "C" fn rust_workflow_spawn_unlock() {
-    unsafe {
-        UNLOCK_STATE = TaskState::Running(Box::pin(bitbox02_rust::workflow::unlock::unlock(
-            &mut BITBOX02_HAL,
-        )));
-    }
-}
-
-#[unsafe(no_mangle)]
-pub unsafe extern "C" fn rust_workflow_spawn_confirm(
-    title: *const core::ffi::c_char,
-    body: *const core::ffi::c_char,
-) {
-    unsafe {
-        CONFIRM_TITLE = Some(core::ffi::CStr::from_ptr(title).to_str().unwrap().into());
-        CONFIRM_BODY = Some(core::ffi::CStr::from_ptr(body).to_str().unwrap().into());
-        CONFIRM_PARAMS = Some(confirm::Params {
-            title: CONFIRM_TITLE.as_ref().unwrap(),
-            body: CONFIRM_BODY.as_ref().unwrap(),
-            accept_only: true,
-            ..Default::default()
-        });
-
-        CONFIRM_STATE =
-            TaskState::Running(Box::pin(confirm::confirm(CONFIRM_PARAMS.as_ref().unwrap())));
-    }
-}
-
-#[unsafe(no_mangle)]
-pub unsafe extern "C" fn rust_workflow_spawn_orientation_screen() {
-    unsafe {
-        ORIENTATION_SCREEN_STATE =
-            TaskState::Running(Box::pin(orientation_screen::orientation_screen()));
-    }
-}
-
-#[unsafe(no_mangle)]
-pub unsafe extern "C" fn rust_workflow_spin() {
-    unsafe {
-        match UNLOCK_STATE {
-            TaskState::Running(ref mut task) => {
-                let result = spin(task);
-                if let Poll::Ready(result) = result {
-                    UNLOCK_STATE = TaskState::ResultAvailable(result);
-                }
-            }
-            _ => (),
-        }
-        match CONFIRM_STATE {
-            TaskState::Running(ref mut task) => {
-                let result = spin(task);
-                if let Poll::Ready(result) = result {
-                    CONFIRM_STATE = TaskState::ResultAvailable(result);
-                }
-            }
-            _ => (),
-        }
-        match ORIENTATION_SCREEN_STATE {
-            TaskState::Running(ref mut task) => {
-                let result = spin(task);
-                if let Poll::Ready(result) = result {
-                    ORIENTATION_SCREEN_STATE = TaskState::ResultAvailable(result);
-                }
-            }
-            _ => (),
-        }
-    }
-}
-
-/// Returns true if there was a result.
-#[unsafe(no_mangle)]
-pub unsafe extern "C" fn rust_workflow_unlock_poll(result_out: &mut bool) -> bool {
-    unsafe {
-        match UNLOCK_STATE {
-            TaskState::ResultAvailable(result) => {
-                UNLOCK_STATE = TaskState::Nothing;
-                match result {
-                    Ok(()) => *result_out = true,
-                    Err(()) => *result_out = false,
-                }
-                true
-            }
-            _ => false,
-        }
-    }
-}
-
-/// Returns true if there was a result.
-#[unsafe(no_mangle)]
-pub unsafe extern "C" fn rust_workflow_confirm_poll(result_out: &mut bool) -> bool {
-    unsafe {
-        match CONFIRM_STATE {
-            TaskState::ResultAvailable(ref result) => {
-                CONFIRM_TITLE = None;
-                CONFIRM_BODY = None;
-                CONFIRM_PARAMS = None;
-                CONFIRM_STATE = TaskState::Nothing;
-                *result_out = result.is_ok();
-                true
-            }
-            _ => false,
-        }
-    }
-}
-
-/// Returns true if there was a result.
-#[unsafe(no_mangle)] -pub unsafe extern "C" fn rust_workflow_orientation_screen_poll(result_out: &mut bool) -> bool { - unsafe { - match ORIENTATION_SCREEN_STATE { - TaskState::ResultAvailable(result) => { - ORIENTATION_SCREEN_STATE = TaskState::Nothing; - *result_out = result; - true - } - _ => false, - } - } -} - -#[unsafe(no_mangle)] -pub unsafe extern "C" fn rust_workflow_abort_current() { - unsafe { - UNLOCK_STATE = TaskState::Nothing; - - CONFIRM_TITLE = None; - CONFIRM_BODY = None; - CONFIRM_PARAMS = None; - CONFIRM_STATE = TaskState::Nothing; - - ORIENTATION_SCREEN_STATE = TaskState::Nothing; - } -} diff --git a/src/rust/bitbox02-rust/Cargo.toml b/src/rust/bitbox02-rust/Cargo.toml index 8aea87c8db..22a6b9e3b6 100644 --- a/src/rust/bitbox02-rust/Cargo.toml +++ b/src/rust/bitbox02-rust/Cargo.toml @@ -49,6 +49,7 @@ minicbor = { version = "0.24.0", default-features = false, features = ["alloc"], crc = { version = "3.0.1", optional = true } ed25519-dalek = { version = "2.1.1", default-features = false, features = ["hazmat", "digest"], optional = true } hmac = { workspace = true } +bitbox02-executor = {path = "../bitbox02-executor"} miniscript = { version = "12.2.0", default-features = false, features = ["no-std"], optional = true } bitcoin = { workspace = true } @@ -59,6 +60,8 @@ bip39 = { workspace = true } bitcoin_hashes = { version = "0.14.0", default-features = false, features = ["small-hash"] } futures-lite = { workspace = true } hex_lit = { workspace = true, features = ["rust_v_1_46"] } +async-channel = { version = "2.5.0", default-features = false, features = ["portable-atomic"] } +grounded = { version = "0.2.0", default-features = false, features = ["critical-section"] } [dependencies.prost] # keep version in sync with tools/prost-build/Cargo.toml. @@ -110,3 +113,5 @@ c-unit-testing = [] simulator-graphical = [] firmware = [] + +rtt = [] diff --git a/src/rust/bitbox02-rust/src/bb02_async.rs b/src/rust/bitbox02-rust/src/bb02_async.rs index fbaeb01976..550d08f62b 100644 --- a/src/rust/bitbox02-rust/src/bb02_async.rs +++ b/src/rust/bitbox02-rust/src/bb02_async.rs @@ -12,12 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -use core::cell::RefCell; - -/// Disables the screensaver while waiting for an option to contain a value. 
Afterwards, it returns that value
-pub async fn option_no_screensaver<O>(opt: &RefCell<Option<O>>) -> O {
+pub async fn screensaver_without<T>(fut: impl Future<Output = T>) -> T {
     bitbox02::screen_saver::screen_saver_disable();
-    let result = util::bb02_async::option(opt).await;
+    let result = fut.await;
     bitbox02::screen_saver::screen_saver_enable();
     result
 }
diff --git a/src/rust/bitbox02-rust/src/hal.rs b/src/rust/bitbox02-rust/src/hal.rs
index 92245281d1..3bfdc44917 100644
--- a/src/rust/bitbox02-rust/src/hal.rs
+++ b/src/rust/bitbox02-rust/src/hal.rs
@@ -154,6 +154,10 @@ impl Hal for BitBox02Hal {
     }
 }
 
+impl grounded::const_init::ConstInit for BitBox02Hal {
+    const VAL: Self = Self::new();
+}
+
 #[cfg(feature = "testing")]
 pub mod testing {
     use alloc::boxed::Box;
diff --git a/src/rust/bitbox02-rust/src/hww/api/bitcoin/signtx.rs b/src/rust/bitbox02-rust/src/hww/api/bitcoin/signtx.rs
index d25ac88fbf..2a538e5d5e 100644
--- a/src/rust/bitbox02-rust/src/hww/api/bitcoin/signtx.rs
+++ b/src/rust/bitbox02-rust/src/hww/api/bitcoin/signtx.rs
@@ -316,7 +316,7 @@ async fn handle_prevtx(
     input_index: u32,
     input: &pb::BtcSignInputRequest,
     num_inputs: u32,
-    progress_component: &mut bitbox02::ui::Component<'_>,
+    progress_component: &mut bitbox02::ui::Component,
     next_response: &mut NextResponse,
 ) -> Result<(), Error> {
     let prevtx_init = get_prevtx_init(input_index, next_response).await?;
diff --git a/src/rust/bitbox02-rust/src/lib.rs b/src/rust/bitbox02-rust/src/lib.rs
index 2a685974a8..228ceb8ff7 100644
--- a/src/rust/bitbox02-rust/src/lib.rs
+++ b/src/rust/bitbox02-rust/src/lib.rs
@@ -13,7 +13,14 @@
 // limitations under the License.
 
 // Since we are targeting embedded we exclude the standard library by default
-#![no_std]
+#![cfg_attr(
+    not(any(
+        feature = "testing",
+        feature = "c-unit-testing",
+        feature = "simulator-graphical"
+    )),
+    no_std
+)]
 
 // When compiling for testing we allow certain warnings.
 #![cfg_attr(test, allow(unused_imports, dead_code))]
@@ -36,6 +43,8 @@ pub mod hal;
 pub mod hash;
 pub mod hww;
 pub mod keystore;
+#[cfg(feature = "firmware")]
+pub mod main_loop;
 pub mod salt;
 pub mod secp256k1;
 #[cfg(feature = "app-u2f")]
diff --git a/src/rust/bitbox02-rust/src/main_loop.rs b/src/rust/bitbox02-rust/src/main_loop.rs
new file mode 100644
index 0000000000..765d9ef2d3
--- /dev/null
+++ b/src/rust/bitbox02-rust/src/main_loop.rs
@@ -0,0 +1,211 @@
+// Copyright 2025 Shift Crypto AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+
+use async_channel::Receiver;
+use bitbox02::ringbuffer::RingBuffer;
+use bitbox02::uart::USART_0_BUFFER_SIZE;
+use bitbox02::usb::USB_REPORT_SIZE;
+use bitbox02_executor::Executor;
+use util::log::log;
+
+#[cfg_attr(feature = "c-unit-testing", allow(unused))]
+const UART_OUT_BUF_LEN: u32 = 2048;
+
+static EXECUTOR: Executor = Executor::new();
+
+#[cfg_attr(feature = "c-unit-testing", allow(unused))]
+fn main_loop() -> !
{ + static ORIENTATION_CHOSEN: AtomicBool = AtomicBool::new(false); + // Set the size of uart_read_buf to the size of the ringbuffer in the UART driver so we can read + // out all bytes + let mut uart_read_buf = [0u8; USART_0_BUFFER_SIZE as usize]; + let mut uart_read_buf_len = 0u16; + + let mut uart_write_buf = [0u8; UART_OUT_BUF_LEN as usize]; + let mut uart_write_queue = RingBuffer::new(&mut uart_write_buf); + + // If the bootloader has booted the BLE chip, the BLE chip isn't aware of the name according to + // the fw. Send it over. + let device_name = bitbox02::memory::get_device_name(); + bitbox02::da14531::set_name(&device_name, &mut uart_write_queue); + + // This starts the async orientation screen workflow, which is processed by the loop below. + EXECUTOR + .spawn(async { + crate::workflow::orientation_screen::orientation_screen().await; + util::log!("ori chosen"); + ORIENTATION_CHOSEN.store(true, Ordering::Relaxed); + }) + .detach(); + + EXECUTOR + .spawn(async { + util::log::log!("hello world"); + }) + .detach(); + + let mut hww_data = None; + let mut hww_frame = [0u8; USB_REPORT_SIZE as usize]; + + #[cfg(feature = "app-u2f")] + bitbox02::u2f_packet::init(); + #[cfg(feature = "app-u2f")] + let mut u2f_data = None; + #[cfg(feature = "app-u2f")] + let mut u2f_frame = [0u8; USB_REPORT_SIZE as usize]; + + if !bitbox02::memory::ble_enabled() { + bitbox02::communication_mode::ble_disable(); + } + + loop { + // Do UART I/O + if bitbox02::communication_mode::ble_enabled() { + if uart_read_buf_len < uart_read_buf.len() as u16 || uart_write_queue.len() > 0 { + bitbox02::uart::poll( + Some(&mut uart_read_buf), + Some(&mut uart_read_buf_len), + Some(&mut uart_write_queue), + ) + } + } + + // Check if there is outgoing data + if hww_data.is_none() { + hww_data = bitbox02::queue::pull_hww(); + } + + // Generate u2f timeout packets + #[cfg(feature = "app-u2f")] + { + // Generate timeout packets + let mut timeout_cid = 0u32; + while bitbox02::u2f_packet::timeout_get(&mut timeout_cid) { + bitbox02::u2f_packet::timeout(timeout_cid); + } + if u2f_data.is_none() { + u2f_data = bitbox02::queue::pull_u2f(); + // If USB stack was locked and there is no more messages to send out, time to + // unlock it. + if u2f_data.is_none() && bitbox02::usb_processing::locked_u2f() { + bitbox02::usb_processing::unlock(); + } + } + } + + // Do USB Input + if hww_data.is_none() && bitbox02::hid_hww::read(&mut hww_frame) { + if bitbox02::usb_packet::process(&hww_frame) { + if bitbox02::communication_mode::ble_enabled() { + // Enqueue a power down command to the da14531 + bitbox02::da14531::power_down(&mut uart_write_queue); + // Flush out the power down command. This will be the last UART communication + // we do. 
+                    while uart_write_queue.len() > 0 {
+                        bitbox02::uart::poll(None, None, Some(&mut uart_write_queue));
+                    }
+                    bitbox02::communication_mode::ble_disable();
+                }
+            } else {
+                log!("usb_packet_process: invalid");
+            }
+        }
+        #[cfg(feature = "app-u2f")]
+        if u2f_data.is_none() && bitbox02::hid_u2f::read(&mut u2f_frame) {
+            bitbox02::u2f_packet::process(&u2f_frame);
+        }
+
+        // Do UART Output
+        if bitbox02::communication_mode::ble_enabled() {
+            if let Some(frame) = bitbox02::da14531_protocol::poll(
+                &mut uart_read_buf,
+                &mut uart_read_buf_len,
+                &mut hww_data,
+                &mut uart_write_queue,
+            ) {
+                bitbox02::da14531_handler::handler(frame, &mut uart_write_queue);
+            }
+        }
+
+        // Do USB Output
+        if let Some(data) = &mut hww_data
+            && !bitbox02::communication_mode::ble_enabled()
+        {
+            if bitbox02::hid_hww::write_poll(data) {
+                hww_data = None;
+            }
+        }
+        #[cfg(feature = "app-u2f")]
+        if let Some(data) = &mut u2f_data {
+            if bitbox02::hid_u2f::write_poll(data) {
+                u2f_data = None;
+            }
+        }
+
+        /* First, process all the incoming USB traffic. */
+        bitbox02::usb_processing::process_hww();
+        #[cfg(feature = "app-u2f")]
+        bitbox02::usb_processing::process_u2f();
+
+        /*
+         * If USB has generated events at the application level,
+         * process them now.
+         */
+        #[cfg(feature = "app-u2f")]
+        bitbox02::u2f::process();
+
+        bitbox02::screen::process();
+
+        /* And finally, run the high-level event processing. */
+        crate::async_usb::spin();
+
+        // Run async executor
+        EXECUTOR.try_tick();
+
+        if ORIENTATION_CHOSEN.swap(false, Ordering::Relaxed) {
+            util::log!("orientation chosen");
+            // hww handler in usb_process must be setup before we can allow ble connections
+            if let Ok(bitbox02::memory::Platform::BitBox02Plus) = bitbox02::memory::get_platform() {
+                let (product, product_len) = bitbox02::platform::product();
+                bitbox02::da14531_handler::set_product(product, product_len);
+                bitbox02::da14531::set_product(product, &mut uart_write_queue)
+            }
+            bitbox02::usb::start();
+        }
+    }
+}
+
+// Spawns a task and returns the receiving end of a one-shot channel
+pub fn spawn<T>(fut: impl Future<Output = T> + 'static) -> Receiver<T>
+where
+    T: 'static,
+{
+    let (sender, receiver) = async_channel::bounded(1);
+    EXECUTOR
+        .spawn(async move { sender.send(fut.await).await })
+        .detach();
+    receiver
+}
+
+//
+// C interface
+//
+
+#[unsafe(no_mangle)]
+#[cfg(not(feature = "c-unit-testing"))]
+pub extern "C" fn rust_main_loop() -> ! {
+    main_loop()
+}
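Because the channel is bounded(1) and the sender is moved into the task, spawn() behaves like a one-shot: the task sends exactly one value and the caller polls for it without blocking the loop. A hypothetical call site (compute_answer is illustrative, not from this diff):

    let rx = spawn(async { compute_answer().await });
    // Later, once per main-loop iteration:
    match rx.try_recv() {
        Ok(answer) => { /* task finished with `answer` */ }
        Err(async_channel::TryRecvError::Empty) => { /* still running */ }
        Err(async_channel::TryRecvError::Closed) => { /* task went away */ }
    }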
-pub fn cancel<R>(result_cell: &ResultCell<R>) { - *result_cell.borrow_mut() = Some(Err(Error::Cancelled)); -} +/// Returns true if the user cancelled and wants to exit. +pub async fn cancel(title: &str) -> bool { + let params = confirm::Params { + title, + body: "Do you really\nwant to cancel?", + ..Default::default() + }; -/// Resolves the `with_cancel` future with the given result. -pub fn set_result<R>(result_cell: &ResultCell<R>, result: R) { - *result_cell.borrow_mut() = Some(Ok(result)); + // Err(UserAbort) means _do not cancel_, ask again + !matches!(confirm::confirm(&params).await, Err(confirm::UserAbort)) } -/// Blocks on showing/running a component until `cancel` or `result` is -/// called on the same `result_cell`. -/// In the former, a prompt with the given title to confirm cancellation is shown. -/// -/// * `title` - title to show in the cancel confirm prompt. -/// * `component` - component to process -/// * `result_cell` - result var to synchronize the result on. Pass the same to `cancel` and -/// `set_result`. -pub async fn with_cancel<R>( - title: &str, - component: &mut bitbox02::ui::Component<'_>, - result_cell: &ResultCell<R>, -) -> Result<R, Error> { - component.screen_stack_push(); +pub async fn with_cancel<O, GEN, F>(title: &str, future_generator: GEN) -> Result<O, Error> +where + GEN: Fn() -> F, + F: Future<Output = Result<O, ()>>, +{ loop { - let result = option_no_screensaver(result_cell).await; - if let Err(Error::Cancelled) = result { - let params = confirm::Params { - title, - body: "Do you really\nwant to cancel?", - ..Default::default() - }; - - if let Err(confirm::UserAbort) = confirm::confirm(&params).await { - continue; + match future_generator().await { + Ok(o) => return Ok(o), + Err(_) => { + if cancel(title).await { + return Err(super::cancel::Error::Cancelled); + } + } } } - return result; } } diff --git a/src/rust/bitbox02-rust/src/workflow/confirm.rs b/src/rust/bitbox02-rust/src/workflow/confirm.rs index 19dc14701e..326f56ce7f 100644 --- a/src/rust/bitbox02-rust/src/workflow/confirm.rs +++ b/src/rust/bitbox02-rust/src/workflow/confirm.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::bb02_async::option_no_screensaver; +use crate::bb02_async::screensaver_without; pub use bitbox02::ui::{ConfirmParams as Params, Font}; @@ -20,16 +20,10 @@ pub struct UserAbort; /// Returns true if the user accepts, false if the user rejects. pub async fn confirm(params: &Params<'_>) -> Result<(), UserAbort> { - let result = core::cell::RefCell::new(None as Option<Result<(), UserAbort>>); - // The component will set the result when the user accepted/rejected. - let mut component = bitbox02::ui::confirm_create(params, |accepted| { - *result.borrow_mut() = if accepted { - Some(Ok(())) - } else { - Some(Err(UserAbort)) - }; - }); - component.screen_stack_push(); - option_no_screensaver(&result).await + if screensaver_without(bitbox02::ui::confirm(params)).await { + Ok(()) + } else { + Err(UserAbort) + } } diff --git a/src/rust/bitbox02-rust/src/workflow/menu.rs b/src/rust/bitbox02-rust/src/workflow/menu.rs index 19046a4ef6..594262b369 100644 --- a/src/rust/bitbox02-rust/src/workflow/menu.rs +++ b/src/rust/bitbox02-rust/src/workflow/menu.rs @@ -14,25 +14,18 @@ pub use super::cancel::Error as CancelError; -use crate::bb02_async::option_no_screensaver; - -use alloc::boxed::Box; -use core::cell::RefCell; +use crate::bb02_async::screensaver_without; +use bitbox02::ui::{MenuParams, menu_create}; /// Returns the index of the word chosen by the user.
pub async fn pick(words: &[&str], title: Option<&str>) -> Result<u8, CancelError> { - let result = RefCell::new(None as Option<Result<u8, CancelError>>); - let mut component = bitbox02::ui::menu_create(bitbox02::ui::MenuParams { + screensaver_without(menu_create(MenuParams { words, title, - select_word_cb: Some(Box::new(|choice_idx| { - *result.borrow_mut() = Some(Ok(choice_idx)); - })), - continue_on_last_cb: None, - cancel_cb: Some(Box::new(|| { - *result.borrow_mut() = Some(Err(CancelError::Cancelled)); - })), - }); - component.screen_stack_push(); - option_no_screensaver(&result).await + select_word: true, + continue_on_last: false, + cancel: true, + })) + .await + .or(Err(CancelError::Cancelled)) } diff --git a/src/rust/bitbox02-rust/src/workflow/mnemonic.rs b/src/rust/bitbox02-rust/src/workflow/mnemonic.rs index 09010dba10..43175d91ce 100644 --- a/src/rust/bitbox02-rust/src/workflow/mnemonic.rs +++ b/src/rust/bitbox02-rust/src/workflow/mnemonic.rs @@ -13,17 +13,14 @@ // limitations under the License. use super::Workflows; -pub use super::cancel::Error as CancelError; -use super::cancel::{cancel, set_result, with_cancel}; +pub use super::cancel::{Error as CancelError, with_cancel}; use super::confirm; use super::menu; use super::trinary_choice::TrinaryChoice; use super::trinary_input_string; -use alloc::boxed::Box; use alloc::string::String; use alloc::vec::Vec; -use core::cell::RefCell; use sha2::{Digest, Sha256}; @@ -78,36 +75,31 @@ fn create_random_unique_words(word: &str, length: u8) -> (u8, Vec<String>) pub async fn show_mnemonic(words: &[&str]) -> Result<(), CancelError> { - let result = RefCell::new(None); - let mut component = bitbox02::ui::menu_create(bitbox02::ui::MenuParams { - words, - title: None, - select_word_cb: None, - continue_on_last_cb: Some(Box::new(|| { - set_result(&result, ()); - })), - cancel_cb: Some(Box::new(|| { - cancel(&result); - })), - }); - with_cancel("Recovery\nwords", &mut component, &result).await + with_cancel("Recovery\nwords", || { + bitbox02::ui::menu_create(bitbox02::ui::MenuParams { + words, + title: None, + select_word: false, + continue_on_last: true, + cancel: true, + }) + }) + .await + .map(|_| ()) } /// Displays the `choices` to the user, returning the index of the selected choice. pub async fn confirm_word(choices: &[&str], title: &str) -> Result<u8, CancelError> { - let result = RefCell::new(None); - let mut component = bitbox02::ui::menu_create(bitbox02::ui::MenuParams { - words: choices, - title: Some(title), - select_word_cb: Some(Box::new(|idx| { - set_result(&result, idx); - })), - continue_on_last_cb: None, - cancel_cb: Some(Box::new(|| { - cancel(&result); - })), - }); - with_cancel("Recovery\nwords", &mut component, &result).await + with_cancel("Recovery\nwords", || { + bitbox02::ui::menu_create(bitbox02::ui::MenuParams { + words: choices, + title: Some(title), + select_word: true, + continue_on_last: false, + cancel: true, + }) + }) + .await } pub async fn show_and_confirm_mnemonic( diff --git a/src/rust/bitbox02-rust/src/workflow/orientation_screen.rs b/src/rust/bitbox02-rust/src/workflow/orientation_screen.rs index 530190e95c..236e0ee385 100644 --- a/src/rust/bitbox02-rust/src/workflow/orientation_screen.rs +++ b/src/rust/bitbox02-rust/src/workflow/orientation_screen.rs @@ -13,18 +13,8 @@ // limitations under the License.
use bitbox02::delay::delay_for; +use bitbox02::ui::choose_orientation; use core::time::Duration; -use util::bb02_async::option; - -pub async fn choose_orientation() -> bool { - let result = core::cell::RefCell::new(None as Option<bool>); - let mut orientation_arrows = bitbox02::ui::orientation_arrows(|upside_down| { - *result.borrow_mut() = Some(upside_down); - }); - orientation_arrows.screen_stack_push(); - // Wait until orientation has been chosen - option(&result).await -} pub async fn orientation_screen() -> bool { let upside_down = choose_orientation().await; diff --git a/src/rust/bitbox02-rust/src/workflow/sdcard.rs b/src/rust/bitbox02-rust/src/workflow/sdcard.rs index 9b85d34c7a..65ba01bc2a 100644 --- a/src/rust/bitbox02-rust/src/workflow/sdcard.rs +++ b/src/rust/bitbox02-rust/src/workflow/sdcard.rs @@ -12,20 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::bb02_async::option_no_screensaver; -use core::cell::RefCell; +use crate::bb02_async::screensaver_without; pub struct UserAbort; pub async fn sdcard() -> Result<(), UserAbort> { - let result = RefCell::new(None as Option<Result<(), UserAbort>>); - let mut component = bitbox02::ui::sdcard_create(|sd_done| { - *result.borrow_mut() = if sd_done { - Some(Ok(())) - } else { - Some(Err(UserAbort)) - }; - }); - component.screen_stack_push(); - option_no_screensaver(&result).await + let fut = bitbox02::ui::sdcard(); + if screensaver_without(fut).await { + Ok(()) + } else { + Err(UserAbort) + } } diff --git a/src/rust/bitbox02-rust/src/workflow/status.rs b/src/rust/bitbox02-rust/src/workflow/status.rs index d5a38ec30f..822c331497 100644 --- a/src/rust/bitbox02-rust/src/workflow/status.rs +++ b/src/rust/bitbox02-rust/src/workflow/status.rs @@ -12,14 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::bb02_async::option_no_screensaver; -use core::cell::RefCell; +use crate::bb02_async::screensaver_without; pub async fn status(title: &str, status_success: bool) { - let result = RefCell::new(None); - let mut component = bitbox02::ui::status_create(title, status_success, || { - *result.borrow_mut() = Some(()); - }); - component.screen_stack_push(); - option_no_screensaver(&result).await + screensaver_without(bitbox02::ui::status(title, status_success)).await } diff --git a/src/rust/bitbox02-rust/src/workflow/transaction.rs b/src/rust/bitbox02-rust/src/workflow/transaction.rs index 67e0c08a9d..4bdca767c9 100644 --- a/src/rust/bitbox02-rust/src/workflow/transaction.rs +++ b/src/rust/bitbox02-rust/src/workflow/transaction.rs @@ -12,28 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License.
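+ +// The dialogs below share one shape: the C component resolves to a bool, which is +// mapped onto Result<(), UserAbort>. A minimal sketch of the pattern (the helper +// name is hypothetical, not part of this API): +// +// async fn accepted_or_abort(fut: impl Future<Output = bool>) -> Result<(), UserAbort> { +// if screensaver_without(fut).await { Ok(()) } else { Err(UserAbort) } +// }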
+use crate::bb02_async::screensaver_without; use crate::hal::Ui; - -use crate::bb02_async::option_no_screensaver; -use core::cell::RefCell; - -use alloc::boxed::Box; use alloc::string::String; pub struct UserAbort; pub async fn verify_recipient(recipient: &str, amount: &str) -> Result<(), UserAbort> { - let result = RefCell::new(None as Option<Result<(), UserAbort>>); - - let mut component = bitbox02::ui::confirm_transaction_address_create( - amount, - recipient, - Box::new(|ok| { - *result.borrow_mut() = Some(if ok { Ok(()) } else { Err(UserAbort) }); - }), - ); - component.screen_stack_push(); - option_no_screensaver(&result).await + let future = bitbox02::ui::confirm_transaction_address_create(amount, recipient); + if screensaver_without(future).await { + Ok(()) + } else { + Err(UserAbort) + } } fn format_percentage(p: f64) -> String { @@ -42,18 +33,12 @@ } pub async fn verify_total_fee(total: &str, fee: &str, longtouch: bool) -> Result<(), UserAbort> { - let result = RefCell::new(None as Option<Result<(), UserAbort>>); - - let mut component = bitbox02::ui::confirm_transaction_fee_create( - total, - fee, - longtouch, - Box::new(|ok| { - *result.borrow_mut() = Some(if ok { Ok(()) } else { Err(UserAbort) }); - }), - ); - component.screen_stack_push(); - option_no_screensaver(&result).await + let future = bitbox02::ui::confirm_transaction_fee_create(total, fee, longtouch); + if screensaver_without(future).await { + Ok(()) + } else { + Err(UserAbort) + } } pub async fn verify_total_fee_maybe_warn( diff --git a/src/rust/bitbox02-rust/src/workflow/trinary_choice.rs b/src/rust/bitbox02-rust/src/workflow/trinary_choice.rs index a19f5334fa..e355ca7a93 100644 --- a/src/rust/bitbox02-rust/src/workflow/trinary_choice.rs +++ b/src/rust/bitbox02-rust/src/workflow/trinary_choice.rs @@ -12,13 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::bb02_async::option_no_screensaver; -use core::cell::RefCell; - -use alloc::boxed::Box; +use crate::bb02_async::screensaver_without; pub use bitbox02::ui::TrinaryChoice; -use bitbox02::ui::trinary_choice_create; +use bitbox02::ui::trinary_choice; pub async fn choose( message: &str, @@ -26,17 +23,6 @@ label_middle: Option<&str>, label_right: Option<&str>, ) -> TrinaryChoice { - let result = RefCell::new(None as Option<TrinaryChoice>); - - let mut component = trinary_choice_create( - message, - label_left, - label_middle, - label_right, - Box::new(|choice| { - *result.borrow_mut() = Some(choice); - }), - ); - component.screen_stack_push(); - option_no_screensaver(&result).await + let fut = trinary_choice(message, label_left, label_middle, label_right); + screensaver_without(fut).await } diff --git a/src/rust/bitbox02-rust/src/workflow/trinary_input_string.rs b/src/rust/bitbox02-rust/src/workflow/trinary_input_string.rs index ddc9a9d037..ce75f0636b 100644 --- a/src/rust/bitbox02-rust/src/workflow/trinary_input_string.rs +++ b/src/rust/bitbox02-rust/src/workflow/trinary_input_string.rs @@ -12,13 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License.
-pub use super::cancel::{Error, cancel, set_result}; -pub use bitbox02::ui::TrinaryInputStringParams as Params; +pub use super::cancel::Error; +pub use bitbox02::ui::{TrinaryInputStringParams as Params, trinary_input_string}; -use core::cell::RefCell; -use util::bb02_async::option; - -use alloc::boxed::Box; use alloc::string::String; #[derive(Copy, Clone)] @@ -36,20 +32,11 @@ pub async fn enter( can_cancel: CanCancel, preset: &str, ) -> Result<zeroize::Zeroizing<String>, Error> { - let result = RefCell::new(None); - let mut component = bitbox02::ui::trinary_input_string_create( - params, - |string| set_result(&result, string), - match can_cancel { - CanCancel::Yes => Some(Box::new(|| cancel(&result))), - CanCancel::No => None, - }, - ); - if !preset.is_empty() { - bitbox02::ui::trinary_input_string_set_input(&mut component, preset); - } - component.screen_stack_push(); - option(&result) + let can_cancel = match can_cancel { + CanCancel::Yes => true, + CanCancel::No => false, + }; + trinary_input_string(params, can_cancel, preset) .await - .or(Err(super::cancel::Error::Cancelled)) + .or(Err(Error::Cancelled)) } diff --git a/src/rust/bitbox02-rust/src/workflow/u2f_c_api.rs b/src/rust/bitbox02-rust/src/workflow/u2f_c_api.rs new file mode 100644 index 0000000000..cf5432ff3d --- /dev/null +++ b/src/rust/bitbox02-rust/src/workflow/u2f_c_api.rs @@ -0,0 +1,134 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This module is a quick workaround to use async workflows from U2F/FIDO2, where the root of the +//! USB message processing is not ported to Rust. Once it is, the `async_usb` module can be +//! used and this can be deleted. + +// TODO: figure out how to deal with the static muts below.
+// https://doc.rust-lang.org/nightly/edition-guide/rust-2024/static-mut-references.html +#![allow(static_mut_refs)] +#![allow(clippy::missing_safety_doc)] + +extern crate alloc; + +use crate::workflow::confirm; +use alloc::string::String; +use async_channel::{Receiver, TryRecvError}; +use core::ffi::CStr; +use grounded::uninit::GroundedCell; + +enum TaskState<R> { + Nothing, + Running(Receiver<R>), +} + +static UNLOCK_STATE: GroundedCell<TaskState<Result<(), ()>>> = GroundedCell::uninit(); +static CONFIRM_STATE: GroundedCell<TaskState<Result<(), confirm::UserAbort>>> = + GroundedCell::uninit(); + +#[unsafe(no_mangle)] +pub unsafe extern "C" fn rust_workflow_spawn_unlock() { + static BITBOX02_HAL: GroundedCell<crate::hal::BitBox02Hal> = GroundedCell::const_init(); + unsafe { + UNLOCK_STATE + .get() + .write(TaskState::Running(crate::main_loop::spawn( + crate::workflow::unlock::unlock(BITBOX02_HAL.get().as_mut().unwrap()), + ))); + } +} + +#[unsafe(no_mangle)] +pub unsafe extern "C" fn rust_workflow_spawn_confirm( + title: *const core::ffi::c_char, + body: *const core::ffi::c_char, +) { + static CONFIRM_TITLE: GroundedCell<String> = GroundedCell::uninit(); + static CONFIRM_BODY: GroundedCell<String> = GroundedCell::uninit(); + static CONFIRM_PARAMS: GroundedCell<confirm::Params<'static>> = GroundedCell::uninit(); + unsafe { + CONFIRM_TITLE + .get() + .write(CStr::from_ptr(title).to_str().unwrap().into()); + CONFIRM_BODY + .get() + .write(CStr::from_ptr(body).to_str().unwrap().into()); + CONFIRM_PARAMS.get().write(confirm::Params { + title: CONFIRM_TITLE.get().as_ref().unwrap(), + body: CONFIRM_BODY.get().as_ref().unwrap(), + accept_only: true, + ..Default::default() + }); + + CONFIRM_STATE + .get() + .write(TaskState::Running(crate::main_loop::spawn( + confirm::confirm(CONFIRM_PARAMS.get().as_ref().unwrap()), + ))); + } +} + +/// Returns true if there was a result. +#[unsafe(no_mangle)] +pub unsafe extern "C" fn rust_workflow_unlock_poll(result_out: &mut bool) -> bool { + unsafe { + match UNLOCK_STATE.get().as_ref().unwrap() { + TaskState::Running(recv) => { + match recv.try_recv() { + Ok(result) => { + UNLOCK_STATE.get().write(TaskState::Nothing); + match result { + Ok(()) => *result_out = true, + Err(()) => *result_out = false, + } + true + } + Err(TryRecvError::Empty) => false, // No result yet + Err(TryRecvError::Closed) => panic!("internal error"), + } + } + TaskState::Nothing => panic!("polled non-existing future"), + } + } +} + +/// Returns true if there was a result. +#[unsafe(no_mangle)] +pub unsafe extern "C" fn rust_workflow_confirm_poll(result_out: &mut bool) -> bool { + unsafe { + match CONFIRM_STATE.get().as_ref().unwrap() { + TaskState::Running(recv) => { + match recv.try_recv() { + Ok(result) => { + CONFIRM_STATE.get().write(TaskState::Nothing); + *result_out = result.is_ok(); + true + } + Err(TryRecvError::Empty) => false, // No result yet + Err(TryRecvError::Closed) => panic!("internal error"), + } + } + _ => false, + } + } +} + +#[unsafe(no_mangle)] +pub unsafe extern "C" fn rust_workflow_abort_current() { + unsafe { + UNLOCK_STATE.get().write(TaskState::Nothing); + CONFIRM_STATE.get().write(TaskState::Nothing); + } +} diff --git a/src/rust/bitbox02-rust/src/workflow/unlock_animation.rs b/src/rust/bitbox02-rust/src/workflow/unlock_animation.rs index 98596f18b4..14b77c3eb0 100644 --- a/src/rust/bitbox02-rust/src/workflow/unlock_animation.rs +++ b/src/rust/bitbox02-rust/src/workflow/unlock_animation.rs @@ -12,16 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License.
-use crate::bb02_async::option_no_screensaver; -use core::cell::RefCell; +use crate::bb02_async::screensaver_without; +use bitbox02::ui::unlock_animation; /// Performs the unlock animation. Its duration is determined by the component render rate, see /// unlock_animation.c pub async fn animate() { - let result = RefCell::new(None as Option<()>); - let mut component = bitbox02::ui::unlock_animation_create(|| { - *result.borrow_mut() = Some(()); - }); - component.screen_stack_push(); - option_no_screensaver(&result).await + screensaver_without(unlock_animation()).await; } diff --git a/src/rust/bitbox02-sys/build.rs b/src/rust/bitbox02-sys/build.rs index 1f0835989f..97cf5c2cf7 100644 --- a/src/rust/bitbox02-sys/build.rs +++ b/src/rust/bitbox02-sys/build.rs @@ -21,6 +21,8 @@ const ALLOWLIST_VARS: &[&str] = &[ "BASE58_CHECKSUM_LEN", "BIP32_SERIALIZED_LEN", "BIP39_WORDLIST_LEN", + "da14531_handler_current_product_len", + "da14531_handler_current_product", "EC_PUBLIC_KEY_LEN", "font_font_a_11X10", "font_font_a_9X9", @@ -46,6 +48,8 @@ const ALLOWLIST_VARS: &[&str] = &[ "secfalse_u8", "SD_MAX_FILE_SIZE", "SLIDER_POSITION_TWO_THIRD", + "USART_0_BUFFER_SIZE", + "USB_REPORT_SIZE", "XPUB_ENCODED_LEN", ]; @@ -54,9 +58,11 @@ const ALLOWLIST_TYPES: &[&str] = &[ "buffer_t", "component_t", "confirm_params_t", + "da14531_protocol_frame", "delay_t", "event_slider_data_t", "event_types", + "ringbuffer", "secp256k1_ecdsa_s2c_opening", "secp256k1_ecdsa_signature", "secp256k1_pubkey", @@ -66,19 +72,26 @@ const ALLOWLIST_TYPES: &[&str] = &[ "upside_down_t", ]; +const OPAQUE_TYPES: &[&str] = &["da14531_protocol_frame"]; + const ALLOWLIST_FNS: &[&str] = &[ "bip32_derive_xpub", - "bitbox02_smarteeprom_init", - "bitbox02_smarteeprom_get_unlock_attempts", - "bitbox02_smarteeprom_increment_unlock_attempts", - "bitbox02_smarteeprom_reset_unlock_attempts", "bitbox_secp256k1_dleq_prove", "bitbox_secp256k1_dleq_verify", + "bitbox02_smarteeprom_get_unlock_attempts", + "bitbox02_smarteeprom_increment_unlock_attempts", "bitbox02_smarteeprom_init", + "bitbox02_smarteeprom_reset_unlock_attempts", + "communication_mode_ble_disable", "communication_mode_ble_enabled", "confirm_create", "confirm_transaction_address_create", "confirm_transaction_fee_create", + "da14531_handler", + "da14531_power_down", + "da14531_protocol_poll", + "da14531_set_name", + "da14531_set_product", "delay_cancel", "delay_init_ms", "delay_is_elapsed", @@ -90,6 +103,10 @@ const ALLOWLIST_FNS: &[&str] = &[ "fake_securechip_event_counter_reset", "fake_securechip_event_counter", "gmtime", + "hid_hww_read", + "hid_hww_write_poll", + "hid_u2f_read", + "hid_u2f_write_poll", "hww_setup", "keystore_bip39_mnemonic_to_seed", "keystore_get_bip39_word", @@ -136,17 +153,21 @@ const ALLOWLIST_FNS: &[&str] = &[ "memory_spi_get_active_ble_firmware_version", "menu_create", "orientation_arrows_create", + "platform_product", "printf", "progress_create", "progress_set", "queue_hww_queue", "queue_pull", + "queue_u2f_queue", "random_32_bytes_mcu", "random_32_bytes", "random_fake_reset", "reboot_to_bootloader", "reset_ble", "reset_reset", + "ringbuffer_init", + "ringbuffer_num", "screen_clear", "screen_init", "screen_print_debug", @@ -184,6 +205,12 @@ const ALLOWLIST_FNS: &[&str] = &[ "trinary_choice_create", "trinary_input_string_create", "trinary_input_string_set_input", + "u2f_packet_init", + "u2f_packet_process", + "u2f_packet_timeout_get", + "u2f_packet_timeout", + "u2f_process", + "uart_poll", "UG_ClearBuffer", "UG_FontSelect", "UG_PutString", @@ -195,8 +222,12 @@ const 
ALLOWLIST_FNS: &[&str] = &[ "usb_packet_process", "usb_processing_hww", "usb_processing_init", + "usb_processing_locked", "usb_processing_process", "usb_processing_timeout_reset", + "usb_processing_u2f", + "usb_processing_unlock", + "usb_start", "util_format_datetime", ]; @@ -417,6 +448,7 @@ pub fn main() -> Result<(), &'static str> { .args(ALLOWLIST_TYPES.iter().flat_map(|s| ["--allowlist-type", s])) .args(ALLOWLIST_VARS.iter().flat_map(|s| ["--allowlist-var", s])) .args(RUSTIFIED_ENUMS.iter().flat_map(|s| ["--rustified-enum", s])) + .args(OPAQUE_TYPES.iter().flat_map(|s| ["--opaque-type", s])) .arg("wrapper.h") .arg("--") .args(&definitions) diff --git a/src/rust/bitbox02-sys/wrapper.h b/src/rust/bitbox02-sys/wrapper.h index 6f6aaef58e..037fb9644d 100644 --- a/src/rust/bitbox02-sys/wrapper.h +++ b/src/rust/bitbox02-sys/wrapper.h @@ -13,7 +13,11 @@ // limitations under the License. #include +#include +#include +#include #include +#include #include #include #include @@ -21,6 +25,8 @@ #include #include #include +#include +#include #include #include #include @@ -30,6 +36,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -51,18 +60,19 @@ #include #include #include +#include +#include #include +#include #include #include +#include #if defined(TESTING) #include - #include #include #include #include - #include - #include #endif #if !defined(TESTING) diff --git a/src/rust/bitbox02/src/communication_mode.rs b/src/rust/bitbox02/src/communication_mode.rs new file mode 100644 index 0000000000..5a0767b344 --- /dev/null +++ b/src/rust/bitbox02/src/communication_mode.rs @@ -0,0 +1,23 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub fn ble_disable() { + unsafe { + bitbox02_sys::communication_mode_ble_disable(); + } +} + +pub fn ble_enabled() -> bool { + unsafe { bitbox02_sys::communication_mode_ble_enabled() } +} diff --git a/src/rust/bitbox02/src/da14531.rs b/src/rust/bitbox02/src/da14531.rs new file mode 100644 index 0000000000..f394c37b24 --- /dev/null +++ b/src/rust/bitbox02/src/da14531.rs @@ -0,0 +1,36 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
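+ +// These wrappers only enqueue command bytes into the caller's UART write queue; +// nothing is transmitted until the main loop flushes the queue via `uart::poll`. +// A usage sketch (buffer length must be a power of two, per `RingBuffer::new`): +// +// let mut buf = [0u8; 64]; +// let mut queue = crate::ringbuffer::RingBuffer::new(&mut buf); +// power_down(&mut queue); +// while queue.len() > 0 { +// crate::uart::poll(None, None, Some(&mut queue)); +// }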
+ +use crate::ringbuffer::RingBuffer; + +pub fn set_name(name: &str, queue: &mut RingBuffer) { + let name = crate::util::str_to_cstr_vec(name).unwrap(); + unsafe { bitbox02_sys::da14531_set_name(name.as_ptr(), &mut queue.inner as *mut _) }; +} + +pub fn set_product(product: &str, queue: &mut RingBuffer) { + unsafe { + bitbox02_sys::da14531_set_product( + product.as_bytes().as_ptr() as *const _, + product.len() as u16, + &mut queue.inner, + ) + } +} + +pub fn power_down(queue: &mut RingBuffer) { + unsafe { + bitbox02_sys::da14531_power_down(&mut queue.inner as *mut _); + } +} diff --git a/src/rust/bitbox02/src/da14531_handler.rs b/src/rust/bitbox02/src/da14531_handler.rs new file mode 100644 index 0000000000..2a5fd4826c --- /dev/null +++ b/src/rust/bitbox02/src/da14531_handler.rs @@ -0,0 +1,29 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::ringbuffer::RingBuffer; +use bitbox02_sys::da14531_protocol_frame; + +pub fn handler(frame: &'static da14531_protocol_frame, uart_write_queue: &mut RingBuffer) { + unsafe { + bitbox02_sys::da14531_handler(frame as *const _, &mut uart_write_queue.inner); + } +} + +pub fn set_product(product: &'static str, len: u16) { + unsafe { + bitbox02_sys::da14531_handler_current_product = product.as_bytes().as_ptr(); + bitbox02_sys::da14531_handler_current_product_len = len; + } +} diff --git a/src/rust/bitbox02/src/da14531_protocol.rs b/src/rust/bitbox02/src/da14531_protocol.rs new file mode 100644 index 0000000000..6a3cc92645 --- /dev/null +++ b/src/rust/bitbox02/src/da14531_protocol.rs @@ -0,0 +1,45 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
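+ +// Contract sketch for `poll` below, as used by the main loop: the pending HWW +// frame, if any, is passed in via `hww_data`; the C side signals that it consumed +// the frame by NULL-ing the pointer it was handed, which `poll` mirrors by +// clearing `*hww_data`. A returned frame points into a buffer assumed to be owned +// by the C driver (hence the `&'static` lifetime) and is fed to `da14531_handler`.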
+ +use crate::ringbuffer::RingBuffer; +pub use bitbox02_sys::da14531_protocol_frame; + +pub fn poll( + uart_read_buf: &mut [u8], + uart_read_buf_len: &mut u16, + hww_data: &mut Option<[u8; 64]>, + uart_write_queue: &mut RingBuffer, +) -> Option<&'static da14531_protocol_frame> { + let mut data: *const u8 = if let Some(data) = (*hww_data).as_ref() { + data.as_ptr() as *const _ + } else { + core::ptr::null() + }; + let frame = unsafe { + bitbox02_sys::da14531_protocol_poll( + uart_read_buf.as_mut_ptr() as *mut _, + uart_read_buf_len as *mut _, + &mut data as *mut _, + &mut uart_write_queue.inner as *mut _, + ) + }; + if data.is_null() { + *hww_data = None; + } + if frame.is_null() { + None + } else { + Some(unsafe { &*frame }) + } +} diff --git a/src/rust/bitbox02/src/delay.rs b/src/rust/bitbox02/src/delay.rs index a31d3af7eb..30f93a130c 100644 --- a/src/rust/bitbox02/src/delay.rs +++ b/src/rust/bitbox02/src/delay.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use core::pin::Pin; -use core::task::{Context, Poll}; +use alloc::sync::Arc; +use core::task::{Poll, Waker}; use core::time::Duration; #[cfg(not(any( @@ -21,86 +21,52 @@ use core::time::Duration; feature = "c-unit-testing", feature = "simulator-graphical" )))] -struct DelayInner { - bitbox02_delay: bitbox02_sys::delay_t, -} - -#[cfg(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" -))] -struct DelayInner { - thread_handle: Option<std::thread::JoinHandle<()>>, - done: std::sync::Arc<std::sync::atomic::AtomicBool>, -} - -pub struct Delay { - inner: DelayInner, -} - -impl Delay { - #[cfg(not(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" - )))] - pub fn from_ms(ms: u32) -> Delay { - let mut delay = Delay { - inner: DelayInner { - bitbox02_delay: bitbox02_sys::delay_t { id: usize::MAX }, - }, - }; - unsafe { bitbox02_sys::delay_init_ms(&mut delay.inner.bitbox02_delay as *mut _, ms) } - delay +pub async fn delay_for(duration: Duration) { + use core::cell::RefCell; + use core::ffi::c_void; + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<()>, + bitbox02_delay: bitbox02_sys::delay_t, } - #[cfg(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" - ))] - pub fn from_ms(ms: u32) -> Delay { - let (thread_handle, done) = if ms == 0 { - ( - None, - std::sync::Arc::new(std::sync::atomic::AtomicBool::new(true)), - ) - } else { - let done = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)); - let handle = Some(std::thread::spawn({ - let done = std::sync::Arc::clone(&done); - move || { - std::thread::sleep(std::time::Duration::from_millis(ms as u64)); - (*done).store(true, std::sync::atomic::Ordering::Relaxed); - // TODO: Waker.wake, once we have an async runtime - } - })); - (handle, done) - }; - Delay { - inner: DelayInner { - thread_handle, - done, - }, + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + bitbox02_delay: bitbox02_sys::delay_t { id: 0 }, + })); + unsafe extern "C" fn callback(user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(()); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } + } + unsafe { + bitbox02_sys::delay_init_ms( + &mut shared_state.borrow_mut().bitbox02_delay as *mut _, + duration.as_millis() as u32, +
Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, + ) + } -#[cfg(not(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" -)))] -impl Future for Delay { - type Output = (); + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { - if unsafe { bitbox02_sys::delay_is_elapsed(&self.inner.bitbox02_delay as *const _) } { - Poll::Ready(()) - } else { - Poll::Pending + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } } - } + }) + .await } #[cfg(any( @@ -108,31 +74,55 @@ impl Future for Delay { feature = "c-unit-testing", feature = "simulator-graphical" ))] -impl Future for Delay { - type Output = (); - fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { - if self.inner.done.load(std::sync::atomic::Ordering::Relaxed) { - if let Some(th) = self.inner.thread_handle.take() { - th.join().unwrap(); - } - Poll::Ready(()) - } else { - Poll::Pending - } +pub async fn delay_for(duration: Duration) { + use std::sync::Mutex; + // Shared between the async context and the timer thread + struct SharedState { + waker: Option<Waker>, + result: Option<()>, + handle: Option<std::thread::JoinHandle<()>>, } -} -#[cfg(not(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" -)))] -impl Drop for Delay { - fn drop(&mut self) { - unsafe { bitbox02_sys::delay_cancel(&self.inner.bitbox02_delay as *const _) } + if duration == Duration::ZERO { + return; } -} -pub fn delay_for(duration: Duration) -> Delay { - Delay::from_ms(duration.as_millis() as u32) + let shared_state = Arc::new(Mutex::new(SharedState { + waker: None, + result: None, + handle: None, + })); + + let handle = std::thread::spawn({ + let shared_state = Arc::clone(&shared_state); + move || { + std::thread::sleep(duration); + let mut shared_state = shared_state.lock().unwrap(); + shared_state.result = Some(()); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref() + } + } + }); + + shared_state.lock().unwrap().handle = Some(handle); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.lock().unwrap(); + + if let Some(result) = shared_state.result { + if let Some(handle) = shared_state.handle.take() { + handle.join().unwrap(); + } + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await } diff --git a/src/rust/bitbox02/src/hid_hww.rs b/src/rust/bitbox02/src/hid_hww.rs new file mode 100644 index 0000000000..6060fd4519 --- /dev/null +++ b/src/rust/bitbox02/src/hid_hww.rs @@ -0,0 +1,21 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
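+ +// Non-blocking wrappers over the C HID driver, operating on fixed 64-byte USB HID +// reports (USB_REPORT_SIZE). A usage sketch, as in the main loop: +// +// let mut frame = [0u8; 64]; +// if hid_hww::read(&mut frame) { +// // a full report arrived; feed it to usb_packet::process +// } +// // write_poll returns true once the report has been handed to the driver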
+ +pub fn write_poll(buf: &[u8; 64]) -> bool { + unsafe { bitbox02_sys::hid_hww_write_poll(buf.as_ptr() as *const _) } +} + +pub fn read(buf: &mut [u8; 64]) -> bool { + unsafe { bitbox02_sys::hid_hww_read(buf as *mut _) } +} diff --git a/src/rust/bitbox02/src/hid_u2f.rs b/src/rust/bitbox02/src/hid_u2f.rs new file mode 100644 index 0000000000..72eb2c8da0 --- /dev/null +++ b/src/rust/bitbox02/src/hid_u2f.rs @@ -0,0 +1,21 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub fn write_poll(buf: &[u8; 64]) -> bool { + unsafe { bitbox02_sys::hid_u2f_write_poll(buf.as_ptr() as *const _) } +} + +pub fn read(buf: &mut [u8; 64]) -> bool { + unsafe { bitbox02_sys::hid_u2f_read(buf as *mut _) } +} diff --git a/src/rust/bitbox02/src/lib.rs b/src/rust/bitbox02/src/lib.rs index 56efecc2e1..49038fb2cc 100644 --- a/src/rust/bitbox02/src/lib.rs +++ b/src/rust/bitbox02/src/lib.rs @@ -34,17 +34,22 @@ use alloc::string::String; #[cfg(any(feature = "testing", feature = "simulator-graphical"))] pub mod testing; +pub mod communication_mode; +pub mod da14531; +pub mod da14531_handler; +pub mod da14531_protocol; pub mod delay; #[cfg(feature = "simulator-graphical")] pub mod event; -#[cfg(feature = "simulator-graphical")] +pub mod hid_hww; +pub mod hid_u2f; pub mod hww; pub mod keystore; pub mod memory; -#[cfg(feature = "simulator-graphical")] +pub mod platform; pub mod queue; pub mod random; -#[cfg(feature = "simulator-graphical")] +pub mod ringbuffer; pub mod screen; pub mod screen_saver; pub mod sd; @@ -53,8 +58,13 @@ pub mod securechip; #[cfg(feature = "simulator-graphical")] pub mod smarteeprom; pub mod spi_mem; +#[cfg(feature = "app-u2f")] +pub mod u2f; +#[cfg(feature = "app-u2f")] +pub mod u2f_packet; +pub mod uart; pub mod ui; -#[cfg(feature = "simulator-graphical")] +pub mod usb; pub mod usb_packet; pub mod usb_processing; diff --git a/src/rust/bitbox02/src/platform.rs b/src/rust/bitbox02/src/platform.rs new file mode 100644 index 0000000000..4d82fbdeb1 --- /dev/null +++ b/src/rust/bitbox02/src/platform.rs @@ -0,0 +1,23 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
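+ +// `product` returns the product string together with its length as a u16 because +// both are forwarded to C (see `da14531_handler::set_product`); the `'static` +// lifetime reflects that the string is assumed to live in static storage on the +// C side.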
+ +pub fn product() -> (&'static str, u16) { + unsafe { + let mut len = 0; + let s = bitbox02_sys::platform_product(&mut len as *mut _) as *const u8; + let s = core::slice::from_raw_parts(s, len); + let s = str::from_utf8_unchecked(s); + (s, len as u16) + } +} diff --git a/src/rust/bitbox02/src/queue.rs b/src/rust/bitbox02/src/queue.rs index d41dc3e3b1..abac7c6ac9 100644 --- a/src/rust/bitbox02/src/queue.rs +++ b/src/rust/bitbox02/src/queue.rs @@ -20,3 +20,14 @@ pub fn pull_hww() -> Option<[u8; 64]> { unsafe { core::ptr::copy_nonoverlapping(hww_data, data.as_mut_ptr(), 64) } Some(data) } + +#[cfg(feature = "app-u2f")] +pub fn pull_u2f() -> Option<[u8; 64]> { + let u2f_data = unsafe { bitbox02_sys::queue_pull(bitbox02_sys::queue_u2f_queue()) }; + if u2f_data.is_null() { + return None; + } + let mut data: [u8; 64] = [0; 64]; + unsafe { core::ptr::copy_nonoverlapping(u2f_data, data.as_mut_ptr(), 64) } + Some(data) +} diff --git a/src/rust/bitbox02/src/ringbuffer.rs b/src/rust/bitbox02/src/ringbuffer.rs new file mode 100644 index 0000000000..b9d816f526 --- /dev/null +++ b/src/rust/bitbox02/src/ringbuffer.rs @@ -0,0 +1,48 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use bitbox02_sys::{ringbuffer, ringbuffer_init}; + +/// A wrapper around the ASF4 `ringbuffer` type. +pub struct RingBuffer<'a> { + // For now we don't use `buf`, but when we implement push/pull we will need to. + _buf: &'a mut [u8], + pub inner: ringbuffer, +} + +impl<'a> RingBuffer<'a> { + /// `buf` length must be a power of 2 + pub fn new(buf: &'a mut [u8]) -> Self { + debug_assert!(buf.len().is_power_of_two()); + let mut inner = ringbuffer { + buf: core::ptr::null_mut(), + size: 0, + read_index: 0, + write_index: 0, + }; + unsafe { + ringbuffer_init( + &mut inner as *mut _, + buf as *mut _ as *mut _, + buf.len() as u32, + ); + }; + RingBuffer { _buf: buf, inner } + } + + /// Bytes currently used + pub fn len(&self) -> u32 { + unsafe { bitbox02_sys::ringbuffer_num(&self.inner as *const _) } + } +} diff --git a/src/rust/bitbox02/src/u2f.rs b/src/rust/bitbox02/src/u2f.rs new file mode 100644 index 0000000000..066635affe --- /dev/null +++ b/src/rust/bitbox02/src/u2f.rs @@ -0,0 +1,19 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +pub fn process() { + unsafe { + bitbox02_sys::u2f_process(); + } +} diff --git a/src/rust/bitbox02/src/u2f_packet.rs b/src/rust/bitbox02/src/u2f_packet.rs new file mode 100644 index 0000000000..9c57807cf4 --- /dev/null +++ b/src/rust/bitbox02/src/u2f_packet.rs @@ -0,0 +1,33 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use bitbox02_sys::USB_FRAME; + +pub fn init() { + unsafe { + bitbox02_sys::u2f_packet_init(); + } +} + +pub fn timeout_get(cid: &mut u32) -> bool { + unsafe { bitbox02_sys::u2f_packet_timeout_get(cid as *mut _) } +} + +pub fn timeout(cid: u32) { + unsafe { bitbox02_sys::u2f_packet_timeout(cid) } +} + +pub fn process(packet: &[u8; 64]) -> bool { + unsafe { bitbox02_sys::u2f_packet_process(packet.as_ptr() as *const _) } +} diff --git a/src/rust/bitbox02/src/uart.rs b/src/rust/bitbox02/src/uart.rs new file mode 100644 index 0000000000..d89623e1bb --- /dev/null +++ b/src/rust/bitbox02/src/uart.rs @@ -0,0 +1,45 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
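+ +// Each `None` passed to `poll` below is translated to a NULL pointer for the C +// driver, so callers can poll a single direction. Usage sketch: +// +// // write-only flush: +// poll(None, None, Some(&mut uart_write_queue)); +// // read-only poll: +// poll(Some(&mut read_buf), Some(&mut read_len), None);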
+ +use crate::ringbuffer::RingBuffer; +pub use bitbox02_sys::USART_0_BUFFER_SIZE; + +pub fn poll( + uart_read_buf: Option<&mut [u8]>, + uart_read_buf_len: Option<&mut u16>, + uart_write_queue: Option<&mut RingBuffer>, +) { + let (uart_read_buf, cap) = if let Some(uart_read_buf) = uart_read_buf { + ( + uart_read_buf as *mut _ as *mut _, + uart_read_buf.len() as u16, + ) + } else { + (core::ptr::null_mut(), 0u16) + }; + let uart_read_buf_len = if let Some(len) = uart_read_buf_len { + len as *mut _ + } else { + core::ptr::null_mut() + }; + let uart_write_queue = if let Some(uart_write_queue) = uart_write_queue { + &mut uart_write_queue.inner as *mut _ + } else { + core::ptr::null_mut() + }; + + unsafe { + bitbox02_sys::uart_poll(uart_read_buf, cap, uart_read_buf_len, uart_write_queue); + } +} diff --git a/src/rust/bitbox02/src/ui/types.rs b/src/rust/bitbox02/src/ui/types.rs index 9f2a5abed5..46e4a96468 100644 --- a/src/rust/bitbox02/src/ui/types.rs +++ b/src/rust/bitbox02/src/ui/types.rs @@ -150,9 +150,9 @@ pub type ContinueCancelCb<'a> = Box<dyn FnMut() + 'a>; pub struct MenuParams<'a> { pub words: &'a [&'a str], pub title: Option<&'a str>, - pub select_word_cb: Option<SelectWordCb<'a>>, - pub continue_on_last_cb: Option<ContinueCancelCb<'a>>, - pub cancel_cb: Option<ContinueCancelCb<'a>>, + pub select_word: bool, + pub continue_on_last: bool, + pub cancel: bool, } pub type TrinaryChoiceCb<'a> = Box<dyn FnMut(TrinaryChoice) + 'a>; diff --git a/src/rust/bitbox02/src/ui/ui.rs b/src/rust/bitbox02/src/ui/ui.rs index 904b506fc7..8b72f4252f 100644 --- a/src/rust/bitbox02/src/ui/ui.rs +++ b/src/rust/bitbox02/src/ui/ui.rs @@ -21,22 +21,23 @@ pub use super::types::{ use core::ffi::{c_char, c_void}; extern crate alloc; -use alloc::boxed::Box; +use crate::util::str_to_cstr_vec; use alloc::string::String; +use alloc::sync::Arc; use alloc::vec::Vec; +use core::cell::RefCell; +use core::task::{Poll, Waker}; -use core::marker::PhantomData; /// Wraps the C component_t to be used in Rust. -pub struct Component<'a> { +pub struct Component { component: *mut bitbox02_sys::component_t, is_pushed: bool, - on_drop: Option<Box<dyn FnMut() + 'a>>, - // This is used to have the result callbacks outlive the component. - _p: PhantomData<&'a ()>, } -impl Component<'_> { +impl Component { pub fn screen_stack_push(&mut self) { if self.is_pushed { panic!("component pushed twice"); @@ -48,7 +49,7 @@ } } -impl Drop for Component<'_> { +impl Drop for Component { fn drop(&mut self) { if !self.is_pushed { panic!("component not pushed"); @@ -56,90 +57,118 @@ unsafe { bitbox02_sys::ui_screen_stack_pop(); } - if let Some(ref mut on_drop) = self.on_drop { - (*on_drop)(); - } } } -/// Creates a trinary input component. -/// `result` - will be asynchronously set to `Some()` once the user confirms. -pub fn trinary_input_string_create<'a, F>( - params: &TrinaryInputStringParams, - confirm_callback: F, - cancel_callback: Option<ContinueCancelCb<'a>>, -) -> Component<'a> -where - // Callback must outlive component.
- F: FnMut(zeroize::Zeroizing<String>) + 'a, -{ - unsafe extern "C" fn c_confirm_callback<F2>(password: *const c_char, user_data: *mut c_void) - where - F2: FnMut(zeroize::Zeroizing<String>), - { +pub async fn trinary_input_string<'a>( + params: &TrinaryInputStringParams<'a>, + can_cancel: bool, + preset: &str, +) -> Result<zeroize::Zeroizing<String>, ()> { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<Result<zeroize::Zeroizing<String>, ()>>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn cancel_cb(user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(Err(())); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } + } + + unsafe extern "C" fn confirm_cb(password: *const c_char, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); let pw: zeroize::Zeroizing<String> = zeroize::Zeroizing::new( unsafe { crate::util::str_from_null_terminated_ptr(password) } .unwrap() .into(), ); - // The callback is dropped afterwards. This is safe because - // this C callback is guaranteed to be called only once. - let mut callback = unsafe { Box::from_raw(user_data as *mut F2) }; - callback(pw); - } - - unsafe extern "C" fn c_cancel_callback(user_data: *mut c_void) { - let callback = user_data as *mut ContinueCancelCb; - unsafe { (*callback)() }; + shared_state.result = Some(Ok(pw)); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } - let (cancel_cb, cancel_user_data) = match cancel_callback { - None => (None, core::ptr::null_mut()), - Some(cb) => ( - Some(c_cancel_callback as _), - Box::into_raw(Box::new(cb)) as *mut c_void, - ), + let (actual_cancel_cb, cancel_shared_state) = if can_cancel { + ( + Some(cancel_cb as unsafe extern "C" fn(*mut c_void)), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, + ) + } else { + (None, core::ptr::null_mut()) }; + let mut title_scratch = Vec::new(); let component = unsafe { bitbox02_sys::trinary_input_string_create( &params.to_c_params(&mut title_scratch).data, // title copied in C - Some(c_confirm_callback::<F>), - // passed to c_confirm_callback as `user_data`. - Box::into_raw(Box::new(confirm_callback)) as *mut _, - cancel_cb, - cancel_user_data, + Some(confirm_cb), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to confirm_cb as `user_data`. + actual_cancel_cb, + cancel_shared_state, // passed to cancel_cb as `user_data`. ) }; - Component { + if !preset.is_empty() { + unsafe { + bitbox02_sys::trinary_input_string_set_input( + component, + crate::util::str_to_cstr_vec(preset).unwrap().as_ptr(), + ) + } + } + + let mut component = Component { component, is_pushed: false, - on_drop: Some(Box::new(move || unsafe { - // Drop all callbacks. - if !cancel_user_data.is_null() { - drop(Box::from_raw(cancel_user_data as *mut ContinueCancelCb)); + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result.clone() { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending } - })), - _p: PhantomData, - } + } + }) + .await } -/// Creates a user confirmation dialog screen.
-/// `result` - will be asynchronously set to `Some(bool)` once the user accets or rejects. -pub fn confirm_create<'a, F>(params: &ConfirmParams, result_callback: F) -> Component<'a> -where - // Callback must outlive component. - F: FnMut(bool) + 'a, -{ - unsafe extern "C" fn c_callback<F2>(result: bool, user_data: *mut c_void) - where - F2: FnMut(bool), - { - // The callback is dropped afterwards. This is safe because - // this C callback is guaranteed to be called only once. - let mut callback = unsafe { Box::from_raw(user_data as *mut F2) }; - callback(result); +pub async fn confirm(params: &ConfirmParams<'_>) -> bool { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<bool>, } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(result: bool, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(result); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } + } + let mut title_scratch = Vec::new(); let mut body_scratch = Vec::new(); let component = unsafe { @@ -147,17 +176,32 @@ where &params .to_c_params(&mut title_scratch, &mut body_scratch) .data, - Some(c_callback::<F>), - // passed to the C callback as `user_data` - Box::into_raw(Box::new(result_callback)) as *mut _, + Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`. ) }; - Component { + + let mut component = Component { component, is_pushed: false, - on_drop: None, - _p: PhantomData, - } + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await } pub fn screen_process() { @@ -166,76 +210,144 @@ } } -pub fn status_create<'a, F>(text: &str, status_success: bool, callback: F) -> Component<'a> -where - // Callback must outlive component. - F: FnMut() + 'a, -{ - unsafe extern "C" fn c_callback<F2>(user_data: *mut c_void) - where - F2: FnMut(), - { - // The callback is dropped afterwards. This is safe because - // this C callback is guaranteed to be called only once. - let mut callback = unsafe { Box::from_raw(user_data as *mut F2) }; - callback(); +pub async fn status(text: &str, status_success: bool) { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<()>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(()); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } let component = unsafe { bitbox02_sys::status_create( - crate::util::str_to_cstr_vec(text).unwrap().as_ptr(), // copied in C + str_to_cstr_vec(text).unwrap().as_ptr(), // copied in C status_success, - Some(c_callback::<F>), - Box::into_raw(Box::new(callback)) as *mut _, // passed to c_callback as `user_data`.
+ Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`. ) }; - Component { + + let mut component = Component { component, is_pushed: false, - on_drop: None, - _p: PhantomData, - } + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await } -pub fn sdcard_create<'a, F>(callback: F) -> Component<'a> -where - // Callback must outlive component. - F: FnMut(bool) + 'a, -{ - unsafe extern "C" fn c_callback<F2>(sd_done: bool, user_data: *mut c_void) - where - F2: FnMut(bool), - { - // The callback is dropped afterwards. This is safe because - // this C callback is guaranteed to be called only once. - let mut callback = unsafe { Box::from_raw(user_data as *mut F2) }; - callback(sd_done); +pub async fn sdcard() -> bool { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<bool>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(result: bool, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(result); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } let component = unsafe { bitbox02_sys::sdcard_create( - Some(c_callback::<F>), - // passed to the C callback as `user_data` - Box::into_raw(Box::new(callback)) as *mut _, + Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`.
) }; - Component { + + let mut component = Component { component, is_pushed: false, - on_drop: None, - _p: PhantomData, - } + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await } -pub fn menu_create(params: MenuParams<'_>) -> Component<'_> { - unsafe extern "C" fn c_select_word_cb(word_idx: u8, user_data: *mut c_void) { - let callback = user_data as *mut SelectWordCb; - unsafe { (*callback)(word_idx) }; +pub async fn menu_create(params: MenuParams<'_>) -> Result<u8, ()> { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<Result<u8, ()>>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn select_word_cb(word_idx: u8, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(Ok(word_idx)); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } + } + + unsafe extern "C" fn continue_on_last_cb(user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(Ok(0)); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } - unsafe extern "C" fn c_continue_cancel_cb(user_data: *mut c_void) { - let callback = user_data as *mut ContinueCancelCb; - unsafe { (*callback)() }; + unsafe extern "C" fn cancel_cb(user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(Err(())); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } // We want to turn &[&str] into a C char**. @@ -252,27 +364,27 @@ pub fn menu_create(params: MenuParams<'_>) -> Component<'_> { let c_words: Vec<*const core::ffi::c_char> = words.iter().map(|word| word.as_ptr() as _).collect(); - let (select_word_cb, select_word_user_data) = match params.select_word_cb { - None => (None, core::ptr::null_mut()), - Some(cb) => ( - Some(c_select_word_cb as _), - Box::into_raw(Box::new(cb)) as *mut c_void, + let (select_word_cb, select_word_user_data) = match params.select_word { + false => (None, core::ptr::null_mut()), + true => ( + Some(select_word_cb as _), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to select_word_cb as `user_data`. ), }; - let (continue_on_last_cb, continue_on_last_user_data) = match params.continue_on_last_cb { - None => (None, core::ptr::null_mut()), - Some(cb) => ( - Some(c_continue_cancel_cb as _), - Box::into_raw(Box::new(cb)) as *mut c_void, + let (continue_on_last_cb, continue_on_last_user_data) = match params.continue_on_last { + false => (None, core::ptr::null_mut()), + true => ( + Some(continue_on_last_cb as _), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to continue_on_last_cb as `user_data`.
// We want to turn &[&str] into a C char**. @@ -252,27 +364,27 @@ pub fn menu_create(params: MenuParams<'_>) -> Component<'_> { let c_words: Vec<*const core::ffi::c_char> = words.iter().map(|word| word.as_ptr() as _).collect(); - let (select_word_cb, select_word_user_data) = match params.select_word_cb { - None => (None, core::ptr::null_mut()), - Some(cb) => ( - Some(c_select_word_cb as _), - Box::into_raw(Box::new(cb)) as *mut c_void, + let (select_word_cb, select_word_user_data) = match params.select_word { + false => (None, core::ptr::null_mut()), + true => ( + Some(select_word_cb as _), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to select_word_cb as `user_data`. ), }; - let (continue_on_last_cb, continue_on_last_user_data) = match params.continue_on_last_cb { - None => (None, core::ptr::null_mut()), - Some(cb) => ( - Some(c_continue_cancel_cb as _), - Box::into_raw(Box::new(cb)) as *mut c_void, + let (continue_on_last_cb, continue_on_last_user_data) = match params.continue_on_last { + false => (None, core::ptr::null_mut()), + true => ( + Some(continue_on_last_cb as _), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to continue_on_last_cb as `user_data`. ), }; - let (cancel_cb, cancel_user_data) = match params.cancel_cb { - None => (None, core::ptr::null_mut()), - Some(cb) => ( - Some(c_continue_cancel_cb as _), - Box::into_raw(Box::new(cb)) as *mut c_void, + let (cancel_cb, cancel_user_data) = match params.cancel { + false => (None, core::ptr::null_mut()), + true => ( + Some(cancel_cb as _), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to cancel_cb as `user_data`. ), }; let title = params @@ -295,145 +407,200 @@ pub fn menu_create(params: MenuParams<'_>) -> Component<'_> { core::ptr::null_mut(), ) }; - Component { + let mut component = Component { component, is_pushed: false, - on_drop: Some(Box::new(move || unsafe { - // Drop all callbacks. - if !select_word_user_data.is_null() { - drop(Box::from_raw(select_word_user_data as *mut SelectWordCb)); - } - if !continue_on_last_user_data.is_null() { - drop(Box::from_raw( - continue_on_last_user_data as *mut ContinueCancelCb, - )); - } - if !cancel_user_data.is_null() { - drop(Box::from_raw(cancel_user_data as *mut ContinueCancelCb)); + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result.clone() { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending } - })), - _p: PhantomData, - } + } + }) + .await }
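All three menu outcomes now funnel into the single `Result<u8, ()>` returned by the future: `Ok(word_idx)` from `select_word_cb`, `Ok(0)` from `continue_on_last_cb`, and `Err(())` from `cancel_cb`. A hypothetical call site (the stub stands in for the real `menu_create`):

```rust
// Stand-in for the future returned by `menu_create` in this patch.
async fn menu_create_stub() -> Result<u8, ()> {
    Ok(3)
}

// Three C callbacks collapse into one match at the call site.
async fn pick_word() -> Option<u8> {
    match menu_create_stub().await {
        Ok(word_idx) => Some(word_idx), // selection, or 0 for "continue on last"
        Err(()) => None,                // cancelled
    }
}
```

One wrinkle worth double-checking: `continue_on_last_cb` reports `Ok(0)`, which is indistinguishable from selecting word 0, so callers presumably never enable `select_word` and `continue_on_last` on the same menu; an enum result would make that constraint explicit.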
-pub fn trinary_choice_create<'a>( - message: &'a str, - label_left: Option<&'a str>, - label_middle: Option<&'a str>, - label_right: Option<&'a str>, - chosen_callback: TrinaryChoiceCb, -) -> Component<'a> { - unsafe extern "C" fn c_chosen_cb(choice: TrinaryChoice, user_data: *mut c_void) { - let callback = user_data as *mut TrinaryChoiceCb; - unsafe { (*callback)(choice) }; +pub async fn trinary_choice( + message: &str, + label_left: Option<&str>, + label_middle: Option<&str>, + label_right: Option<&str>, +) -> TrinaryChoice { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<TrinaryChoice>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(choice: TrinaryChoice, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(choice); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } - - let chosen_user_data = Box::into_raw(Box::new(chosen_callback)) as *mut c_void; - - let label_left = label_left.map(|label| crate::util::str_to_cstr_vec(label).unwrap()); - let label_middle = label_middle.map(|label| crate::util::str_to_cstr_vec(label).unwrap()); - let label_right = label_right.map(|label| crate::util::str_to_cstr_vec(label).unwrap()); + + // Bind the converted labels here so the buffers outlive the C call below; + // converting inline inside `map_or_else` would hand C a dangling pointer. + let label_left = label_left.map(|label| str_to_cstr_vec(label).unwrap()); + let label_middle = label_middle.map(|label| str_to_cstr_vec(label).unwrap()); + let label_right = label_right.map(|label| str_to_cstr_vec(label).unwrap()); let component = unsafe { bitbox02_sys::trinary_choice_create( - crate::util::str_to_cstr_vec(message).unwrap().as_ptr(), // copied in C + str_to_cstr_vec(message).unwrap().as_ptr(), // copied in C // copied in C label_left .as_ref() .map_or_else(core::ptr::null, |label| label.as_ptr()), // copied in C label_middle .as_ref() .map_or_else(core::ptr::null, |label| label.as_ptr()), // copied in C label_right .as_ref() .map_or_else(core::ptr::null, |label| label.as_ptr()), - Some(c_chosen_cb as _), - chosen_user_data, + Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`. core::ptr::null_mut(), // parent component, there is no parent. ) }; - Component { + + let mut component = Component { component, is_pushed: false, - on_drop: Some(Box::new(move || unsafe { - // Drop all callbacks. - drop(Box::from_raw(chosen_user_data as *mut TrinaryChoiceCb)); - })), - _p: PhantomData, - } + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await }
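The label bindings above are kept in their own `let`s on purpose: the converted C strings must outlive the `trinary_choice_create` call, and converting inline inside `map_or_else` would return a pointer into a temporary that is dropped before C reads it. A std-only illustration of the safe shape (`c_consumer` is a stand-in, not a real API):

```rust
use std::ffi::{CStr, CString};
use std::os::raw::c_char;

// Stand-in for a C function that copies the string immediately.
unsafe fn c_consumer(ptr: *const c_char) {
    if !ptr.is_null() {
        println!("C side saw: {:?}", unsafe { CStr::from_ptr(ptr) });
    }
}

fn main() {
    let label: Option<&str> = Some("left");

    // Bind the owned buffer first so it lives across the call...
    let owned: Option<CString> = label.map(|l| CString::new(l).unwrap());
    let ptr = owned.as_ref().map_or_else(std::ptr::null, |s| s.as_ptr());
    unsafe { c_consumer(ptr) }; // `owned` is still alive here

    // ...whereas `label.map(|l| CString::new(l).unwrap()).as_ref().map(|s| s.as_ptr())`
    // would yield a pointer into a buffer that has already been dropped.
}
```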
-pub fn confirm_transaction_address_create<'a, 'b>( - amount: &'a str, - address: &'a str, - callback: AcceptRejectCb<'b>, -) -> Component<'b> { - unsafe extern "C" fn c_callback(result: bool, user_data: *mut c_void) { - let callback = user_data as *mut AcceptRejectCb; - unsafe { (*callback)(result) }; +pub async fn confirm_transaction_address_create(amount: &str, address: &str) -> bool { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<bool>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(result: bool, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(result); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } - let user_data = Box::into_raw(Box::new(callback)) as *mut c_void; let component = unsafe { bitbox02_sys::confirm_transaction_address_create( crate::util::str_to_cstr_vec(amount).unwrap().as_ptr(), // copied in C crate::util::str_to_cstr_vec(address).unwrap().as_ptr(), // copied in C - Some(c_callback as _), - user_data, + Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`. ) }; - Component { + let mut component = Component { component, is_pushed: false, - on_drop: Some(Box::new(move || unsafe { - // Drop all callbacks. - drop(Box::from_raw(user_data as *mut AcceptRejectCb)); - })), - _p: PhantomData, - } + }; + component.screen_stack_push(); + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await }
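The payoff of returning `bool` futures shows up at call sites: multi-step confirmation flows become straight-line control flow instead of nested callbacks. A hypothetical example, with local stubs so the snippet stands alone:

```rust
// Stubs standing in for the two async functions in this patch.
async fn confirm_transaction_address_create(_amount: &str, _address: &str) -> bool {
    true
}
async fn confirm_transaction_fee_create(_amount: &str, _fee: &str, _longtouch: bool) -> bool {
    true
}

// Early-return on reject; no callback plumbing, no hand-written state machine.
async fn confirm_send(amount: &str, address: &str, fee: &str) -> bool {
    if !confirm_transaction_address_create(amount, address).await {
        return false;
    }
    confirm_transaction_fee_create(amount, fee, true).await
}

fn main() {
    // Drive `confirm_send` with any executor; see the driver sketch at the end.
    let _ = confirm_send;
}
```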
-pub fn confirm_transaction_fee_create<'a, 'b>( - amount: &'a str, - fee: &'a str, - longtouch: bool, - callback: AcceptRejectCb<'b>, -) -> Component<'b> { - unsafe extern "C" fn c_callback(result: bool, user_data: *mut c_void) { - let callback = user_data as *mut AcceptRejectCb; - unsafe { (*callback)(result) }; +pub async fn confirm_transaction_fee_create(amount: &str, fee: &str, longtouch: bool) -> bool { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<bool>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(result: bool, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(result); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } - let user_data = Box::into_raw(Box::new(callback)) as *mut c_void; let component = unsafe { bitbox02_sys::confirm_transaction_fee_create( crate::util::str_to_cstr_vec(amount).unwrap().as_ptr(), // copied in C crate::util::str_to_cstr_vec(fee).unwrap().as_ptr(), // copied in C longtouch, - Some(c_callback as _), - user_data, + Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`. ) }; - Component { + let mut component = Component { component, is_pushed: false, - on_drop: Some(Box::new(move || unsafe { - // Drop all callbacks. - drop(Box::from_raw(user_data as *mut AcceptRejectCb)); - })), - _p: PhantomData, - } -} - -pub fn trinary_input_string_set_input(component: &mut Component, word: &str) { - unsafe { - bitbox02_sys::trinary_input_string_set_input( - component.component, - crate::util::str_to_cstr_vec(word).unwrap().as_ptr(), - ) - } + }; + component.screen_stack_push(); + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await }
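At this point the identical `SharedState` plus `poll_fn` block has been pasted into six functions. A natural follow-up (not part of this patch) would be to extract it once; a sketch using std types, with the firmware's `alloc::sync::Arc` and `core::cell::RefCell` swapped for their std equivalents:

```rust
use std::cell::RefCell;
use std::future::{poll_fn, Future};
use std::sync::Arc;
use std::task::{Poll, Waker};

pub struct CallbackState<T> {
    waker: Option<Waker>,
    result: Option<T>,
}

/// One-shot bridge: returns the shared state to hand to a C callback, and a
/// future that resolves once `complete` has been called on that state.
pub fn callback_future<T>() -> (Arc<RefCell<CallbackState<T>>>, impl Future<Output = T>) {
    let state = Arc::new(RefCell::new(CallbackState {
        waker: None,
        result: None,
    }));
    let future = poll_fn({
        let state = Arc::clone(&state);
        move |cx| {
            let mut s = state.borrow_mut();
            match s.result.take() {
                Some(value) => Poll::Ready(value),
                None => {
                    s.waker = Some(cx.waker().clone());
                    Poll::Pending
                }
            }
        }
    });
    (state, future)
}

/// What each `extern "C"` callback body boils down to.
pub fn complete<T>(state: &RefCell<CallbackState<T>>, value: T) {
    let mut s = state.borrow_mut();
    s.result = Some(value);
    if let Some(waker) = s.waker.take() {
        waker.wake();
    }
}

fn main() {
    let (state, fut) = callback_future::<bool>();
    complete(&state, true);
    // Polling `fut` now would yield Poll::Ready(true).
    let _ = fut;
}
```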
pub fn screen_stack_pop_all() { @@ -442,7 +609,7 @@ pub fn screen_stack_pop_all() { } } -pub fn progress_create<'a>(title: &str) -> Component<'a> { +pub fn progress_create(title: &str) -> Component { let component = unsafe { bitbox02_sys::progress_create( crate::util::str_to_cstr_vec(title).unwrap().as_ptr(), // copied in C @@ -452,8 +619,6 @@ pub fn progress_create<'a>(title: &str) -> Component<'a> { Component { component, is_pushed: false, - on_drop: None, - _p: PhantomData, } } @@ -461,67 +626,109 @@ pub fn progress_set(component: &mut Component, progress: f32) { unsafe { bitbox02_sys::progress_set(component.component, progress) } } -pub fn empty_create<'a>() -> Component<'a> { +pub fn empty_create() -> Component { Component { component: unsafe { bitbox02_sys::empty_create() }, is_pushed: false, - on_drop: None, - _p: PhantomData, } } -pub fn unlock_animation_create<'a, F>(on_done: F) -> Component<'a> -where - // Callback must outlive component. - F: FnMut() + 'a, -{ - unsafe extern "C" fn c_on_done<F2>(param: *mut c_void) - where - F2: FnMut(), - { - // The callback is dropped afterwards. This is safe because - // this C callback is guaranteed to be called only once. - let mut on_done = unsafe { Box::from_raw(param as *mut F2) }; - on_done(); +pub async fn unlock_animation() { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<()>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(()); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } + let component = unsafe { bitbox02_sys::unlock_animation_create( - Some(c_on_done::<F>), - Box::into_raw(Box::new(on_done)) as *mut _, // passed to c_on_done as `param`. + Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`. ) }; - Component { + + let mut component = Component { component, is_pushed: false, - on_drop: None, - _p: PhantomData, - } + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await } -pub fn orientation_arrows<'a, F>(on_done: F) -> Component<'a> -where - // Callback must outlive component. - F: FnMut(bool) + 'a, -{ - unsafe extern "C" fn c_on_done<F2>(upside_down: bool, param: *mut c_void) - where - F2: FnOnce(bool), - { - // The callback is dropped afterwards. This is safe because - // this C callback is guaranteed to be called only once. - let on_done = unsafe { Box::from_raw(param as *mut F2) }; - on_done(upside_down); +pub async fn choose_orientation() -> bool { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<bool>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(upside_down: bool, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(upside_down); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } + } + let component = unsafe { bitbox02_sys::orientation_arrows_create( - Some(c_on_done::<F>), - Box::into_raw(Box::new(on_done)) as *mut _, // passed to c_on_done as `param`. + Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`. ) }; - Component { + + let mut component = Component { component, is_pushed: false, - on_drop: None, - _p: PhantomData, - } + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await }
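This is also where the `Component` lifecycle changed shape: the component is pushed onto the screen stack before the future is awaited, and popped when it drops at the end of the async function, so the screen lives exactly as long as the future. A reduced sketch of that RAII contract, mirroring the flag checks in the test stubs below (the push/pop internals are assumptions):

```rust
struct Component {
    is_pushed: bool,
}

impl Component {
    fn screen_stack_push(&mut self) {
        if self.is_pushed {
            panic!("component pushed twice");
        }
        // real code: push the underlying C component onto the screen stack
        self.is_pushed = true;
    }
}

impl Drop for Component {
    fn drop(&mut self) {
        if !self.is_pushed {
            panic!("component not pushed");
        }
        // real code: pop the screen stack and free the C component
    }
}

fn main() {
    let mut component = Component { is_pushed: false };
    component.screen_stack_push();
    // ...the future is awaited here; when this scope ends, Drop pops the screen.
}
```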
diff --git a/src/rust/bitbox02/src/ui/ui_stub.rs b/src/rust/bitbox02/src/ui/ui_stub.rs index 5b409c62b6..8417b73f0a 100644 --- a/src/rust/bitbox02/src/ui/ui_stub.rs +++ b/src/rust/bitbox02/src/ui/ui_stub.rs @@ -22,18 +22,15 @@ pub use super::types::{ TrinaryChoiceCb, TrinaryInputStringParams, }; -use core::marker::PhantomData; - extern crate alloc; use alloc::string::String; -pub struct Component<'a> { +pub struct Component { is_pushed: bool, - _p: PhantomData<&'a ()>, } -impl Component<'_> { +impl Component { pub fn screen_stack_push(&mut self) { if self.is_pushed { panic!("component pushed twice"); @@ -42,7 +39,7 @@ } } -impl Drop for Component<'_> { +impl Drop for Component { fn drop(&mut self) { if !self.is_pushed { panic!("component not pushed"); @@ -50,112 +47,63 @@ } } -pub fn trinary_input_string_create<'a, F>( - _params: &TrinaryInputStringParams, - _confirm_callback: F, - _cancel_callback: Option<ContinueCancelCb<'a>>, -) -> Component<'a> -where - F: FnMut(zeroize::Zeroizing<String>) + 'a, -{ +pub async fn trinary_input_string( + _params: &TrinaryInputStringParams<'_>, + _can_cancel: bool, + _preset: &str, +) -> Result<zeroize::Zeroizing<String>, ()> { panic!("not used"); } -pub fn confirm_create<'a, F>(_params: &ConfirmParams, _result_callback: F) -> Component<'a> -where - F: FnMut(bool) + 'a, -{ +pub async fn confirm(_params: &ConfirmParams<'_>) -> bool { panic!("not used"); } pub fn screen_process() {} -pub fn status_create<'a, F>(_text: &str, _status_success: bool, _callback: F) -> Component<'a> -where - F: FnMut() + 'a, -{ +pub async fn status(_text: &str, _status_success: bool) { panic!("not used"); } -pub fn sdcard_create<'a, F>(_callback: F) -> Component<'a> -where - F: FnMut(bool) + 'a, -{ +pub async fn sdcard() -> bool { panic!("not used"); } -pub fn menu_create(_params: MenuParams<'_>) -> Component<'_> { +pub async fn menu_create(_params: MenuParams<'_>) -> Result<u8, ()> { panic!("not used"); } -pub fn trinary_choice_create<'a>( - _message: &'a str, - _label_left: Option<&'a str>, - _label_middle: Option<&'a str>, - _label_right: Option<&'a str>, - _chosen_callback: TrinaryChoiceCb, -) -> Component<'a> { +pub async fn trinary_choice( + _message: &str, + _label_left: Option<&str>, + _label_middle: Option<&str>, + _label_right: Option<&str>, +) -> TrinaryChoice { panic!("not used") } -pub fn confirm_transaction_address_create<'a, 'b>( - _amount: &'a str, - _address: &'a str, - _callback: AcceptRejectCb<'b>, -) -> Component<'b> { +pub async fn confirm_transaction_address_create(_amount: &str, _address: &str) -> bool { panic!("not used"); } -pub fn confirm_transaction_fee_create<'a, 'b>( - _amount: &'a str, - _fee: &'a str, - _longtouch: bool, - _callback: AcceptRejectCb<'b>, -) -> Component<'b> { +pub async fn confirm_transaction_fee_create(_amount: &str, _fee: &str, _longtouch: bool) -> bool { panic!("not used"); } -pub fn trinary_input_string_set_input(_component: &mut Component, _word: &str) { - panic!("not used") -} - pub fn screen_stack_pop_all() {} -pub fn progress_create<'a>(_title: &str) -> Component<'a> { - Component { - is_pushed: false, - _p: PhantomData, - } +pub fn progress_create(_title: &str) -> Component { + Component { is_pushed: false } } pub fn progress_set(_component: &mut Component, _progress: f32) {} -pub fn empty_create<'a>() -> Component<'a> { - Component { - is_pushed: false, - _p: PhantomData, - } +pub fn empty_create() -> Component { + Component { is_pushed: false } } -pub fn unlock_animation_create<'a, F>(mut on_done: F) -> Component<'a> -where - F: FnMut() + 'a, -{ - on_done(); - Component { - is_pushed: false, - _p: PhantomData, - } -} +pub async fn unlock_animation() {} -pub fn orientation_arrows<'a, F>(on_done: F) -> Component<'a> -where - // Callback must outlive component. - F: FnOnce(bool) + 'a, -{ - on_done(false); - Component { - is_pushed: false, - _p: PhantomData, - } +pub async fn choose_orientation() -> bool { + false }
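Because the stub above never returns `Poll::Pending`, a test can drive any workflow built on it to completion with a single poll and no executor. A std sketch (`Waker::noop()` again needs Rust 1.85+):

```rust
use std::future::Future;
use std::pin::pin;
use std::task::{Context, Poll, Waker};

// Immediate-resolve stub, like `choose_orientation` in ui_stub.rs.
async fn choose_orientation() -> bool {
    false
}

fn main() {
    let mut fut = pin!(choose_orientation());
    let mut cx = Context::from_waker(Waker::noop());
    // One poll suffices: the stub completes without waiting on any C callback.
    assert_eq!(fut.as_mut().poll(&mut cx), Poll::Ready(false));
}
```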
diff --git a/src/rust/bitbox02/src/ui/ui_stub_c_unit_tests.rs b/src/rust/bitbox02/src/ui/ui_stub_c_unit_tests.rs index 69a4b22c7c..6697d2945f 100644 --- a/src/rust/bitbox02/src/ui/ui_stub_c_unit_tests.rs +++ b/src/rust/bitbox02/src/ui/ui_stub_c_unit_tests.rs @@ -19,18 +19,15 @@ pub use super::types::{ TrinaryChoiceCb, TrinaryInputStringParams, }; -use core::marker::PhantomData; - extern crate alloc; use alloc::string::String; -pub struct Component<'a> { +pub struct Component { is_pushed: bool, - _p: PhantomData<&'a ()>, } -impl<'a> Component<'a> { +impl Component { pub fn screen_stack_push(&mut self) { if self.is_pushed { panic!("component pushed twice"); @@ -39,7 +36,7 @@ } } -impl<'a> Drop for Component<'a> { +impl Drop for Component { fn drop(&mut self) { if !self.is_pushed { panic!("component not pushed"); @@ -47,115 +44,68 @@ } } -pub fn trinary_input_string_create<'a, F>( - params: &TrinaryInputStringParams, - mut confirm_callback: F, - _cancel_callback: Option<ContinueCancelCb<'a>>, -) -> Component<'a> -where - F: FnMut(zeroize::Zeroizing<String>) + 'a, -{ +pub async fn trinary_input_string( + params: &TrinaryInputStringParams<'_>, + _can_cancel: bool, + _preset: &str, +) -> Result<zeroize::Zeroizing<String>, ()> { crate::print_stdout(&format!( "ENTER SCREEN START\nTITLE: {}\nENTER SCREEN END\n", params.title )); - confirm_callback(zeroize::Zeroizing::new("".into())); - Component { - is_pushed: false, - _p: PhantomData, - } + Ok(zeroize::Zeroizing::new("".into())) } -pub fn confirm_create<'a, F>(params: &ConfirmParams, mut result_callback: F) -> Component<'a> -where - F: FnMut(bool) + 'a, -{ +pub async fn confirm(params: &ConfirmParams<'_>) -> bool { crate::print_stdout(&format!( "CONFIRM SCREEN START\nTITLE: {}\nBODY: {}\nCONFIRM SCREEN END\n", params.title, params.body )); - result_callback(true); - Component { - is_pushed: false, - _p: PhantomData, - } + true } pub fn screen_process() {} -pub fn status_create<'a, F>(text: &str, _status_success: bool, mut callback: F) -> Component<'a> -where - F: FnMut() + 'a, -{ +pub async fn status(text: &str, _status_success: bool) { crate::print_stdout(&format!( "STATUS SCREEN START\nTITLE: {}\nSTATUS SCREEN END\n", text, )); - callback(); - Component { - is_pushed: false, - _p: PhantomData, - } } -pub fn sdcard_create<'a, F>(mut callback: F) -> Component<'a> -where - F: FnMut(bool) + 'a, -{ - callback(true); - Component { - is_pushed: false, - _p: PhantomData, - } +pub async fn sdcard() -> bool { + true } -pub fn menu_create(_params: MenuParams<'_>) -> Component<'_> { +pub async fn menu_create(_params: MenuParams<'_>) -> Result<u8, ()> { panic!("not implemented"); } -pub fn trinary_choice_create<'a>( - _message: &'a str, - _label_left: Option<&'a str>, - _label_middle: Option<&'a str>, - _label_right: Option<&'a str>, - _chosen_callback: TrinaryChoiceCb, -) -> Component<'a> { +pub async fn trinary_choice( + _message: &str, + _label_left: Option<&str>, + _label_middle: Option<&str>, + _label_right: Option<&str>, +) -> TrinaryChoice { panic!("not implemented") } -pub fn confirm_transaction_address_create<'a, 'b>( - _amount: &'a str, - _address: &'a str, - mut callback: AcceptRejectCb<'b>, -) -> Component<'b> { +pub async fn confirm_transaction_address_create(_amount: &str, _address: &str) -> bool { crate::print_stdout(&format!( "CONFIRM TRANSACTION ADDRESS SCREEN START\nAMOUNT: {}\nADDRESS:
{}\nCONFIRM TRANSACTION ADDRESS SCREEN END\n", _amount, _address )); - callback(true); - Component { - is_pushed: false, - _p: PhantomData, - } + true } -pub fn confirm_transaction_fee_create<'a, 'b>( - _amount: &'a str, - _fee: &'a str, - _longtouch: bool, - mut callback: AcceptRejectCb<'b>, -) -> Component<'b> { +pub async fn confirm_transaction_fee_create(_amount: &str, _fee: &str, _longtouch: bool) -> bool { crate::print_stdout(&format!( "CONFIRM TRANSACTION FEE SCREEN START\nAMOUNT: {}\nFEE: {}\nCONFIRM TRANSACTION FEE SCREEN END\n", _amount, _fee )); - callback(true); - Component { - is_pushed: false, - _p: PhantomData, - } + true } pub fn trinary_input_string_set_input(_component: &mut Component, _word: &str) { @@ -164,41 +114,18 @@ pub fn trinary_input_string_set_input(_component: &mut Component, _word: &str) { pub fn screen_stack_pop_all() {} -pub fn progress_create<'a>(_title: &str) -> Component<'a> { - Component { - is_pushed: false, - _p: PhantomData, - } +pub fn progress_create(_title: &str) -> Component { + Component { is_pushed: false } } pub fn progress_set(_component: &mut Component, _progress: f32) {} -pub fn empty_create<'a>() -> Component<'a> { - Component { - is_pushed: false, - _p: PhantomData, - } +pub fn empty_create() -> Component { + Component { is_pushed: false } } -pub fn unlock_animation_create<'a, F>(mut on_done: F) -> Component<'a> -where - F: FnMut() + 'a, -{ - on_done(); - Component { - is_pushed: false, - _p: PhantomData, - } -} +pub async fn unlock_animation() {} -pub fn orientation_arrows<'a, F>(on_done: F) -> Component<'a> -where - // Callback must outlive component. - F: FnOnce(bool) + 'a, -{ - on_done(false); - Component { - is_pushed: false, - _p: PhantomData, - } +pub async fn choose_orientation() -> bool { + false } diff --git a/src/firmware_main_loop.h b/src/rust/bitbox02/src/usb.rs similarity index 72% rename from src/firmware_main_loop.h rename to src/rust/bitbox02/src/usb.rs index 88272135c8..2bde58dadf 100644 --- a/src/firmware_main_loop.h +++ b/src/rust/bitbox02/src/usb.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Shift Cryptosecurity AG +// Copyright 2025 Shift Crypto AG // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,14 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef _FIRMWARE_MAIN_LOOP_H_ -#define _FIRMWARE_MAIN_LOOP_H_ +pub use bitbox02_sys::USB_REPORT_SIZE; -#include - -/** - * Runs the main UI of the bitbox. 
- */ -void firmware_main_loop(void); - -#endif +pub fn start() { + unsafe { + bitbox02_sys::usb_start(); + } +} diff --git a/src/rust/bitbox02/src/usb_processing.rs b/src/rust/bitbox02/src/usb_processing.rs index 0b52c04d0f..15f6e5f32f 100644 --- a/src/rust/bitbox02/src/usb_processing.rs +++ b/src/rust/bitbox02/src/usb_processing.rs @@ -24,7 +24,20 @@ pub fn init() { unsafe { bitbox02_sys::usb_processing_init() } } -#[cfg(feature = "simulator-graphical")] pub fn process_hww() { unsafe { bitbox02_sys::usb_processing_process(bitbox02_sys::usb_processing_hww()) } } + +#[cfg(feature = "app-u2f")] +pub fn process_u2f() { + unsafe { bitbox02_sys::usb_processing_process(bitbox02_sys::usb_processing_u2f()) } +} + +#[cfg(feature = "app-u2f")] +pub fn locked_u2f() -> bool { + unsafe { bitbox02_sys::usb_processing_locked(bitbox02_sys::usb_processing_u2f()) } +} + +pub fn unlock() { + unsafe { bitbox02_sys::usb_processing_unlock() } +} diff --git a/src/rust/util/src/lib.rs b/src/rust/util/src/lib.rs index 0c784b14ed..e2cfb15caf 100644 --- a/src/rust/util/src/lib.rs +++ b/src/rust/util/src/lib.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(not(test), no_std)] +#![cfg_attr(not(feature = "testing"), no_std)] pub mod ascii; pub mod bb02_async; pub mod bip32; diff --git a/src/rust/util/src/log.rs b/src/rust/util/src/log.rs index d50d6d905e..62af7f27b0 100644 --- a/src/rust/util/src/log.rs +++ b/src/rust/util/src/log.rs @@ -4,8 +4,21 @@ pub use ::rtt_target; /// Macro to log over RTT if `rtt` feature is set, otherwise noop #[macro_export] +#[cfg(all(feature = "rtt", target_os = "none"))] +macro_rules! log { + ($($arg:tt)*) => { {$crate::log::rtt_target::rprintln!($($arg)*) }}; +} + +#[macro_export] +#[cfg(all(not(feature = "rtt"), target_os = "none"))] +macro_rules! log { + ($($arg:tt)*) => {}; +} + +#[macro_export] +#[cfg(not(target_os = "none"))] macro_rules! log { - ($($arg:tt)*) => { #[cfg(feature="rtt")] {$crate::log::rtt_target::rprintln!($($arg)*) }}; + ($($arg:tt)*) => {std::println!($($arg)*) }; } // Make log macro usable in crate diff --git a/src/usb/class/hid/hww/hid_hww.c b/src/usb/class/hid/hww/hid_hww.c index 16ba518033..107224dbc0 100644 --- a/src/usb/class/hid/hww/hid_hww.c +++ b/src/usb/class/hid/hww/hid_hww.c @@ -57,6 +57,7 @@ static struct usbdc_handler _request_handler = {NULL, (FUNC_PTR)_request}; static volatile bool _send_busy = false; static volatile bool _has_data = false; static volatile bool _request_in_flight = false; +static uint8_t _write_buf[64] __attribute__((aligned(4))); // First time this function is called it initiates a transfer. Call it multiple times to poll for // completion. Once it returns true, there is data in the buffer. 
@@ -85,7 +86,8 @@ bool hid_hww_write_poll(const uint8_t* data) if (_send_busy) { return false; } - if (hid_write(&_func_data, data, USB_HID_REPORT_OUT_SIZE) == ERR_NONE) { + memcpy(_write_buf, data, USB_HID_REPORT_OUT_SIZE); + if (hid_write(&_func_data, _write_buf, USB_HID_REPORT_OUT_SIZE) == ERR_NONE) { _send_busy = true; return true; } diff --git a/src/usb/class/hid/u2f/hid_u2f.c b/src/usb/class/hid/u2f/hid_u2f.c index 0aa3ecb9f7..e96da48fad 100644 --- a/src/usb/class/hid/u2f/hid_u2f.c +++ b/src/usb/class/hid/u2f/hid_u2f.c @@ -45,6 +45,7 @@ static uint8_t _report_descriptor[] = {USB_DESC_U2F_REPORT}; static volatile bool _send_busy = false; static volatile bool _has_data = false; static volatile bool _request_in_flight = false; +static uint8_t _write_buf[64] __attribute__((aligned(4))); /** * The USB device core request handler callback for the U2F interface. @@ -87,7 +88,8 @@ bool hid_u2f_write_poll(const uint8_t* data) if (_send_busy) { return false; } - if (hid_write(&_func_data, data, USB_HID_REPORT_OUT_SIZE) == ERR_NONE) { + memcpy(_write_buf, data, USB_HID_REPORT_OUT_SIZE); + if (hid_write(&_func_data, _write_buf, USB_HID_REPORT_OUT_SIZE) == ERR_NONE) { _send_busy = true; return true; } diff --git a/test/simulator-graphical/Cargo.lock b/test/simulator-graphical/Cargo.lock index 492cf0afc4..df9c028158 100644 --- a/test/simulator-graphical/Cargo.lock +++ b/test/simulator-graphical/Cargo.lock @@ -183,6 +183,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "175571dd1d178ced59193a6fc02dde1b972eb0bc56c892cde9beeceac5bf0f6b" +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "atomic-waker" version = "1.1.2" @@ -297,6 +303,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "bitbox02-executor" +version = "0.1.0" +dependencies = [ + "async-task", + "concurrent-queue", + "critical-section", + "futures-lite", + "pin-project-lite", +] + [[package]] name = "bitbox02-noise" version = "0.1.0" @@ -315,6 +332,7 @@ dependencies = [ "bip39", "bitbox-aes", "bitbox02", + "bitbox02-executor", "bitbox02-noise", "bitcoin", "bitcoin_hashes", diff --git a/test/simulator-graphical/src/main.rs b/test/simulator-graphical/src/main.rs index 6f0bd5e8c3..0f659e7bb4 100644 --- a/test/simulator-graphical/src/main.rs +++ b/test/simulator-graphical/src/main.rs @@ -51,6 +51,9 @@ use tracing_subscriber::{EnvFilter, filter::LevelFilter, fmt, prelude::*}; use bitbox02::ui::ugui::UG_COLOR; +// Explicitly link library +use bitbox02_rust_c as _; + static BG: &[u8; 325362] = include_bytes!("../bg.png"); const MARGIN: usize = 20; @@ -669,7 +672,6 @@ impl ApplicationHandler for App { } } // Business logic - unsafe { bitbox02_rust_c::workflow::rust_workflow_spin() } bitbox02_rust::async_usb::spin(); bitbox02::usb_processing::process_hww(); bitbox02::screen::process(); diff --git a/test/simulator/simulator.c b/test/simulator/simulator.c index 271ec1a580..dc9dc5875d 100644 --- a/test/simulator/simulator.c +++ b/test/simulator/simulator.c @@ -67,7 +67,6 @@ static void send_usb_message_socket(void) static void simulate_firmware_execution(const uint8_t* input) { usb_packet_process((const USB_FRAME*)input); - rust_workflow_spin(); rust_async_usb_spin(); usb_processing_process(usb_processing_hww()); }
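One loose end for readers: something must drive these futures on the device. The simulator's Cargo.lock above pulls in a new `bitbox02-executor` crate (built on `async-task`, `concurrent-queue`, `critical-section`, `futures-lite` and `pin-project-lite`), whose implementation is not part of this diff. Conceptually, a driver for a single UI future only has to alternate polling with event processing, much like the simulator loop that calls `async_usb::spin()`, `process_hww()` and `screen::process()`. A deliberately naive sketch, not the real executor:

```rust
use std::future::Future;
use std::pin::pin;
use std::task::{Context, Poll, Waker};

/// Poll `fut` to completion, running `spin` between polls so the event
/// sources (USB, UI, the C callbacks above) get a chance to fire.
fn block_on<F: Future>(fut: F, mut spin: impl FnMut()) -> F::Output {
    let mut fut = pin!(fut);
    let mut cx = Context::from_waker(Waker::noop());
    loop {
        if let Poll::Ready(value) = fut.as_mut().poll(&mut cx) {
            return value;
        }
        spin();
    }
}

fn main() {
    let result = block_on(async { 1 + 1 }, || {});
    assert_eq!(result, 2);
}
```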