diff --git a/.gitattributes b/.gitattributes index c3b959bffd..ce1d6cd14e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,2 @@ /py/bitbox02/bitbox02/generated/* linguist-generated=true -/src/rust/vendor/** linguist-generated=true +/external/vendor/** linguist-generated=true diff --git a/external/vendor/async-task/.cargo-checksum.json b/external/vendor/async-task/.cargo-checksum.json new file mode 100644 index 0000000000..09c77268b8 --- /dev/null +++ b/external/vendor/async-task/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"06e5796b3db8767807a59ab26c05cc4cd16b3be327edea4c6407f172db41ce7b","CHANGELOG.md":"8433fdce250bdc1864320a7eaab940261a9d74c9fdd42d97591d4708a81831f6","Cargo.lock":"724f4923c3a914b938ee5769bbf9bc35cd7261d129406fae00777a5e66e79416","Cargo.toml":"1ee0e30526e13349dafcd286676747fbbe1b6e876ec28dafa735ae0f603600e7","Cargo.toml.orig":"d94de6be0f8ab5af18a17337a6d93c6adeb47f28cf917b9ba0c709cff9095390","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"91a65b44a1692a16fa2d2f5bb424e78177f0c0ca0051e93db20443568377d5e5","benches/spawn.rs":"9272992dbc058734be04e4b1c0c92fa16f62eebe062c2ce74bbedfae1ce36689","examples/spawn-local.rs":"729d6b8ef55fad9387e8912567160766fb00c3880a7acbdcab7316aefe7811c3","examples/spawn-on-thread.rs":"90c5ffcbe11b847a9201a83ac2f036bf1721dfe971708781a0769ef8354e87c9","examples/spawn.rs":"360c86d609eea98333ba6284ebf8eeb73acc651f07d30b0dd5879757b4bf6125","examples/with-metadata.rs":"c5cb1f101d7320df6d7b6e3a82d5483b94b2e47523387a910facf5c390e535e3","src/header.rs":"d7486c5528b7fb4b372e66e73923747efe052d1318edc6a0797f00fa20b0b2e3","src/lib.rs":"195df753931d1259f6325789f111e79b833432c2925b7f8a90d94ab82b6cd9fe","src/raw.rs":"5879172e761591fee44293537ed134f794c42c873afdd5ccb1107a8dc7ccce6c","src/runnable.rs":"e12bec98bc1aa2f10194c85c7b022807cd5e1abd95a37db93bd5b06ced8d1a28","src/state.rs":"73ec4b98e8519faad882c1ee19711066a9e2a9b2cf9441436f145c8597e2b93d","src/task.rs":"8899dc897b21220a19134ae3755eefefeda55f18a56e32145c2a97d69be60fb6","src/utils.rs":"bc6a0073b07f50f3495962b77afd64c38a0b4ec4e5f2d7e875f72e92254a7dd3","tests/basic.rs":"081729ff928214edcdc2550644708e2571f127e59afc24fdc8af94e2e2cc441c","tests/cancel.rs":"6ffd2e52e3846e20b5221b1509fe718e187652302e8723c9751c1c7921109201","tests/join.rs":"47ae65d842d658a0d8f821535e67160a906f8182373f1780f364314371f99bae","tests/metadata.rs":"33b1d406d4f834671524cbc0c27edb6d6fb95ef16440c24329dfb1aff0db5e76","tests/panic.rs":"d5bd73f8d697277ed0a8193769e3a88989285deff08a111f3b149fd1aa760e65","tests/ready.rs":"45c8562bbbe3837f22129b42ffef734be4572366ff3c3ce6aae1e593f558d429","tests/waker_panic.rs":"6f1ac597ab8df2a8452eface16dec48a150d87770fd6afc3965f36f6c84a7dbb","tests/waker_pending.rs":"8b65a64d00fb3f2e33b8ed9db296b9a2aa8110a44eba7a899bab8dfecb902f5a","tests/waker_ready.rs":"abbc78ecb291e894f6805b18ca3c2945b3f2bc9da6ec918ffa5ab9d27a759b5a"},"package":"8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de"} \ No newline at end of file diff --git a/external/vendor/async-task/.cargo_vcs_info.json b/external/vendor/async-task/.cargo_vcs_info.json new file mode 100644 index 0000000000..0f2f0f8925 --- /dev/null +++ b/external/vendor/async-task/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "3065c372e1ef1611230195ad7f3aae80ffde8261" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/external/vendor/async-task/CHANGELOG.md 
b/external/vendor/async-task/CHANGELOG.md new file mode 100644 index 0000000000..0324dca415 --- /dev/null +++ b/external/vendor/async-task/CHANGELOG.md @@ -0,0 +1,112 @@ +# Version 4.7.1 + +- Improve the panic message for when a task is polled after completion. (#73) + +# Version 4.7.0 + +- Add `from_raw` and `into_raw` functions for `Runnable` to ease passing it + across an FFI boundary. (#65) + +# Version 4.6.0 + +- Bump MSRV to 1.57. (#63) +- Task layout computation failures are now a compile-time error instead of a + runtime abort. (#63) + +# Version 4.5.0 + +- Add a `portable-atomic` feature that enables the usage of fallback primitives for CPUs without atomics. (#58) + +# Version 4.4.1 + +- Clarify safety documentation for `spawn_unchecked`. (#49) + +# Version 4.4.0 + +- Ensure that the allocation doesn't exceed `isize::MAX` (#32) +- Add `FallibleTask::is_finished()` (#34) +- Add a metadata generic parameter to tasks (#33) +- Add panic propagation to tasks (#37) +- Add a way to tell if the task was woken while running from the schedule function (#42) + +# Version 4.3.0 + +- Bump MSRV to Rust 1.47. (#30) +- Evaluate the layouts for the tasks at compile time. (#30) +- Add layout_info field to TaskVTable so that debuggers can decode raw tasks. (#29) + +# Version 4.2.0 + +- Add `Task::is_finished`. (#19) + +# Version 4.1.0 + +- Add `FallibleTask`. (#21) + +# Version 4.0.3 + +- Document the return value of `Runnable::run()` better. + +# Version 4.0.2 + +- Nits in the docs. + +# Version 4.0.1 + +- Nits in the docs. + +# Version 4.0.0 + +- Rename `Task` to `Runnable`. +- Rename `JoinHandle` to `Task`. +- Cancel `Task` on drop. +- Add `Task::detach()` and `Task::cancel()`. +- Add `spawn_unchecked()`. + +# Version 3.0.0 + +- Use `ThreadId` in `spawn_local` because OS-provided IDs can get recycled. +- Add `std` feature to `Cargo.toml`. + +# Version 2.1.1 + +- Allocate large futures on the heap. + +# Version 2.1.0 + +- `JoinHandle` now only evaluates after the task's future has been dropped. + +# Version 2.0.0 + +- Return `true` in `Task::run()`. + +# Version 1.3.1 + +- Make `spawn_local` available only on unix and windows. + +# Version 1.3.0 + +- Add `waker_fn`. + +# Version 1.2.1 + +- Add the `no-std` category to the package. + +# Version 1.2.0 + +- The crate is now marked with `#![no_std]`. +- Add `Task::waker` and `JoinHandle::waker`. +- Add `Task::into_raw` and `Task::from_raw`. + +# Version 1.1.1 + +- Fix a use-after-free bug where the schedule function is dropped while running. + +# Version 1.1.0 + +- If a task is dropped or canceled outside the `run` method, it gets re-scheduled. +- Add `spawn_local` constructor. + +# Version 1.0.0 + +- Initial release diff --git a/external/vendor/async-task/Cargo.lock b/external/vendor/async-task/Cargo.lock new file mode 100644 index 0000000000..109a5808cb --- /dev/null +++ b/external/vendor/async-task/Cargo.lock @@ -0,0 +1,665 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "async-channel" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" +dependencies = [ + "concurrent-queue", + "event-listener 4.0.1", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.2.0", + "async-task 4.7.0", + "concurrent-queue", + "fastrand", + "futures-lite", + "slab", +] + +[[package]] +name = "async-fs" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc19683171f287921f2405677dd2ed2549c3b3bda697a563ebc3a121ace2aba1" +dependencies = [ + "async-lock 3.2.0", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-io" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" +dependencies = [ + "async-lock 3.2.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +dependencies = [ + "event-listener 4.0.1", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-process" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a53fc6301894e04a92cb2584fedde80cb25ba8e02d9dc39d4a87d036e22f397d" +dependencies = [ + "async-channel", + "async-io", + "async-lock 3.2.0", + "async-signal", + "async-task 4.7.0", + "blocking", + "cfg-if", + "event-listener 5.3.0", + "futures-lite", + "rustix", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-signal" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" +dependencies = [ + "async-io", + "async-lock 2.8.0", + "atomic-waker", + "cfg-if", + "futures-core", + "futures-io", + "rustix", + "signal-hook-registry", + "slab", + "windows-sys 0.48.0", +] + +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + +[[package]] +name = "async-task" +version = "4.7.1" +dependencies = [ + "atomic-waker", + "easy-parallel", + "flaky_test", + "flume", + "futures-lite", + "once_cell", + "pin-project-lite", + "portable-atomic", + "smol", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel", + "async-lock 3.2.0", + "async-task 4.7.0", + "fastrand", + "futures-io", + "futures-lite", + "piper", + "tracing", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "easy-parallel" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2afbb9b0aef60e4f0d2b18129b6c0dff035a6f7dbbd17c2f38c1432102ee223c" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f2cdcf274580f2d63697192d744727b3198894b1bf02923643bf59e2c26712" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.1", + "pin-project-lite", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "flaky_test" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046caa1f23d7f751fc4ead3d6669a77fa5fc6cf6074960ddeb6a0b0a5b83c8da" +dependencies = [ + "flaky_test_impl", + "futures-util", +] + +[[package]] +name = "flaky_test_impl" +version = "0.2.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e35909c2f0552fdae5b40f1e95a7da12afb58c1f2f455a12c216c58d869abe" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "spin", +] + +[[package]] +name = "futures-core" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" + +[[package]] +name = "futures-io" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" + +[[package]] +name = "futures-lite" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "libc" +version = "0.2.151" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" + +[[package]] +name = "linux-raw-sys" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + +[[package]] +name = "polling" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" 
+dependencies = [ + "cfg-if", + "concurrent-queue", + "pin-project-lite", + "rustix", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + +[[package]] +name = "proc-macro2" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustix" +version = "0.38.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smol" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e635339259e51ef85ac7aa29a1cd991b957047507288697a690e80ab97d07cad" +dependencies = [ + "async-channel", + "async-executor", + "async-fs", + "async-io", + "async-lock 3.2.0", + "async-net", + "async-process", + "blocking", + "futures-lite", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + 
+[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" diff --git a/external/vendor/async-task/Cargo.toml b/external/vendor/async-task/Cargo.toml new file mode 100644 index 0000000000..7719f420ac --- /dev/null +++ b/external/vendor/async-task/Cargo.toml @@ -0,0 +1,67 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.57" +name = "async-task" +version = "4.7.1" +authors = ["Stjepan Glavina "] +exclude = ["/.*"] +description = "Task abstraction for building executors" +readme = "README.md" +keywords = [ + "futures", + "task", + "executor", + "spawn", +] +categories = [ + "asynchronous", + "concurrency", + "no-std", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/async-task" + +[dependencies.portable-atomic] +version = "1" +optional = true +default-features = false + +[dev-dependencies.atomic-waker] +version = "1" + +[dev-dependencies.easy-parallel] +version = "3" + +[dev-dependencies.flaky_test] +version = "0.2" + +[dev-dependencies.flume] +version = "0.11" +default-features = false + +[dev-dependencies.futures-lite] +version = "2.0.0" + +[dev-dependencies.once_cell] +version = "1" + +[dev-dependencies.pin-project-lite] +version = "0.2.10" + +[dev-dependencies.smol] +version = "2" + +[features] +default = ["std"] +std = [] diff --git a/external/vendor/async-task/Cargo.toml.orig b/external/vendor/async-task/Cargo.toml.orig new file mode 100644 index 0000000000..7779a6a34e --- /dev/null +++ b/external/vendor/async-task/Cargo.toml.orig @@ -0,0 +1,37 @@ +[package] +name = "async-task" +# When publishing a new version: +# - Update CHANGELOG.md +# - Create "v4.x.y" git tag +version = "4.7.1" +authors = ["Stjepan Glavina "] +edition = "2021" +rust-version = "1.57" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/async-task" +description = "Task abstraction for building executors" +keywords = ["futures", "task", "executor", "spawn"] +categories = ["asynchronous", "concurrency", "no-std"] +exclude = ["/.*"] + +[features] +default = ["std"] +std = [] + +[dependencies] +# Uses portable-atomic polyfill atomics on targets without them +portable-atomic = { version = "1", optional = true, default-features = false } + +[dev-dependencies] +atomic-waker = "1" +easy-parallel = "3" +flaky_test = "0.2" +flume = { version = "0.11", default-features = false } +futures-lite = "2.0.0" +once_cell = "1" +pin-project-lite = "0.2.10" +smol = "2" + +# rewrite dependencies to use the this version of async-task when running tests +[patch.crates-io] +async-task = { path = "." 
} diff --git a/external/vendor/async-task/LICENSE-APACHE b/external/vendor/async-task/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/external/vendor/async-task/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/external/vendor/async-task/LICENSE-MIT b/external/vendor/async-task/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/external/vendor/async-task/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/external/vendor/async-task/README.md b/external/vendor/async-task/README.md new file mode 100644 index 0000000000..7044c9dfd9 --- /dev/null +++ b/external/vendor/async-task/README.md @@ -0,0 +1,69 @@ +# async-task + +[![Build](https://github.com/smol-rs/async-task/workflows/Build%20and%20test/badge.svg)]( +https://github.com/smol-rs/async-task/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/async-task) +[![Cargo](https://img.shields.io/crates/v/async-task.svg)]( +https://crates.io/crates/async-task) +[![Documentation](https://docs.rs/async-task/badge.svg)]( +https://docs.rs/async-task) + +Task abstraction for building executors. + +To spawn a future onto an executor, we first need to allocate it on the heap and keep some +state attached to it. The state indicates whether the future is ready for polling, waiting to +be woken up, or completed. Such a stateful future is called a *task*. + +All executors have a queue that holds scheduled tasks: + +```rust +let (sender, receiver) = flume::unbounded(); +``` + +A task is created using either `spawn()`, `spawn_local()`, or `spawn_unchecked()` which +return a `Runnable` and a `Task`: + +```rust +// A future that will be spawned. +let future = async { 1 + 2 }; + +// A function that schedules the task when it gets woken up. +let schedule = move |runnable| sender.send(runnable).unwrap(); + +// Construct a task. +let (runnable, task) = async_task::spawn(future, schedule); + +// Push the task into the queue by invoking its schedule function. +runnable.schedule(); +``` + +The `Runnable` is used to poll the task's future, and the `Task` is used to await its +output. 
+ +Finally, we need a loop that takes scheduled tasks from the queue and runs them: + +```rust +for runnable in receiver { + runnable.run(); +} +``` + +Method `run()` polls the task's future once. Then, the `Runnable` +vanishes and only reappears when its `Waker` wakes the task, thus +scheduling it to be run again. + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/external/vendor/async-task/benches/spawn.rs b/external/vendor/async-task/benches/spawn.rs new file mode 100644 index 0000000000..75d059ecec --- /dev/null +++ b/external/vendor/async-task/benches/spawn.rs @@ -0,0 +1,22 @@ +#![feature(test)] + +extern crate test; + +use smol::future; +use test::Bencher; + +#[bench] +fn task_create(b: &mut Bencher) { + b.iter(|| { + let _ = async_task::spawn(async {}, drop); + }); +} + +#[bench] +fn task_run(b: &mut Bencher) { + b.iter(|| { + let (runnable, task) = async_task::spawn(async {}, drop); + runnable.run(); + future::block_on(task); + }); +} diff --git a/external/vendor/async-task/examples/spawn-local.rs b/external/vendor/async-task/examples/spawn-local.rs new file mode 100644 index 0000000000..a9da1b4de9 --- /dev/null +++ b/external/vendor/async-task/examples/spawn-local.rs @@ -0,0 +1,73 @@ +//! A simple single-threaded executor that can spawn non-`Send` futures. + +use std::cell::Cell; +use std::future::Future; +use std::rc::Rc; + +use async_task::{Runnable, Task}; + +thread_local! { + // A queue that holds scheduled tasks. + static QUEUE: (flume::Sender<Runnable>, flume::Receiver<Runnable>) = flume::unbounded(); +} + +/// Spawns a future on the executor. +fn spawn<F, T>(future: F) -> Task<T> +where + F: Future<Output = T> + 'static, + T: 'static, +{ + // Create a task that is scheduled by pushing itself into the queue. + let schedule = |runnable| QUEUE.with(|(s, _)| s.send(runnable).unwrap()); + let (runnable, task) = async_task::spawn_local(future, schedule); + + // Schedule the task by pushing it into the queue. + runnable.schedule(); + + task +} + +/// Runs a future to completion. +fn run<F, T>(future: F) -> T +where + F: Future<Output = T> + 'static, + T: 'static, +{ + // Spawn a task that sends its result through a channel. + let (s, r) = flume::unbounded(); + spawn(async move { drop(s.send(future.await)) }).detach(); + + loop { + // If the original task has completed, return its result. + if let Ok(val) = r.try_recv() { + return val; + } + + // Otherwise, take a task from the queue and run it. + QUEUE.with(|(_, r)| r.recv().unwrap().run()); + } +} + +fn main() { + let val = Rc::new(Cell::new(0)); + + // Run a future that increments a non-`Send` value. + run({ + let val = val.clone(); + async move { + // Spawn a future that increments the value. + let task = spawn({ + let val = val.clone(); + async move { + val.set(dbg!(val.get()) + 1); + } + }); + + val.set(dbg!(val.get()) + 1); + task.await; + } + }); + + // The value should be 2 at the end of the program.
+ dbg!(val.get()); +} diff --git a/external/vendor/async-task/examples/spawn-on-thread.rs b/external/vendor/async-task/examples/spawn-on-thread.rs new file mode 100644 index 0000000000..b0ec2f20a7 --- /dev/null +++ b/external/vendor/async-task/examples/spawn-on-thread.rs @@ -0,0 +1,53 @@ +//! A function that runs a future to completion on a dedicated thread. + +use std::future::Future; +use std::sync::Arc; +use std::thread; + +use async_task::Task; +use smol::future; + +/// Spawns a future on a new dedicated thread. +/// +/// The returned task can be used to await the output of the future. +fn spawn_on_thread<F, T>(future: F) -> Task<T> +where + F: Future<Output = T> + Send + 'static, + T: Send + 'static, +{ + // Create a channel that holds the task when it is scheduled for running. + let (sender, receiver) = flume::unbounded(); + let sender = Arc::new(sender); + let s = Arc::downgrade(&sender); + + // Wrap the future into one that disconnects the channel on completion. + let future = async move { + // When the inner future completes, the sender gets dropped and disconnects the channel. + let _sender = sender; + future.await + }; + + // Create a task that is scheduled by sending it into the channel. + let schedule = move |runnable| s.upgrade().unwrap().send(runnable).unwrap(); + let (runnable, task) = async_task::spawn(future, schedule); + + // Schedule the task by sending it into the channel. + runnable.schedule(); + + // Spawn a thread running the task to completion. + thread::spawn(move || { + // Keep taking the task from the channel and running it until completion. + for runnable in receiver { + runnable.run(); + } + }); + + task +} + +fn main() { + // Spawn a future on a dedicated thread. + future::block_on(spawn_on_thread(async { + println!("Hello, world!"); + })); +} diff --git a/external/vendor/async-task/examples/spawn.rs b/external/vendor/async-task/examples/spawn.rs new file mode 100644 index 0000000000..3a648114c9 --- /dev/null +++ b/external/vendor/async-task/examples/spawn.rs @@ -0,0 +1,48 @@ +//! A simple single-threaded executor. + +use std::future::Future; +use std::panic::catch_unwind; +use std::thread; + +use async_task::{Runnable, Task}; +use once_cell::sync::Lazy; +use smol::future; + +/// Spawns a future on the executor. +fn spawn<F, T>(future: F) -> Task<T> +where + F: Future<Output = T> + Send + 'static, + T: Send + 'static, +{ + // A queue that holds scheduled tasks. + static QUEUE: Lazy<flume::Sender<Runnable>> = Lazy::new(|| { + let (sender, receiver) = flume::unbounded::<Runnable>(); + + // Start the executor thread. + thread::spawn(|| { + for runnable in receiver { + // Ignore panics inside futures. + let _ignore_panic = catch_unwind(|| runnable.run()); + } + }); + + sender + }); + + // Create a task that is scheduled by pushing it into the queue. + let schedule = |runnable| QUEUE.send(runnable).unwrap(); + let (runnable, task) = async_task::spawn(future, schedule); + + // Schedule the task by pushing it into the queue. + runnable.schedule(); + + task +} + +fn main() { + // Spawn a future and await its result. + let task = spawn(async { + println!("Hello, world!"); + }); + future::block_on(task); +} diff --git a/external/vendor/async-task/examples/with-metadata.rs b/external/vendor/async-task/examples/with-metadata.rs new file mode 100644 index 0000000000..ed84e31f25 --- /dev/null +++ b/external/vendor/async-task/examples/with-metadata.rs @@ -0,0 +1,145 @@ +//! A single threaded executor that uses shortest-job-first scheduling.
+ +use std::cell::RefCell; +use std::collections::BinaryHeap; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::thread; +use std::time::{Duration, Instant}; +use std::{cell::Cell, future::Future}; + +use async_task::{Builder, Runnable, Task}; +use pin_project_lite::pin_project; +use smol::{channel, future}; + +struct ByDuration(Runnable<DurationMetadata>); + +impl ByDuration { + fn duration(&self) -> Duration { + self.0.metadata().inner.get() + } +} + +impl PartialEq for ByDuration { + fn eq(&self, other: &Self) -> bool { + self.duration() == other.duration() + } +} + +impl Eq for ByDuration {} + +impl PartialOrd for ByDuration { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for ByDuration { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.duration().cmp(&other.duration()).reverse() + } +} + +pin_project! { + #[must_use = "futures do nothing unless you `.await` or poll them"] + struct MeasureRuntime<'a, F> { + #[pin] + f: F, + duration: &'a Cell<Duration> + } +} + +impl<'a, F: Future> Future for MeasureRuntime<'a, F> { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let this = self.project(); + let duration_cell: &Cell<Duration> = this.duration; + let start = Instant::now(); + let res = F::poll(this.f, cx); + let new_duration = Instant::now() - start; + duration_cell.set(duration_cell.get() / 2 + new_duration / 2); + res + } +} + +pub struct DurationMetadata { + inner: Cell<Duration>, +} + +thread_local! { + // A queue that holds scheduled tasks. + static QUEUE: RefCell<BinaryHeap<ByDuration>> = RefCell::new(BinaryHeap::new()); +} + +fn make_future_fn<'a, F>( + future: F, +) -> impl (FnOnce(&'a DurationMetadata) -> MeasureRuntime<'a, F>) { + move |duration_meta| MeasureRuntime { + f: future, + duration: &duration_meta.inner, + } +} + +fn ensure_safe_schedule<F: Fn(Runnable<DurationMetadata>)>(f: F) -> F { + f +} + +/// Spawns a future on the executor. +pub fn spawn<F, T>(future: F) -> Task<T, DurationMetadata> +where + F: Future<Output = T> + 'static, + T: 'static, +{ + let spawn_thread_id = thread::current().id(); + // Create a task that is scheduled by pushing it into the queue. + let schedule = ensure_safe_schedule(move |runnable| { + if thread::current().id() != spawn_thread_id { + panic!("Task would be run on a different thread than spawned on."); + } + QUEUE.with(move |queue| queue.borrow_mut().push(ByDuration(runnable))); + }); + let future_fn = make_future_fn(future); + let (runnable, task) = unsafe { + Builder::new() + .metadata(DurationMetadata { + inner: Cell::new(Duration::default()), + }) + .spawn_unchecked(future_fn, schedule) + }; + + // Schedule the task by pushing it into the queue. + runnable.schedule(); + + task +} + +pub fn block_on<F>(future: F) +where + F: Future<Output = ()> + 'static, +{ + let task = spawn(future); + while !task.is_finished() { + let Some(runnable) = QUEUE.with(|queue| queue.borrow_mut().pop()) else { + thread::yield_now(); + continue; + }; + runnable.0.run(); + } +} + +fn main() { + // Spawn a future and await its result.
+ block_on(async { + let (sender, receiver) = channel::bounded(1); + let world = spawn(async move { + receiver.recv().await.unwrap(); + println!("world.") + }); + let hello = spawn(async move { + sender.send(()).await.unwrap(); + print!("Hello, ") + }); + future::zip(hello, world).await; + }); +} diff --git a/external/vendor/async-task/src/header.rs b/external/vendor/async-task/src/header.rs new file mode 100644 index 0000000000..ee84035b83 --- /dev/null +++ b/external/vendor/async-task/src/header.rs @@ -0,0 +1,177 @@ +use core::cell::UnsafeCell; +use core::fmt; +use core::task::Waker; + +#[cfg(not(feature = "portable-atomic"))] +use core::sync::atomic::AtomicUsize; +use core::sync::atomic::Ordering; +#[cfg(feature = "portable-atomic")] +use portable_atomic::AtomicUsize; + +use crate::raw::TaskVTable; +use crate::state::*; +use crate::utils::abort_on_panic; + +/// The header of a task. +/// +/// This header is stored in memory at the beginning of the heap-allocated task. +pub(crate) struct Header<M> { + /// Current state of the task. + /// + /// Contains flags representing the current state and the reference count. + pub(crate) state: AtomicUsize, + + /// The task that is blocked on the `Task` handle. + /// + /// This waker needs to be woken up once the task completes or is closed. + pub(crate) awaiter: UnsafeCell<Option<Waker>>, + + /// The virtual table. + /// + /// In addition to the actual waker virtual table, it also contains pointers to several other + /// methods necessary for bookkeeping the heap-allocated task. + pub(crate) vtable: &'static TaskVTable, + + /// Metadata associated with the task. + /// + /// This metadata may be provided to the user. + pub(crate) metadata: M, + + /// Whether or not a panic that occurs in the task should be propagated. + #[cfg(feature = "std")] + pub(crate) propagate_panic: bool, +} + +impl<M> Header<M> { + /// Notifies the awaiter blocked on this task. + /// + /// If the awaiter is the same as the current waker, it will not be notified. + #[inline] + pub(crate) fn notify(&self, current: Option<&Waker>) { + if let Some(w) = self.take(current) { + abort_on_panic(|| w.wake()); + } + } + + /// Takes the awaiter blocked on this task. + /// + /// If there is no awaiter or if it is the same as the current waker, returns `None`. + #[inline] + pub(crate) fn take(&self, current: Option<&Waker>) -> Option<Waker> { + // Set the bit indicating that the task is notifying its awaiter. + let state = self.state.fetch_or(NOTIFYING, Ordering::AcqRel); + + // If the task was not notifying or registering an awaiter... + if state & (NOTIFYING | REGISTERING) == 0 { + // Take the waker out. + let waker = unsafe { (*self.awaiter.get()).take() }; + + // Unset the bit indicating that the task is notifying its awaiter. + self.state + .fetch_and(!NOTIFYING & !AWAITER, Ordering::Release); + + // Finally, notify the waker if it's different from the current waker. + if let Some(w) = waker { + match current { + None => return Some(w), + Some(c) if !w.will_wake(c) => return Some(w), + Some(_) => abort_on_panic(|| drop(w)), + } + } + } + + None + } + + /// Registers a new awaiter blocked on this task. + /// + /// This method is called when `Task` is polled and it has not yet completed. + #[inline] + pub(crate) fn register(&self, waker: &Waker) { + // Load the state and synchronize with it. + let mut state = self.state.fetch_or(0, Ordering::Acquire); + + loop { + // There can't be two concurrent registrations because `Task` can only be polled + // by a unique pinned reference.
+ debug_assert!(state & REGISTERING == 0); + + // If we're in the notifying state at this moment, just wake and return without + // registering. + if state & NOTIFYING != 0 { + abort_on_panic(|| waker.wake_by_ref()); + return; + } + + // Mark the state to let other threads know we're registering a new awaiter. + match self.state.compare_exchange_weak( + state, + state | REGISTERING, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + state |= REGISTERING; + break; + } + Err(s) => state = s, + } + } + + // Put the waker into the awaiter field. + unsafe { + abort_on_panic(|| (*self.awaiter.get()) = Some(waker.clone())); + } + + // This variable will contain the newly registered waker if a notification comes in before + // we complete registration. + let mut waker = None; + + loop { + // If there was a notification, take the waker out of the awaiter field. + if state & NOTIFYING != 0 { + if let Some(w) = unsafe { (*self.awaiter.get()).take() } { + abort_on_panic(|| waker = Some(w)); + } + } + + // The new state is not being notified nor registered, but there might or might not be + // an awaiter depending on whether there was a concurrent notification. + let new = if waker.is_none() { + (state & !NOTIFYING & !REGISTERING) | AWAITER + } else { + state & !NOTIFYING & !REGISTERING & !AWAITER + }; + + match self + .state + .compare_exchange_weak(state, new, Ordering::AcqRel, Ordering::Acquire) + { + Ok(_) => break, + Err(s) => state = s, + } + } + + // If there was a notification during registration, wake the awaiter now. + if let Some(w) = waker { + abort_on_panic(|| w.wake()); + } + } +} + +impl<M: fmt::Debug> fmt::Debug for Header<M> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let state = self.state.load(Ordering::SeqCst); + + f.debug_struct("Header") + .field("scheduled", &(state & SCHEDULED != 0)) + .field("running", &(state & RUNNING != 0)) + .field("completed", &(state & COMPLETED != 0)) + .field("closed", &(state & CLOSED != 0)) + .field("awaiter", &(state & AWAITER != 0)) + .field("task", &(state & TASK != 0)) + .field("ref_count", &(state / REFERENCE)) + .field("metadata", &self.metadata) + .finish() + } +} diff --git a/external/vendor/async-task/src/lib.rs b/external/vendor/async-task/src/lib.rs new file mode 100644 index 0000000000..c8f67028e8 --- /dev/null +++ b/external/vendor/async-task/src/lib.rs @@ -0,0 +1,118 @@ +//! Task abstraction for building executors. +//! +//! To spawn a future onto an executor, we first need to allocate it on the heap and keep some +//! state attached to it. The state indicates whether the future is ready for polling, waiting to +//! be woken up, or completed. Such a stateful future is called a *task*. +//! +//! All executors have a queue that holds scheduled tasks: +//! +//! ``` +//! let (sender, receiver) = flume::unbounded(); +//! # +//! # // A future that will get spawned. +//! # let future = async { 1 + 2 }; +//! # +//! # // A function that schedules the task when it gets woken up. +//! # let schedule = move |runnable| sender.send(runnable).unwrap(); +//! # +//! # // Create a task. +//! # let (runnable, task) = async_task::spawn(future, schedule); +//! ``` +//! +//! A task is created using either [`spawn()`], [`spawn_local()`], or [`spawn_unchecked()`] which +//! return a [`Runnable`] and a [`Task`]: +//! +//! ``` +//! # let (sender, receiver) = flume::unbounded(); +//! # +//! // A future that will be spawned. +//! let future = async { 1 + 2 }; +//! +//! // A function that schedules the task when it gets woken up. +//! 
let schedule = move |runnable| sender.send(runnable).unwrap(); +//! +//! // Construct a task. +//! let (runnable, task) = async_task::spawn(future, schedule); +//! +//! // Push the task into the queue by invoking its schedule function. +//! runnable.schedule(); +//! ``` +//! +//! The [`Runnable`] is used to poll the task's future, and the [`Task`] is used to await its +//! output. +//! +//! Finally, we need a loop that takes scheduled tasks from the queue and runs them: +//! +//! ```no_run +//! # let (sender, receiver) = flume::unbounded(); +//! # +//! # // A future that will get spawned. +//! # let future = async { 1 + 2 }; +//! # +//! # // A function that schedules the task when it gets woken up. +//! # let schedule = move |runnable| sender.send(runnable).unwrap(); +//! # +//! # // Create a task. +//! # let (runnable, task) = async_task::spawn(future, schedule); +//! # +//! # // Push the task into the queue by invoking its schedule function. +//! # runnable.schedule(); +//! # +//! for runnable in receiver { +//! runnable.run(); +//! } +//! ``` +//! +//! Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] +//! vanishes and only reappears when its [`Waker`][`core::task::Waker`] wakes the task, thus +//! scheduling it to be run again. + +#![no_std] +#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![doc(test(attr(deny(rust_2018_idioms, warnings))))] +#![doc(test(attr(allow(unused_extern_crates, unused_variables))))] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] + +extern crate alloc; +#[cfg(feature = "std")] +extern crate std; + +/// We can't use `?` in const contexts yet, so this macro acts +/// as a workaround. +macro_rules! leap { + ($x: expr) => {{ + match ($x) { + Some(val) => val, + None => return None, + } + }}; +} + +macro_rules! leap_unwrap { + ($x: expr) => {{ + match ($x) { + Some(val) => val, + None => panic!("called `Option::unwrap()` on a `None` value"), + } + }}; +} + +mod header; +mod raw; +mod runnable; +mod state; +mod task; +mod utils; + +pub use crate::runnable::{ + spawn, spawn_unchecked, Builder, Runnable, Schedule, ScheduleInfo, WithInfo, +}; +pub use crate::task::{FallibleTask, Task}; + +#[cfg(feature = "std")] +pub use crate::runnable::spawn_local; diff --git a/external/vendor/async-task/src/raw.rs b/external/vendor/async-task/src/raw.rs new file mode 100644 index 0000000000..7a45dadb6e --- /dev/null +++ b/external/vendor/async-task/src/raw.rs @@ -0,0 +1,756 @@ +use alloc::alloc::Layout as StdLayout; +use core::cell::UnsafeCell; +use core::future::Future; +use core::mem::{self, ManuallyDrop}; +use core::pin::Pin; +use core::ptr::NonNull; +use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; + +#[cfg(not(feature = "portable-atomic"))] +use core::sync::atomic::AtomicUsize; +use core::sync::atomic::Ordering; +#[cfg(feature = "portable-atomic")] +use portable_atomic::AtomicUsize; + +use crate::header::Header; +use crate::runnable::{Schedule, ScheduleInfo}; +use crate::state::*; +use crate::utils::{abort, abort_on_panic, max, Layout}; +use crate::Runnable; + +#[cfg(feature = "std")] +pub(crate) type Panic = alloc::boxed::Box; + +#[cfg(not(feature = "std"))] +pub(crate) type Panic = core::convert::Infallible; + +/// The vtable for a task. 
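// Illustrative sketch, not part of the vendored sources: the module docs above describe a queue
// of runnables plus a loop that drains it. Using the `flume` channel and the `futures-lite`
// block_on that the crate's own examples rely on, the whole round trip looks roughly like this.
fn run_to_completion() {
    let (sender, receiver) = flume::unbounded();

    // The schedule function only enqueues the runnable; it never runs or drops it.
    let schedule = move |runnable| sender.send(runnable).unwrap();
    let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule);

    // Push the task into the queue, then drain it; `run()` polls the future once per pass.
    runnable.schedule();
    while let Ok(runnable) = receiver.try_recv() {
        runnable.run();
    }

    // The `Task` handle resolves to the future's output once polling has completed it.
    assert_eq!(futures_lite::future::block_on(task), 3);
}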
+pub(crate) struct TaskVTable { + /// Schedules the task. + pub(crate) schedule: unsafe fn(*const (), ScheduleInfo), + + /// Drops the future inside the task. + pub(crate) drop_future: unsafe fn(*const ()), + + /// Returns a pointer to the output stored after completion. + pub(crate) get_output: unsafe fn(*const ()) -> *const (), + + /// Drops the task reference (`Runnable` or `Waker`). + pub(crate) drop_ref: unsafe fn(ptr: *const ()), + + /// Destroys the task. + pub(crate) destroy: unsafe fn(*const ()), + + /// Runs the task. + pub(crate) run: unsafe fn(*const ()) -> bool, + + /// Creates a new waker associated with the task. + pub(crate) clone_waker: unsafe fn(ptr: *const ()) -> RawWaker, + + /// The memory layout of the task. This information enables + /// debuggers to decode raw task memory blobs. Do not remove + /// the field, even if it appears to be unused. + #[allow(unused)] + pub(crate) layout_info: &'static TaskLayout, +} + +/// Memory layout of a task. +/// +/// This struct contains the following information: +/// +/// 1. How to allocate and deallocate the task. +/// 2. How to access the fields inside the task. +#[derive(Clone, Copy)] +pub(crate) struct TaskLayout { + /// Memory layout of the whole task. + pub(crate) layout: StdLayout, + + /// Offset into the task at which the schedule function is stored. + pub(crate) offset_s: usize, + + /// Offset into the task at which the future is stored. + pub(crate) offset_f: usize, + + /// Offset into the task at which the output is stored. + pub(crate) offset_r: usize, +} + +/// Raw pointers to the fields inside a task. +pub(crate) struct RawTask { + /// The task header. + pub(crate) header: *const Header, + + /// The schedule function. + pub(crate) schedule: *const S, + + /// The future. + pub(crate) future: *mut F, + + /// The output of the future. + pub(crate) output: *mut Result, +} + +impl Copy for RawTask {} + +impl Clone for RawTask { + fn clone(&self) -> Self { + *self + } +} + +impl RawTask { + const TASK_LAYOUT: TaskLayout = Self::eval_task_layout(); + + /// Computes the memory layout for a task. + #[inline] + const fn eval_task_layout() -> TaskLayout { + // Compute the layouts for `Header`, `S`, `F`, and `T`. + let layout_header = Layout::new::>(); + let layout_s = Layout::new::(); + let layout_f = Layout::new::(); + let layout_r = Layout::new::>(); + + // Compute the layout for `union { F, T }`. + let size_union = max(layout_f.size(), layout_r.size()); + let align_union = max(layout_f.align(), layout_r.align()); + let layout_union = Layout::from_size_align(size_union, align_union); + + // Compute the layout for `Header` followed `S` and `union { F, T }`. + let layout = layout_header; + let (layout, offset_s) = leap_unwrap!(layout.extend(layout_s)); + let (layout, offset_union) = leap_unwrap!(layout.extend(layout_union)); + let offset_f = offset_union; + let offset_r = offset_union; + + TaskLayout { + layout: unsafe { layout.into_std() }, + offset_s, + offset_f, + offset_r, + } + } +} + +impl RawTask +where + F: Future, + S: Schedule, +{ + const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( + Self::clone_waker, + Self::wake, + Self::wake_by_ref, + Self::drop_waker, + ); + + /// Allocates a task with the given `future` and `schedule` function. + /// + /// It is assumed that initially only the `Runnable` and the `Task` exist. 
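// Illustrative sketch, not part of the vendored sources: the "header, then schedule function,
// then a union of future and output" layout computed above can be reproduced with std's
// `Layout` API. `HeaderLike`, `Sched`, `Fut` and `Out` are placeholder types for the example.
use std::alloc::Layout;

struct HeaderLike { _state: usize, _vtable: *const () }
struct Sched; // stands in for a zero-sized schedule function
type Fut = [u8; 24];
type Out = u64;

fn combined_layout() -> (Layout, usize, usize) {
    let header = Layout::new::<HeaderLike>();
    let sched = Layout::new::<Sched>();
    let fut = Layout::new::<Fut>();
    let out = Layout::new::<Out>();

    // union { future, output }: take the larger size and the stricter alignment.
    let union_layout =
        Layout::from_size_align(fut.size().max(out.size()), fut.align().max(out.align())).unwrap();

    // Header, then the schedule function, then the union; `extend` returns each field's offset.
    let (layout, offset_s) = header.extend(sched).unwrap();
    let (layout, offset_union) = layout.extend(union_layout).unwrap();
    (layout.pad_to_align(), offset_s, offset_union)
}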
+ pub(crate) fn allocate<'a, Gen: FnOnce(&'a M) -> F>( + future: Gen, + schedule: S, + builder: crate::Builder, + ) -> NonNull<()> + where + F: 'a, + M: 'a, + { + // Compute the layout of the task for allocation. Abort if the computation fails. + // + // n.b. notgull: task_layout now automatically aborts instead of panicking + let task_layout = Self::task_layout(); + + unsafe { + // Allocate enough space for the entire task. + let ptr = match NonNull::new(alloc::alloc::alloc(task_layout.layout) as *mut ()) { + None => abort(), + Some(p) => p, + }; + + let raw = Self::from_ptr(ptr.as_ptr()); + + let crate::Builder { + metadata, + #[cfg(feature = "std")] + propagate_panic, + } = builder; + + // Write the header as the first field of the task. + (raw.header as *mut Header).write(Header { + state: AtomicUsize::new(SCHEDULED | TASK | REFERENCE), + awaiter: UnsafeCell::new(None), + vtable: &TaskVTable { + schedule: Self::schedule, + drop_future: Self::drop_future, + get_output: Self::get_output, + drop_ref: Self::drop_ref, + destroy: Self::destroy, + run: Self::run, + clone_waker: Self::clone_waker, + layout_info: &Self::TASK_LAYOUT, + }, + metadata, + #[cfg(feature = "std")] + propagate_panic, + }); + + // Write the schedule function as the third field of the task. + (raw.schedule as *mut S).write(schedule); + + // Generate the future, now that the metadata has been pinned in place. + let future = abort_on_panic(|| future(&(*raw.header).metadata)); + + // Write the future as the fourth field of the task. + raw.future.write(future); + + ptr + } + } + + /// Creates a `RawTask` from a raw task pointer. + #[inline] + pub(crate) fn from_ptr(ptr: *const ()) -> Self { + let task_layout = Self::task_layout(); + let p = ptr as *const u8; + + unsafe { + Self { + header: p as *const Header, + schedule: p.add(task_layout.offset_s) as *const S, + future: p.add(task_layout.offset_f) as *mut F, + output: p.add(task_layout.offset_r) as *mut Result, + } + } + } + + /// Returns the layout of the task. + #[inline] + fn task_layout() -> TaskLayout { + Self::TASK_LAYOUT + } + /// Wakes a waker. + unsafe fn wake(ptr: *const ()) { + // This is just an optimization. If the schedule function has captured variables, then + // we'll do less reference counting if we wake the waker by reference and then drop it. + if mem::size_of::() > 0 { + Self::wake_by_ref(ptr); + Self::drop_waker(ptr); + return; + } + + let raw = Self::from_ptr(ptr); + + let mut state = (*raw.header).state.load(Ordering::Acquire); + + loop { + // If the task is completed or closed, it can't be woken up. + if state & (COMPLETED | CLOSED) != 0 { + // Drop the waker. + Self::drop_waker(ptr); + break; + } + + // If the task is already scheduled, we just need to synchronize with the thread that + // will run the task by "publishing" our current view of the memory. + if state & SCHEDULED != 0 { + // Update the state without actually modifying it. + match (*raw.header).state.compare_exchange_weak( + state, + state, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // Drop the waker. + Self::drop_waker(ptr); + break; + } + Err(s) => state = s, + } + } else { + // Mark the task as scheduled. + match (*raw.header).state.compare_exchange_weak( + state, + state | SCHEDULED, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // If the task is not yet scheduled and isn't currently running, now is the + // time to schedule it. + if state & RUNNING == 0 { + // Schedule the task. 
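// Illustrative sketch, not part of the vendored sources: the `mem::size_of::<S>() > 0` checks
// above distinguish schedule closures that captured state from zero-sized ones. A closure with
// no captures compiles to a zero-sized type, so the comparison is against a compile-time
// constant.
fn closure_sizes() {
    let queue_name = String::from("worker-0");

    let no_capture = |_r: u32| {};
    let with_capture = move |_r: u32| {
        let _ = &queue_name; // captures the String by value
    };

    assert_eq!(std::mem::size_of_val(&no_capture), 0); // zero-sized: nothing captured
    assert!(std::mem::size_of_val(&with_capture) > 0); // carries the captured String
}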
+ Self::schedule(ptr, ScheduleInfo::new(false)); + } else { + // Drop the waker. + Self::drop_waker(ptr); + } + + break; + } + Err(s) => state = s, + } + } + } + } + + /// Wakes a waker by reference. + unsafe fn wake_by_ref(ptr: *const ()) { + let raw = Self::from_ptr(ptr); + + let mut state = (*raw.header).state.load(Ordering::Acquire); + + loop { + // If the task is completed or closed, it can't be woken up. + if state & (COMPLETED | CLOSED) != 0 { + break; + } + + // If the task is already scheduled, we just need to synchronize with the thread that + // will run the task by "publishing" our current view of the memory. + if state & SCHEDULED != 0 { + // Update the state without actually modifying it. + match (*raw.header).state.compare_exchange_weak( + state, + state, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => break, + Err(s) => state = s, + } + } else { + // If the task is not running, we can schedule right away. + let new = if state & RUNNING == 0 { + (state | SCHEDULED) + REFERENCE + } else { + state | SCHEDULED + }; + + // Mark the task as scheduled. + match (*raw.header).state.compare_exchange_weak( + state, + new, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // If the task is not running, now is the time to schedule. + if state & RUNNING == 0 { + // If the reference count overflowed, abort. + if state > isize::MAX as usize { + abort(); + } + + // Schedule the task. There is no need to call `Self::schedule(ptr)` + // because the schedule function cannot be destroyed while the waker is + // still alive. + let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ())); + (*raw.schedule).schedule(task, ScheduleInfo::new(false)); + } + + break; + } + Err(s) => state = s, + } + } + } + } + + /// Clones a waker. + unsafe fn clone_waker(ptr: *const ()) -> RawWaker { + let raw = Self::from_ptr(ptr); + + // Increment the reference count. With any kind of reference-counted data structure, + // relaxed ordering is appropriate when incrementing the counter. + let state = (*raw.header).state.fetch_add(REFERENCE, Ordering::Relaxed); + + // If the reference count overflowed, abort. + if state > isize::MAX as usize { + abort(); + } + + RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE) + } + + /// Drops a waker. + /// + /// This function will decrement the reference count. If it drops down to zero, the associated + /// `Task` has been dropped too, and the task has not been completed, then it will get + /// scheduled one more time so that its future gets dropped by the executor. + #[inline] + unsafe fn drop_waker(ptr: *const ()) { + let raw = Self::from_ptr(ptr); + + // Decrement the reference count. + let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE; + + // If this was the last reference to the task and the `Task` has been dropped too, + // then we need to decide how to destroy the task. + if new & !(REFERENCE - 1) == 0 && new & TASK == 0 { + if new & (COMPLETED | CLOSED) == 0 { + // If the task was not completed nor closed, close it and schedule one more time so + // that its future gets dropped by the executor. + (*raw.header) + .state + .store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release); + Self::schedule(ptr, ScheduleInfo::new(false)); + } else { + // Otherwise, destroy the task right away. + Self::destroy(ptr); + } + } + } + + /// Drops a task reference (`Runnable` or `Waker`). + /// + /// This function will decrement the reference count. 
If it drops down to zero and the + /// associated `Task` handle has been dropped too, then the task gets destroyed. + #[inline] + unsafe fn drop_ref(ptr: *const ()) { + let raw = Self::from_ptr(ptr); + + // Decrement the reference count. + let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE; + + // If this was the last reference to the task and the `Task` has been dropped too, + // then destroy the task. + if new & !(REFERENCE - 1) == 0 && new & TASK == 0 { + Self::destroy(ptr); + } + } + + /// Schedules a task for running. + /// + /// This function doesn't modify the state of the task. It only passes the task reference to + /// its schedule function. + unsafe fn schedule(ptr: *const (), info: ScheduleInfo) { + let raw = Self::from_ptr(ptr); + + // If the schedule function has captured variables, create a temporary waker that prevents + // the task from getting deallocated while the function is being invoked. + let _waker; + if mem::size_of::() > 0 { + _waker = Waker::from_raw(Self::clone_waker(ptr)); + } + + let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ())); + (*raw.schedule).schedule(task, info); + } + + /// Drops the future inside a task. + #[inline] + unsafe fn drop_future(ptr: *const ()) { + let raw = Self::from_ptr(ptr); + + // We need a safeguard against panics because the destructor can panic. + abort_on_panic(|| { + raw.future.drop_in_place(); + }) + } + + /// Returns a pointer to the output inside a task. + unsafe fn get_output(ptr: *const ()) -> *const () { + let raw = Self::from_ptr(ptr); + raw.output as *const () + } + + /// Cleans up task's resources and deallocates it. + /// + /// The schedule function will be dropped, and the task will then get deallocated. + /// The task must be closed before this function is called. + #[inline] + unsafe fn destroy(ptr: *const ()) { + let raw = Self::from_ptr(ptr); + let task_layout = Self::task_layout(); + + // We need a safeguard against panics because destructors can panic. + abort_on_panic(|| { + // Drop the header along with the metadata. + (raw.header as *mut Header).drop_in_place(); + + // Drop the schedule function. + (raw.schedule as *mut S).drop_in_place(); + }); + + // Finally, deallocate the memory reserved by the task. + alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout); + } + + /// Runs a task. + /// + /// If polling its future panics, the task will be closed and the panic will be propagated into + /// the caller. + unsafe fn run(ptr: *const ()) -> bool { + let raw = Self::from_ptr(ptr); + + // Create a context from the raw task pointer and the vtable inside the its header. + let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE))); + let cx = &mut Context::from_waker(&waker); + + let mut state = (*raw.header).state.load(Ordering::Acquire); + + // Update the task's state before polling its future. + loop { + // If the task has already been closed, drop the task reference and return. + if state & CLOSED != 0 { + // Drop the future. + Self::drop_future(ptr); + + // Mark the task as unscheduled. + let state = (*raw.header).state.fetch_and(!SCHEDULED, Ordering::AcqRel); + + // Take the awaiter out. + let mut awaiter = None; + if state & AWAITER != 0 { + awaiter = (*raw.header).take(None); + } + + // Drop the task reference. + Self::drop_ref(ptr); + + // Notify the awaiter that the future has been dropped. + if let Some(w) = awaiter { + abort_on_panic(|| w.wake()); + } + return false; + } + + // Mark the task as unscheduled and running. 
+ match (*raw.header).state.compare_exchange_weak( + state, + (state & !SCHEDULED) | RUNNING, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // Update the state because we're continuing with polling the future. + state = (state & !SCHEDULED) | RUNNING; + break; + } + Err(s) => state = s, + } + } + + // Poll the inner future, but surround it with a guard that closes the task in case polling + // panics. + // If available, we should also try to catch the panic so that it is propagated correctly. + let guard = Guard(raw); + + // Panic propagation is not available for no_std. + #[cfg(not(feature = "std"))] + let poll = ::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok); + + #[cfg(feature = "std")] + let poll = { + // Check if we should propagate panics. + if (*raw.header).propagate_panic { + // Use catch_unwind to catch the panic. + match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + ::poll(Pin::new_unchecked(&mut *raw.future), cx) + })) { + Ok(Poll::Ready(v)) => Poll::Ready(Ok(v)), + Ok(Poll::Pending) => Poll::Pending, + Err(e) => Poll::Ready(Err(e)), + } + } else { + ::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok) + } + }; + + mem::forget(guard); + + match poll { + Poll::Ready(out) => { + // Replace the future with its output. + Self::drop_future(ptr); + raw.output.write(out); + + // The task is now completed. + loop { + // If the `Task` is dropped, we'll need to close it and drop the output. + let new = if state & TASK == 0 { + (state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED + } else { + (state & !RUNNING & !SCHEDULED) | COMPLETED + }; + + // Mark the task as not running and completed. + match (*raw.header).state.compare_exchange_weak( + state, + new, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // If the `Task` is dropped or if the task was closed while running, + // now it's time to drop the output. + if state & TASK == 0 || state & CLOSED != 0 { + // Drop the output. + abort_on_panic(|| raw.output.drop_in_place()); + } + + // Take the awaiter out. + let mut awaiter = None; + if state & AWAITER != 0 { + awaiter = (*raw.header).take(None); + } + + // Drop the task reference. + Self::drop_ref(ptr); + + // Notify the awaiter that the future has been dropped. + if let Some(w) = awaiter { + abort_on_panic(|| w.wake()); + } + break; + } + Err(s) => state = s, + } + } + } + Poll::Pending => { + let mut future_dropped = false; + + // The task is still not completed. + loop { + // If the task was closed while running, we'll need to unschedule in case it + // was woken up and then destroy it. + let new = if state & CLOSED != 0 { + state & !RUNNING & !SCHEDULED + } else { + state & !RUNNING + }; + + if state & CLOSED != 0 && !future_dropped { + // The thread that closed the task didn't drop the future because it was + // running so now it's our responsibility to do so. + Self::drop_future(ptr); + future_dropped = true; + } + + // Mark the task as not running. + match (*raw.header).state.compare_exchange_weak( + state, + new, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(state) => { + // If the task was closed while running, we need to notify the awaiter. + // If the task was woken up while running, we need to schedule it. + // Otherwise, we just drop the task reference. + if state & CLOSED != 0 { + // Take the awaiter out. + let mut awaiter = None; + if state & AWAITER != 0 { + awaiter = (*raw.header).take(None); + } + + // Drop the task reference. 
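// Illustrative sketch, not part of the vendored sources: when `propagate_panic` is set, the
// code above captures a panic from the poll with `catch_unwind` and stores it as the output.
// The same pattern, reduced to a plain function call:
use std::panic::{self, AssertUnwindSafe};

fn call_catching_panic<R>(call: impl FnOnce() -> R) -> Result<R, Box<dyn std::any::Any + Send>> {
    // AssertUnwindSafe is needed because arbitrary closures are not automatically UnwindSafe.
    panic::catch_unwind(AssertUnwindSafe(call))
}

fn demo() {
    assert!(call_catching_panic(|| 1 + 2).is_ok());
    assert!(call_catching_panic(|| -> u32 { panic!("poll panicked") }).is_err());
}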
+ Self::drop_ref(ptr); + + // Notify the awaiter that the future has been dropped. + if let Some(w) = awaiter { + abort_on_panic(|| w.wake()); + } + } else if state & SCHEDULED != 0 { + // The thread that woke the task up didn't reschedule it because + // it was running so now it's our responsibility to do so. + Self::schedule(ptr, ScheduleInfo::new(true)); + return true; + } else { + // Drop the task reference. + Self::drop_ref(ptr); + } + break; + } + Err(s) => state = s, + } + } + } + } + + return false; + + /// A guard that closes the task if polling its future panics. + struct Guard(RawTask) + where + F: Future, + S: Schedule; + + impl Drop for Guard + where + F: Future, + S: Schedule, + { + fn drop(&mut self) { + let raw = self.0; + let ptr = raw.header as *const (); + + unsafe { + let mut state = (*raw.header).state.load(Ordering::Acquire); + + loop { + // If the task was closed while running, then unschedule it, drop its + // future, and drop the task reference. + if state & CLOSED != 0 { + // The thread that closed the task didn't drop the future because it + // was running so now it's our responsibility to do so. + RawTask::::drop_future(ptr); + + // Mark the task as not running and not scheduled. + (*raw.header) + .state + .fetch_and(!RUNNING & !SCHEDULED, Ordering::AcqRel); + + // Take the awaiter out. + let mut awaiter = None; + if state & AWAITER != 0 { + awaiter = (*raw.header).take(None); + } + + // Drop the task reference. + RawTask::::drop_ref(ptr); + + // Notify the awaiter that the future has been dropped. + if let Some(w) = awaiter { + abort_on_panic(|| w.wake()); + } + break; + } + + // Mark the task as not running, not scheduled, and closed. + match (*raw.header).state.compare_exchange_weak( + state, + (state & !RUNNING & !SCHEDULED) | CLOSED, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(state) => { + // Drop the future because the task is now closed. + RawTask::::drop_future(ptr); + + // Take the awaiter out. + let mut awaiter = None; + if state & AWAITER != 0 { + awaiter = (*raw.header).take(None); + } + + // Drop the task reference. + RawTask::::drop_ref(ptr); + + // Notify the awaiter that the future has been dropped. + if let Some(w) = awaiter { + abort_on_panic(|| w.wake()); + } + break; + } + Err(s) => state = s, + } + } + } + } + } + } +} diff --git a/external/vendor/async-task/src/runnable.rs b/external/vendor/async-task/src/runnable.rs new file mode 100644 index 0000000000..25d44dced7 --- /dev/null +++ b/external/vendor/async-task/src/runnable.rs @@ -0,0 +1,945 @@ +use core::fmt; +use core::future::Future; +use core::marker::PhantomData; +use core::mem; +use core::ptr::NonNull; +use core::sync::atomic::Ordering; +use core::task::Waker; + +use alloc::boxed::Box; + +use crate::header::Header; +use crate::raw::RawTask; +use crate::state::*; +use crate::Task; + +mod sealed { + use super::*; + pub trait Sealed {} + + impl Sealed for F where F: Fn(Runnable) {} + + impl Sealed for WithInfo where F: Fn(Runnable, ScheduleInfo) {} +} + +/// A builder that creates a new task. +#[derive(Debug)] +pub struct Builder { + /// The metadata associated with the task. + pub(crate) metadata: M, + + /// Whether or not a panic that occurs in the task should be propagated. + #[cfg(feature = "std")] + pub(crate) propagate_panic: bool, +} + +impl Default for Builder { + fn default() -> Self { + Builder::new().metadata(M::default()) + } +} + +/// Extra scheduling information that can be passed to the scheduling function. 
+/// +/// The data source of this struct is directly from the actual implementation +/// of the crate itself, different from [`Runnable`]'s metadata, which is +/// managed by the caller. +/// +/// # Examples +/// +/// ``` +/// use async_task::{Runnable, ScheduleInfo, WithInfo}; +/// use std::sync::{Arc, Mutex}; +/// +/// // The future inside the task. +/// let future = async { +/// println!("Hello, world!"); +/// }; +/// +/// // If the task gets woken up while running, it will be sent into this channel. +/// let (s, r) = flume::unbounded(); +/// // Otherwise, it will be placed into this slot. +/// let lifo_slot = Arc::new(Mutex::new(None)); +/// let schedule = move |runnable: Runnable, info: ScheduleInfo| { +/// if info.woken_while_running { +/// s.send(runnable).unwrap() +/// } else { +/// let last = lifo_slot.lock().unwrap().replace(runnable); +/// if let Some(last) = last { +/// s.send(last).unwrap() +/// } +/// } +/// }; +/// +/// // Create the actual scheduler to be spawned with some future. +/// let scheduler = WithInfo(schedule); +/// // Create a task with the future and the scheduler. +/// let (runnable, task) = async_task::spawn(future, scheduler); +/// ``` +#[derive(Debug, Copy, Clone)] +#[non_exhaustive] +pub struct ScheduleInfo { + /// Indicates whether the task gets woken up while running. + /// + /// It is set to true usually because the task has yielded itself to the + /// scheduler. + pub woken_while_running: bool, +} + +impl ScheduleInfo { + pub(crate) fn new(woken_while_running: bool) -> Self { + ScheduleInfo { + woken_while_running, + } + } +} + +/// The trait for scheduling functions. +pub trait Schedule: sealed::Sealed { + /// The actual scheduling procedure. + fn schedule(&self, runnable: Runnable, info: ScheduleInfo); +} + +impl Schedule for F +where + F: Fn(Runnable), +{ + fn schedule(&self, runnable: Runnable, _: ScheduleInfo) { + self(runnable) + } +} + +/// Pass a scheduling function with more scheduling information - a.k.a. +/// [`ScheduleInfo`]. +/// +/// Sometimes, it's useful to pass the runnable's state directly to the +/// scheduling function, such as whether it's woken up while running. The +/// scheduler can thus use the information to determine its scheduling +/// strategy. +/// +/// The data source of [`ScheduleInfo`] is directly from the actual +/// implementation of the crate itself, different from [`Runnable`]'s metadata, +/// which is managed by the caller. +/// +/// # Examples +/// +/// ``` +/// use async_task::{ScheduleInfo, WithInfo}; +/// use std::sync::{Arc, Mutex}; +/// +/// // The future inside the task. +/// let future = async { +/// println!("Hello, world!"); +/// }; +/// +/// // If the task gets woken up while running, it will be sent into this channel. +/// let (s, r) = flume::unbounded(); +/// // Otherwise, it will be placed into this slot. +/// let lifo_slot = Arc::new(Mutex::new(None)); +/// let schedule = move |runnable, info: ScheduleInfo| { +/// if info.woken_while_running { +/// s.send(runnable).unwrap() +/// } else { +/// let last = lifo_slot.lock().unwrap().replace(runnable); +/// if let Some(last) = last { +/// s.send(last).unwrap() +/// } +/// } +/// }; +/// +/// // Create a task with the future and the schedule function. 
+/// let (runnable, task) = async_task::spawn(future, WithInfo(schedule)); +/// ``` +#[derive(Debug)] +pub struct WithInfo(pub F); + +impl From for WithInfo { + fn from(value: F) -> Self { + WithInfo(value) + } +} + +impl Schedule for WithInfo +where + F: Fn(Runnable, ScheduleInfo), +{ + fn schedule(&self, runnable: Runnable, info: ScheduleInfo) { + (self.0)(runnable, info) + } +} + +impl Builder<()> { + /// Creates a new task builder. + /// + /// By default, this task builder has no metadata. Use the [`metadata`] method to + /// set the metadata. + /// + /// # Examples + /// + /// ``` + /// use async_task::Builder; + /// + /// let (runnable, task) = Builder::new().spawn(|()| async {}, |_| {}); + /// ``` + pub fn new() -> Builder<()> { + Builder { + metadata: (), + #[cfg(feature = "std")] + propagate_panic: false, + } + } + + /// Adds metadata to the task. + /// + /// In certain cases, it may be useful to associate some metadata with a task. For instance, + /// you may want to associate a name with a task, or a priority for a priority queue. This + /// method allows the user to attach arbitrary metadata to a task that is available through + /// the [`Runnable`] or the [`Task`]. + /// + /// # Examples + /// + /// This example creates an executor that associates a "priority" number with each task, and + /// then runs the tasks in order of priority. + /// + /// ``` + /// use async_task::{Builder, Runnable}; + /// use once_cell::sync::Lazy; + /// use std::cmp; + /// use std::collections::BinaryHeap; + /// use std::sync::Mutex; + /// + /// # smol::future::block_on(async { + /// /// A wrapper around a `Runnable` that implements `Ord` so that it can be used in a + /// /// priority queue. + /// struct TaskWrapper(Runnable); + /// + /// impl PartialEq for TaskWrapper { + /// fn eq(&self, other: &Self) -> bool { + /// self.0.metadata() == other.0.metadata() + /// } + /// } + /// + /// impl Eq for TaskWrapper {} + /// + /// impl PartialOrd for TaskWrapper { + /// fn partial_cmp(&self, other: &Self) -> Option { + /// Some(self.cmp(other)) + /// } + /// } + /// + /// impl Ord for TaskWrapper { + /// fn cmp(&self, other: &Self) -> cmp::Ordering { + /// self.0.metadata().cmp(other.0.metadata()) + /// } + /// } + /// + /// static EXECUTOR: Lazy>> = Lazy::new(|| { + /// Mutex::new(BinaryHeap::new()) + /// }); + /// + /// let schedule = |runnable| { + /// EXECUTOR.lock().unwrap().push(TaskWrapper(runnable)); + /// }; + /// + /// // Spawn a few tasks with different priorities. + /// let spawn_task = move |priority| { + /// let (runnable, task) = Builder::new().metadata(priority).spawn( + /// move |_| async move { priority }, + /// schedule, + /// ); + /// runnable.schedule(); + /// task + /// }; + /// + /// let t1 = spawn_task(1); + /// let t2 = spawn_task(2); + /// let t3 = spawn_task(3); + /// + /// // Run the tasks in order of priority. + /// let mut metadata_seen = vec![]; + /// while let Some(TaskWrapper(runnable)) = EXECUTOR.lock().unwrap().pop() { + /// metadata_seen.push(*runnable.metadata()); + /// runnable.run(); + /// } + /// + /// assert_eq!(metadata_seen, vec![3, 2, 1]); + /// assert_eq!(t1.await, 1); + /// assert_eq!(t2.await, 2); + /// assert_eq!(t3.await, 3); + /// # }); + /// ``` + pub fn metadata(self, metadata: M) -> Builder { + Builder { + metadata, + #[cfg(feature = "std")] + propagate_panic: self.propagate_panic, + } + } +} + +impl Builder { + /// Propagates panics that occur in the task. 
+ /// + /// When this is `true`, panics that occur in the task will be propagated to the caller of + /// the [`Task`]. When this is false, no special action is taken when a panic occurs in the + /// task, meaning that the caller of [`Runnable::run`] will observe a panic. + /// + /// This is only available when the `std` feature is enabled. By default, this is `false`. + /// + /// # Examples + /// + /// ``` + /// use async_task::Builder; + /// use futures_lite::future::poll_fn; + /// use std::future::Future; + /// use std::panic; + /// use std::pin::Pin; + /// use std::task::{Context, Poll}; + /// + /// fn did_panic(f: F) -> bool { + /// panic::catch_unwind(panic::AssertUnwindSafe(f)).is_err() + /// } + /// + /// # smol::future::block_on(async { + /// let (runnable1, mut task1) = Builder::new() + /// .propagate_panic(true) + /// .spawn(|()| async move { panic!() }, |_| {}); + /// + /// let (runnable2, mut task2) = Builder::new() + /// .propagate_panic(false) + /// .spawn(|()| async move { panic!() }, |_| {}); + /// + /// assert!(!did_panic(|| { runnable1.run(); })); + /// assert!(did_panic(|| { runnable2.run(); })); + /// + /// let waker = poll_fn(|cx| Poll::Ready(cx.waker().clone())).await; + /// let mut cx = Context::from_waker(&waker); + /// assert!(did_panic(|| { let _ = Pin::new(&mut task1).poll(&mut cx); })); + /// assert!(did_panic(|| { let _ = Pin::new(&mut task2).poll(&mut cx); })); + /// # }); + /// ``` + #[cfg(feature = "std")] + pub fn propagate_panic(self, propagate_panic: bool) -> Builder { + Builder { + metadata: self.metadata, + propagate_panic, + } + } + + /// Creates a new task. + /// + /// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its + /// output. + /// + /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] + /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run + /// again. + /// + /// When the task is woken, its [`Runnable`] is passed to the `schedule` function. + /// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it + /// should push it into a task queue so that it can be processed later. + /// + /// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider + /// using [`spawn_local()`] or [`spawn_unchecked()`] instead. + /// + /// # Examples + /// + /// ``` + /// use async_task::Builder; + /// + /// // The future inside the task. + /// let future = async { + /// println!("Hello, world!"); + /// }; + /// + /// // A function that schedules the task when it gets woken up. + /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with the future and the schedule function. + /// let (runnable, task) = Builder::new().spawn(|()| future, schedule); + /// ``` + pub fn spawn(self, future: F, schedule: S) -> (Runnable, Task) + where + F: FnOnce(&M) -> Fut, + Fut: Future + Send + 'static, + Fut::Output: Send + 'static, + S: Schedule + Send + Sync + 'static, + { + unsafe { self.spawn_unchecked(future, schedule) } + } + + /// Creates a new thread-local task. + /// + /// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the + /// [`Runnable`] is used or dropped on another thread, a panic will occur. + /// + /// This function is only available when the `std` feature for this crate is enabled. 
+ /// + /// # Examples + /// + /// ``` + /// use async_task::{Builder, Runnable}; + /// use flume::{Receiver, Sender}; + /// use std::rc::Rc; + /// + /// thread_local! { + /// // A queue that holds scheduled tasks. + /// static QUEUE: (Sender, Receiver) = flume::unbounded(); + /// } + /// + /// // Make a non-Send future. + /// let msg: Rc = "Hello, world!".into(); + /// let future = async move { + /// println!("{}", msg); + /// }; + /// + /// // A function that schedules the task when it gets woken up. + /// let s = QUEUE.with(|(s, _)| s.clone()); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with the future and the schedule function. + /// let (runnable, task) = Builder::new().spawn_local(move |()| future, schedule); + /// ``` + #[cfg(feature = "std")] + pub fn spawn_local( + self, + future: F, + schedule: S, + ) -> (Runnable, Task) + where + F: FnOnce(&M) -> Fut, + Fut: Future + 'static, + Fut::Output: 'static, + S: Schedule + Send + Sync + 'static, + { + use std::mem::ManuallyDrop; + use std::pin::Pin; + use std::task::{Context, Poll}; + use std::thread::{self, ThreadId}; + + #[inline] + fn thread_id() -> ThreadId { + std::thread_local! { + static ID: ThreadId = thread::current().id(); + } + ID.try_with(|id| *id) + .unwrap_or_else(|_| thread::current().id()) + } + + struct Checked { + id: ThreadId, + inner: ManuallyDrop, + } + + impl Drop for Checked { + fn drop(&mut self) { + assert!( + self.id == thread_id(), + "local task dropped by a thread that didn't spawn it" + ); + unsafe { + ManuallyDrop::drop(&mut self.inner); + } + } + } + + impl Future for Checked { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + assert!( + self.id == thread_id(), + "local task polled by a thread that didn't spawn it" + ); + unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) } + } + } + + // Wrap the future into one that checks which thread it's on. + let future = move |meta| { + let future = future(meta); + + Checked { + id: thread_id(), + inner: ManuallyDrop::new(future), + } + }; + + unsafe { self.spawn_unchecked(future, schedule) } + } + + /// Creates a new task without [`Send`], [`Sync`], and `'static` bounds. + /// + /// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and + /// `'static` on `future` and `schedule`. + /// + /// # Safety + /// + /// - If `Fut` is not [`Send`], its [`Runnable`] must be used and dropped on the original + /// thread. + /// - If `Fut` is not `'static`, borrowed non-metadata variables must outlive its [`Runnable`]. + /// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`] + /// must be used and dropped on the original thread. + /// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the + /// [`Runnable`]'s [`Waker`]. + /// + /// # Examples + /// + /// ``` + /// use async_task::Builder; + /// + /// // The future inside the task. + /// let future = async { + /// println!("Hello, world!"); + /// }; + /// + /// // If the task gets woken up, it will be sent into this channel. + /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with the future and the schedule function. 
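// Illustrative sketch, not part of the vendored sources: `spawn_local` above guards a non-Send
// future by remembering the spawning thread's id and re-checking it on every poll and on drop.
// Caching the id in a thread-local, as the helper above does, avoids repeated lookups.
use std::thread::{self, ThreadId};

fn cached_thread_id() -> ThreadId {
    thread_local! {
        static ID: ThreadId = thread::current().id();
    }
    ID.with(|id| *id)
}

fn demo() {
    let spawner = cached_thread_id();
    let other = thread::spawn(cached_thread_id).join().unwrap();

    // A poll-time affinity check would compare against the id captured at spawn time.
    assert_eq!(spawner, cached_thread_id());
    assert_ne!(spawner, other);
}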
+ /// let (runnable, task) = unsafe { Builder::new().spawn_unchecked(move |()| future, schedule) }; + /// ``` + pub unsafe fn spawn_unchecked<'a, F, Fut, S>( + self, + future: F, + schedule: S, + ) -> (Runnable, Task) + where + F: FnOnce(&'a M) -> Fut, + Fut: Future + 'a, + S: Schedule, + M: 'a, + { + // Allocate large futures on the heap. + let ptr = if mem::size_of::() >= 2048 { + let future = |meta| { + let future = future(meta); + Box::pin(future) + }; + + RawTask::<_, Fut::Output, S, M>::allocate(future, schedule, self) + } else { + RawTask::::allocate(future, schedule, self) + }; + + let runnable = Runnable::from_raw(ptr); + let task = Task { + ptr, + _marker: PhantomData, + }; + (runnable, task) + } +} + +/// Creates a new task. +/// +/// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its +/// output. +/// +/// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] +/// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run +/// again. +/// +/// When the task is woken, its [`Runnable`] is passed to the `schedule` function. +/// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it +/// should push it into a task queue so that it can be processed later. +/// +/// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider +/// using [`spawn_local()`] or [`spawn_unchecked()`] instead. +/// +/// # Examples +/// +/// ``` +/// // The future inside the task. +/// let future = async { +/// println!("Hello, world!"); +/// }; +/// +/// // A function that schedules the task when it gets woken up. +/// let (s, r) = flume::unbounded(); +/// let schedule = move |runnable| s.send(runnable).unwrap(); +/// +/// // Create a task with the future and the schedule function. +/// let (runnable, task) = async_task::spawn(future, schedule); +/// ``` +pub fn spawn(future: F, schedule: S) -> (Runnable, Task) +where + F: Future + Send + 'static, + F::Output: Send + 'static, + S: Schedule + Send + Sync + 'static, +{ + unsafe { spawn_unchecked(future, schedule) } +} + +/// Creates a new thread-local task. +/// +/// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the +/// [`Runnable`] is used or dropped on another thread, a panic will occur. +/// +/// This function is only available when the `std` feature for this crate is enabled. +/// +/// # Examples +/// +/// ``` +/// use async_task::Runnable; +/// use flume::{Receiver, Sender}; +/// use std::rc::Rc; +/// +/// thread_local! { +/// // A queue that holds scheduled tasks. +/// static QUEUE: (Sender, Receiver) = flume::unbounded(); +/// } +/// +/// // Make a non-Send future. +/// let msg: Rc = "Hello, world!".into(); +/// let future = async move { +/// println!("{}", msg); +/// }; +/// +/// // A function that schedules the task when it gets woken up. +/// let s = QUEUE.with(|(s, _)| s.clone()); +/// let schedule = move |runnable| s.send(runnable).unwrap(); +/// +/// // Create a task with the future and the schedule function. +/// let (runnable, task) = async_task::spawn_local(future, schedule); +/// ``` +#[cfg(feature = "std")] +pub fn spawn_local(future: F, schedule: S) -> (Runnable, Task) +where + F: Future + 'static, + F::Output: 'static, + S: Schedule + Send + Sync + 'static, +{ + Builder::new().spawn_local(move |()| future, schedule) +} + +/// Creates a new task without [`Send`], [`Sync`], and `'static` bounds. 
+/// +/// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and +/// `'static` on `future` and `schedule`. +/// +/// # Safety +/// +/// - If `future` is not [`Send`], its [`Runnable`] must be used and dropped on the original +/// thread. +/// - If `future` is not `'static`, borrowed variables must outlive its [`Runnable`]. +/// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`] +/// must be used and dropped on the original thread. +/// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the +/// [`Runnable`]'s [`Waker`]. +/// +/// # Examples +/// +/// ``` +/// // The future inside the task. +/// let future = async { +/// println!("Hello, world!"); +/// }; +/// +/// // If the task gets woken up, it will be sent into this channel. +/// let (s, r) = flume::unbounded(); +/// let schedule = move |runnable| s.send(runnable).unwrap(); +/// +/// // Create a task with the future and the schedule function. +/// let (runnable, task) = unsafe { async_task::spawn_unchecked(future, schedule) }; +/// ``` +pub unsafe fn spawn_unchecked(future: F, schedule: S) -> (Runnable, Task) +where + F: Future, + S: Schedule, +{ + Builder::new().spawn_unchecked(move |()| future, schedule) +} + +/// A handle to a runnable task. +/// +/// Every spawned task has a single [`Runnable`] handle, which only exists when the task is +/// scheduled for running. +/// +/// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] +/// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run +/// again. +/// +/// Dropping a [`Runnable`] cancels the task, which means its future won't be polled again, and +/// awaiting the [`Task`] after that will result in a panic. +/// +/// # Examples +/// +/// ``` +/// use async_task::Runnable; +/// use once_cell::sync::Lazy; +/// use std::{panic, thread}; +/// +/// // A simple executor. +/// static QUEUE: Lazy> = Lazy::new(|| { +/// let (sender, receiver) = flume::unbounded::(); +/// thread::spawn(|| { +/// for runnable in receiver { +/// let _ignore_panic = panic::catch_unwind(|| runnable.run()); +/// } +/// }); +/// sender +/// }); +/// +/// // Create a task with a simple future. +/// let schedule = |runnable| QUEUE.send(runnable).unwrap(); +/// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule); +/// +/// // Schedule the task and await its output. +/// runnable.schedule(); +/// assert_eq!(smol::future::block_on(task), 3); +/// ``` +pub struct Runnable { + /// A pointer to the heap-allocated task. + pub(crate) ptr: NonNull<()>, + + /// A marker capturing generic type `M`. + pub(crate) _marker: PhantomData, +} + +unsafe impl Send for Runnable {} +unsafe impl Sync for Runnable {} + +#[cfg(feature = "std")] +impl std::panic::UnwindSafe for Runnable {} +#[cfg(feature = "std")] +impl std::panic::RefUnwindSafe for Runnable {} + +impl Runnable { + /// Get the metadata associated with this task. + /// + /// Tasks can be created with a metadata object associated with them; by default, this + /// is a `()` value. See the [`Builder::metadata()`] method for more information. + pub fn metadata(&self) -> &M { + &self.header().metadata + } + + /// Schedules the task. + /// + /// This is a convenience method that passes the [`Runnable`] to the schedule function. + /// + /// # Examples + /// + /// ``` + /// // A function that schedules the task when it gets woken up. 
+ /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with a simple future and the schedule function. + /// let (runnable, task) = async_task::spawn(async {}, schedule); + /// + /// // Schedule the task. + /// assert_eq!(r.len(), 0); + /// runnable.schedule(); + /// assert_eq!(r.len(), 1); + /// ``` + pub fn schedule(self) { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + mem::forget(self); + + unsafe { + ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); + } + } + + /// Runs the task by polling its future. + /// + /// Returns `true` if the task was woken while running, in which case the [`Runnable`] gets + /// rescheduled at the end of this method invocation. Otherwise, returns `false` and the + /// [`Runnable`] vanishes until the task is woken. + /// The return value is just a hint: `true` usually indicates that the task has yielded, i.e. + /// it woke itself and then gave the control back to the executor. + /// + /// If the [`Task`] handle was dropped or if [`cancel()`][`Task::cancel()`] was called, then + /// this method simply destroys the task. + /// + /// If the polled future panics, this method propagates the panic, and awaiting the [`Task`] + /// after that will also result in a panic. + /// + /// # Examples + /// + /// ``` + /// // A function that schedules the task when it gets woken up. + /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with a simple future and the schedule function. + /// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule); + /// + /// // Run the task and check its output. + /// runnable.run(); + /// assert_eq!(smol::future::block_on(task), 3); + /// ``` + pub fn run(self) -> bool { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + mem::forget(self); + + unsafe { ((*header).vtable.run)(ptr) } + } + + /// Returns a waker associated with this task. + /// + /// # Examples + /// + /// ``` + /// use smol::future; + /// + /// // A function that schedules the task when it gets woken up. + /// let (s, r) = flume::unbounded(); + /// let schedule = move |runnable| s.send(runnable).unwrap(); + /// + /// // Create a task with a simple future and the schedule function. + /// let (runnable, task) = async_task::spawn(future::pending::<()>(), schedule); + /// + /// // Take a waker and run the task. + /// let waker = runnable.waker(); + /// runnable.run(); + /// + /// // Reschedule the task by waking it. + /// assert_eq!(r.len(), 0); + /// waker.wake(); + /// assert_eq!(r.len(), 1); + /// ``` + pub fn waker(&self) -> Waker { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + + unsafe { + let raw_waker = ((*header).vtable.clone_waker)(ptr); + Waker::from_raw(raw_waker) + } + } + + fn header(&self) -> &Header { + unsafe { &*(self.ptr.as_ptr() as *const Header) } + } + + /// Converts this task into a raw pointer. + /// + /// To avoid a memory leak the pointer must be converted back to a Runnable using [`Runnable::from_raw`][from_raw]. + /// + /// `into_raw` does not change the state of the [`Task`], but there is no guarantee that it will be in the same state after calling [`Runnable::from_raw`][from_raw], + /// as the corresponding [`Task`] might have been dropped or cancelled. 
+ /// + /// # Examples + /// + /// ```rust + /// use async_task::{Runnable, spawn}; + + /// let (runnable, task) = spawn(async {}, |_| {}); + /// let runnable_pointer = runnable.into_raw(); + /// + /// unsafe { + /// // Convert back to an `Runnable` to prevent leak. + /// let runnable = Runnable::<()>::from_raw(runnable_pointer); + /// runnable.run(); + /// // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe. + /// } + /// // The memory was freed when `x` went out of scope above, so `runnable_pointer` is now dangling! + /// ``` + /// [from_raw]: #method.from_raw + pub fn into_raw(self) -> NonNull<()> { + let ptr = self.ptr; + mem::forget(self); + ptr + } + + /// Converts a raw pointer into a Runnable. + /// + /// # Safety + /// + /// This method should only be used with raw pointers returned from [`Runnable::into_raw`][into_raw]. + /// It is not safe to use the provided pointer once it is passed to `from_raw`. + /// Crucially, it is unsafe to call `from_raw` multiple times with the same pointer - even if the resulting [`Runnable`] is not used - + /// as internally `async-task` uses reference counting. + /// + /// It is however safe to call [`Runnable::into_raw`][into_raw] on a [`Runnable`] created with `from_raw` or + /// after the [`Task`] associated with a given Runnable has been dropped or cancelled. + /// + /// The state of the [`Runnable`] created with `from_raw` is not specified. + /// + /// # Examples + /// + /// ```rust + /// use async_task::{Runnable, spawn}; + + /// let (runnable, task) = spawn(async {}, |_| {}); + /// let runnable_pointer = runnable.into_raw(); + /// + /// drop(task); + /// unsafe { + /// // Convert back to an `Runnable` to prevent leak. + /// let runnable = Runnable::<()>::from_raw(runnable_pointer); + /// let did_poll = runnable.run(); + /// assert!(!did_poll); + /// // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe. + /// } + /// // The memory was freed when `x` went out of scope above, so `runnable_pointer` is now dangling! + /// ``` + + /// [into_raw]: #method.into_raw + pub unsafe fn from_raw(ptr: NonNull<()>) -> Self { + Self { + ptr, + _marker: Default::default(), + } + } +} + +impl Drop for Runnable { + fn drop(&mut self) { + let ptr = self.ptr.as_ptr(); + let header = self.header(); + + unsafe { + let mut state = header.state.load(Ordering::Acquire); + + loop { + // If the task has been completed or closed, it can't be canceled. + if state & (COMPLETED | CLOSED) != 0 { + break; + } + + // Mark the task as closed. + match header.state.compare_exchange_weak( + state, + state | CLOSED, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => break, + Err(s) => state = s, + } + } + + // Drop the future. + (header.vtable.drop_future)(ptr); + + // Mark the task as unscheduled. + let state = header.state.fetch_and(!SCHEDULED, Ordering::AcqRel); + + // Notify the awaiter that the future has been dropped. + if state & AWAITER != 0 { + (*header).notify(None); + } + + // Drop the task reference. 
+ (header.vtable.drop_ref)(ptr); + } + } +} + +impl fmt::Debug for Runnable { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + + f.debug_struct("Runnable") + .field("header", unsafe { &(*header) }) + .finish() + } +} diff --git a/external/vendor/async-task/src/state.rs b/external/vendor/async-task/src/state.rs new file mode 100644 index 0000000000..2fc6cf3711 --- /dev/null +++ b/external/vendor/async-task/src/state.rs @@ -0,0 +1,69 @@ +/// Set if the task is scheduled for running. +/// +/// A task is considered to be scheduled whenever its `Runnable` exists. +/// +/// This flag can't be set when the task is completed. However, it can be set while the task is +/// running, in which case it will be rescheduled as soon as polling finishes. +pub(crate) const SCHEDULED: usize = 1 << 0; + +/// Set if the task is running. +/// +/// A task is in running state while its future is being polled. +/// +/// This flag can't be set when the task is completed. However, it can be in scheduled state while +/// it is running, in which case it will be rescheduled as soon as polling finishes. +pub(crate) const RUNNING: usize = 1 << 1; + +/// Set if the task has been completed. +/// +/// This flag is set when polling returns `Poll::Ready`. The output of the future is then stored +/// inside the task until it becomes closed. In fact, `Task` picks up the output by marking +/// the task as closed. +/// +/// This flag can't be set when the task is scheduled or running. +pub(crate) const COMPLETED: usize = 1 << 2; + +/// Set if the task is closed. +/// +/// If a task is closed, that means it's either canceled or its output has been consumed by the +/// `Task`. A task becomes closed in the following cases: +/// +/// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`. +/// 2. Its output gets awaited by the `Task`. +/// 3. It panics while polling the future. +/// 4. It is completed and the `Task` gets dropped. +pub(crate) const CLOSED: usize = 1 << 3; + +/// Set if the `Task` still exists. +/// +/// The `Task` is a special case in that it is only tracked by this flag, while all other +/// task references (`Runnable` and `Waker`s) are tracked by the reference count. +pub(crate) const TASK: usize = 1 << 4; + +/// Set if the `Task` is awaiting the output. +/// +/// This flag is set while there is a registered awaiter of type `Waker` inside the task. When the +/// task gets closed or completed, we need to wake the awaiter. This flag can be used as a fast +/// check that tells us if we need to wake anyone. +pub(crate) const AWAITER: usize = 1 << 5; + +/// Set if an awaiter is being registered. +/// +/// This flag is set when `Task` is polled and we are registering a new awaiter. +pub(crate) const REGISTERING: usize = 1 << 6; + +/// Set if the awaiter is being notified. +/// +/// This flag is set when notifying the awaiter. If an awaiter is concurrently registered and +/// notified, whichever side came first will take over the reposibility of resolving the race. +pub(crate) const NOTIFYING: usize = 1 << 7; + +/// A single reference. +/// +/// The lower bits in the state contain various flags representing the task state, while the upper +/// bits contain the reference count. The value of `REFERENCE` represents a single reference in the +/// total reference count. +/// +/// Note that the reference counter only tracks the `Runnable` and `Waker`s. The `Task` is +/// tracked separately by the `TASK` flag. 
+pub(crate) const REFERENCE: usize = 1 << 8; diff --git a/external/vendor/async-task/src/task.rs b/external/vendor/async-task/src/task.rs new file mode 100644 index 0000000000..da45cd8d06 --- /dev/null +++ b/external/vendor/async-task/src/task.rs @@ -0,0 +1,565 @@ +use core::fmt; +use core::future::Future; +use core::marker::PhantomData; +use core::mem; +use core::pin::Pin; +use core::ptr::NonNull; +use core::sync::atomic::Ordering; +use core::task::{Context, Poll}; + +use crate::header::Header; +use crate::raw::Panic; +use crate::runnable::ScheduleInfo; +use crate::state::*; + +/// A spawned task. +/// +/// A [`Task`] can be awaited to retrieve the output of its future. +/// +/// Dropping a [`Task`] cancels it, which means its future won't be polled again. To drop the +/// [`Task`] handle without canceling it, use [`detach()`][`Task::detach()`] instead. To cancel a +/// task gracefully and wait until it is fully destroyed, use the [`cancel()`][Task::cancel()] +/// method. +/// +/// Note that canceling a task actually wakes it and reschedules one last time. Then, the executor +/// can destroy the task by simply dropping its [`Runnable`][`super::Runnable`] or by invoking +/// [`run()`][`super::Runnable::run()`]. +/// +/// # Examples +/// +/// ``` +/// use smol::{future, Executor}; +/// use std::thread; +/// +/// let ex = Executor::new(); +/// +/// // Spawn a future onto the executor. +/// let task = ex.spawn(async { +/// println!("Hello from a task!"); +/// 1 + 2 +/// }); +/// +/// // Run an executor thread. +/// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); +/// +/// // Wait for the task's output. +/// assert_eq!(future::block_on(task), 3); +/// ``` +#[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"] +pub struct Task { + /// A raw task pointer. + pub(crate) ptr: NonNull<()>, + + /// A marker capturing generic types `T` and `M`. + pub(crate) _marker: PhantomData<(T, M)>, +} + +unsafe impl Send for Task {} +unsafe impl Sync for Task {} + +impl Unpin for Task {} + +#[cfg(feature = "std")] +impl std::panic::UnwindSafe for Task {} +#[cfg(feature = "std")] +impl std::panic::RefUnwindSafe for Task {} + +impl Task { + /// Detaches the task to let it keep running in the background. + /// + /// # Examples + /// + /// ``` + /// use smol::{Executor, Timer}; + /// use std::time::Duration; + /// + /// let ex = Executor::new(); + /// + /// // Spawn a deamon future. + /// ex.spawn(async { + /// loop { + /// println!("I'm a daemon task looping forever."); + /// Timer::after(Duration::from_secs(1)).await; + /// } + /// }) + /// .detach(); + /// ``` + pub fn detach(self) { + let mut this = self; + let _out = this.set_detached(); + mem::forget(this); + } + + /// Cancels the task and waits for it to stop running. + /// + /// Returns the task's output if it was completed just before it got canceled, or [`None`] if + /// it didn't complete. + /// + /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of + /// canceling because it also waits for the task to stop running. + /// + /// # Examples + /// + /// ``` + /// # if cfg!(miri) { return; } // Miri does not support epoll + /// use smol::{future, Executor, Timer}; + /// use std::thread; + /// use std::time::Duration; + /// + /// let ex = Executor::new(); + /// + /// // Spawn a deamon future. 
+ /// let task = ex.spawn(async { + /// loop { + /// println!("Even though I'm in an infinite loop, you can still cancel me!"); + /// Timer::after(Duration::from_secs(1)).await; + /// } + /// }); + /// + /// // Run an executor thread. + /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); + /// + /// future::block_on(async { + /// Timer::after(Duration::from_secs(3)).await; + /// task.cancel().await; + /// }); + /// ``` + pub async fn cancel(self) -> Option { + let mut this = self; + this.set_canceled(); + this.fallible().await + } + + /// Converts this task into a [`FallibleTask`]. + /// + /// Like [`Task`], a fallible task will poll the task's output until it is + /// completed or cancelled due to its [`Runnable`][`super::Runnable`] being + /// dropped without being run. Resolves to the task's output when completed, + /// or [`None`] if it didn't complete. + /// + /// # Examples + /// + /// ``` + /// use smol::{future, Executor}; + /// use std::thread; + /// + /// let ex = Executor::new(); + /// + /// // Spawn a future onto the executor. + /// let task = ex.spawn(async { + /// println!("Hello from a task!"); + /// 1 + 2 + /// }) + /// .fallible(); + /// + /// // Run an executor thread. + /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); + /// + /// // Wait for the task's output. + /// assert_eq!(future::block_on(task), Some(3)); + /// ``` + /// + /// ``` + /// use smol::future; + /// + /// // Schedule function which drops the runnable without running it. + /// let schedule = move |runnable| drop(runnable); + /// + /// // Create a task with the future and the schedule function. + /// let (runnable, task) = async_task::spawn(async { + /// println!("Hello from a task!"); + /// 1 + 2 + /// }, schedule); + /// runnable.schedule(); + /// + /// // Wait for the task's output. + /// assert_eq!(future::block_on(task.fallible()), None); + /// ``` + pub fn fallible(self) -> FallibleTask { + FallibleTask { task: self } + } + + /// Puts the task in canceled state. + fn set_canceled(&mut self) { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + + unsafe { + let mut state = (*header).state.load(Ordering::Acquire); + + loop { + // If the task has been completed or closed, it can't be canceled. + if state & (COMPLETED | CLOSED) != 0 { + break; + } + + // If the task is not scheduled nor running, we'll need to schedule it. + let new = if state & (SCHEDULED | RUNNING) == 0 { + (state | SCHEDULED | CLOSED) + REFERENCE + } else { + state | CLOSED + }; + + // Mark the task as closed. + match (*header).state.compare_exchange_weak( + state, + new, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // If the task is not scheduled nor running, schedule it one more time so + // that its future gets dropped by the executor. + if state & (SCHEDULED | RUNNING) == 0 { + ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); + } + + // Notify the awaiter that the task has been closed. + if state & AWAITER != 0 { + (*header).notify(None); + } + + break; + } + Err(s) => state = s, + } + } + } + } + + /// Puts the task in detached state. + fn set_detached(&mut self) -> Option> { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + + unsafe { + // A place where the output will be stored in case it needs to be dropped. + let mut output = None; + + // Optimistically assume the `Task` is being detached just after creating the task. 
+ // This is a common case so if the `Task` is detached, the overhead of it is only one + compare-exchange operation. + if let Err(mut state) = (*header).state.compare_exchange_weak( + SCHEDULED | TASK | REFERENCE, + SCHEDULED | REFERENCE, + Ordering::AcqRel, + Ordering::Acquire, + ) { + loop { + // If the task has been completed but not yet closed, that means its output + // must be dropped. + if state & COMPLETED != 0 && state & CLOSED == 0 { + // Mark the task as closed in order to grab its output. + match (*header).state.compare_exchange_weak( + state, + state | CLOSED, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // Read the output. + output = Some( + (((*header).vtable.get_output)(ptr) as *mut Result) + .read(), + ); + + // Update the state variable because we're continuing the loop. + state |= CLOSED; + } + Err(s) => state = s, + } + } else { + // If this is the last reference to the task and it's not closed, then + // close it and schedule one more time so that its future gets dropped by + // the executor. + let new = if state & (!(REFERENCE - 1) | CLOSED) == 0 { + SCHEDULED | CLOSED | REFERENCE + } else { + state & !TASK + }; + + // Unset the `TASK` flag. + match (*header).state.compare_exchange_weak( + state, + new, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // If this is the last reference to the task, we need to either + // schedule dropping its future or destroy it. + if state & !(REFERENCE - 1) == 0 { + if state & CLOSED == 0 { + ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); + } else { + ((*header).vtable.destroy)(ptr); + } + } + + break; + } + Err(s) => state = s, + } + } + } + } + + output + } + } + + /// Polls the task to retrieve its output. + /// + /// Returns `Some` if the task has completed or `None` if it was closed. + /// + /// A task becomes closed in the following cases: + /// + /// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`. + /// 2. Its output gets awaited by the `Task`. + /// 3. It panics while polling the future. + /// 4. It is completed and the `Task` gets dropped. + fn poll_task(&mut self, cx: &mut Context<'_>) -> Poll> { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + + unsafe { + let mut state = (*header).state.load(Ordering::Acquire); + + loop { + // If the task has been closed, notify the awaiter and return `None`. + if state & CLOSED != 0 { + // If the task is scheduled or running, we need to wait until its future is + // dropped. + if state & (SCHEDULED | RUNNING) != 0 { + // Replace the waker with one associated with the current task. + (*header).register(cx.waker()); + + // Reload the state after registering. It is possible changes occurred just + // before registration so we need to check for that. + state = (*header).state.load(Ordering::Acquire); + + // If the task is still scheduled or running, we need to wait because its + // future is not dropped yet. + if state & (SCHEDULED | RUNNING) != 0 { + return Poll::Pending; + } + } + + // Even though the awaiter is most likely the current task, it could also be + // another task. + (*header).notify(Some(cx.waker())); + return Poll::Ready(None); + } + + // If the task is not completed, register the current task. + if state & COMPLETED == 0 { + // Replace the waker with one associated with the current task. + (*header).register(cx.waker()); + + // Reload the state after registering. It is possible that the task became + // completed or closed just before registration so we need to check for that.
+ state = (*header).state.load(Ordering::Acquire); + + // If the task has been closed, restart. + if state & CLOSED != 0 { + continue; + } + + // If the task is still not completed, we're blocked on it. + if state & COMPLETED == 0 { + return Poll::Pending; + } + } + + // Since the task is now completed, mark it as closed in order to grab its output. + match (*header).state.compare_exchange( + state, + state | CLOSED, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => { + // Notify the awaiter. Even though the awaiter is most likely the current + // task, it could also be another task. + if state & AWAITER != 0 { + (*header).notify(Some(cx.waker())); + } + + // Take the output from the task. + let output = ((*header).vtable.get_output)(ptr) as *mut Result; + let output = output.read(); + + // Propagate the panic if the task panicked. + let output = match output { + Ok(output) => output, + Err(panic) => { + #[cfg(feature = "std")] + std::panic::resume_unwind(panic); + + #[cfg(not(feature = "std"))] + match panic {} + } + }; + + return Poll::Ready(Some(output)); + } + Err(s) => state = s, + } + } + } + } + + fn header(&self) -> &Header { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + unsafe { &*header } + } + + /// Returns `true` if the current task is finished. + /// + /// Note that in a multithreaded environment, this task can finish immediately after calling this function. + pub fn is_finished(&self) -> bool { + let ptr = self.ptr.as_ptr(); + let header = ptr as *const Header; + + unsafe { + let state = (*header).state.load(Ordering::Acquire); + state & (CLOSED | COMPLETED) != 0 + } + } + + /// Get the metadata associated with this task. + /// + /// Tasks can be created with a metadata object associated with them; by default, this + /// is a `()` value. See the [`Builder::metadata()`] method for more information. + pub fn metadata(&self) -> &M { + &self.header().metadata + } +} + +impl Drop for Task { + fn drop(&mut self) { + self.set_canceled(); + self.set_detached(); + } +} + +impl Future for Task { + type Output = T; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.poll_task(cx) { + Poll::Ready(t) => Poll::Ready(t.expect("Task polled after completion")), + Poll::Pending => Poll::Pending, + } + } +} + +impl fmt::Debug for Task { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Task") + .field("header", self.header()) + .finish() + } +} + +/// A spawned task with a fallible response. +/// +/// This type behaves like [`Task`], however it produces an `Option` when +/// polled and will return `None` if the executor dropped its +/// [`Runnable`][`super::Runnable`] without being run. +/// +/// This can be useful to avoid the panic produced when polling the `Task` +/// future if the executor dropped its `Runnable`. +#[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"] +pub struct FallibleTask { + task: Task, +} + +impl FallibleTask { + /// Detaches the task to let it keep running in the background. + /// + /// # Examples + /// + /// ``` + /// use smol::{Executor, Timer}; + /// use std::time::Duration; + /// + /// let ex = Executor::new(); + /// + /// // Spawn a daemon future.
+ /// ex.spawn(async { + /// loop { + /// println!("I'm a daemon task looping forever."); + /// Timer::after(Duration::from_secs(1)).await; + /// } + /// }) + /// .fallible() + /// .detach(); + /// ``` + pub fn detach(self) { + self.task.detach() + } + + /// Cancels the task and waits for it to stop running. + /// + /// Returns the task's output if it was completed just before it got canceled, or [`None`] if + /// it didn't complete. + /// + /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of + /// canceling because it also waits for the task to stop running. + /// + /// # Examples + /// + /// ``` + /// # if cfg!(miri) { return; } // Miri does not support epoll + /// use smol::{future, Executor, Timer}; + /// use std::thread; + /// use std::time::Duration; + /// + /// let ex = Executor::new(); + /// + /// // Spawn a deamon future. + /// let task = ex.spawn(async { + /// loop { + /// println!("Even though I'm in an infinite loop, you can still cancel me!"); + /// Timer::after(Duration::from_secs(1)).await; + /// } + /// }) + /// .fallible(); + /// + /// // Run an executor thread. + /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); + /// + /// future::block_on(async { + /// Timer::after(Duration::from_secs(3)).await; + /// task.cancel().await; + /// }); + /// ``` + pub async fn cancel(self) -> Option { + self.task.cancel().await + } + + /// Returns `true` if the current task is finished. + /// + /// Note that in a multithreaded environment, this task can change finish immediately after calling this function. + pub fn is_finished(&self) -> bool { + self.task.is_finished() + } +} + +impl Future for FallibleTask { + type Output = Option; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.task.poll_task(cx) + } +} + +impl fmt::Debug for FallibleTask { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FallibleTask") + .field("header", self.task.header()) + .finish() + } +} diff --git a/external/vendor/async-task/src/utils.rs b/external/vendor/async-task/src/utils.rs new file mode 100644 index 0000000000..5c2170c89e --- /dev/null +++ b/external/vendor/async-task/src/utils.rs @@ -0,0 +1,127 @@ +use core::alloc::Layout as StdLayout; +use core::mem; + +/// Aborts the process. +/// +/// To abort, this function simply panics while panicking. +pub(crate) fn abort() -> ! { + struct Panic; + + impl Drop for Panic { + fn drop(&mut self) { + panic!("aborting the process"); + } + } + + let _panic = Panic; + panic!("aborting the process"); +} + +/// Calls a function and aborts if it panics. +/// +/// This is useful in unsafe code where we can't recover from panics. +#[inline] +pub(crate) fn abort_on_panic(f: impl FnOnce() -> T) -> T { + struct Bomb; + + impl Drop for Bomb { + fn drop(&mut self) { + abort(); + } + } + + let bomb = Bomb; + let t = f(); + mem::forget(bomb); + t +} + +/// A version of `alloc::alloc::Layout` that can be used in the const +/// position. +#[derive(Clone, Copy, Debug)] +pub(crate) struct Layout { + size: usize, + align: usize, +} + +impl Layout { + /// Creates a new `Layout` with the given size and alignment. + #[inline] + pub(crate) const fn from_size_align(size: usize, align: usize) -> Self { + Self { size, align } + } + + /// Creates a new `Layout` for the given sized type. + #[inline] + pub(crate) const fn new() -> Self { + Self::from_size_align(mem::size_of::(), mem::align_of::()) + } + + /// Convert this into the standard library's layout type. 
+ /// + /// # Safety + /// + /// - `align` must be non-zero and a power of two. + /// - When rounded up to the nearest multiple of `align`, the size + /// must not overflow. + #[inline] + pub(crate) const unsafe fn into_std(self) -> StdLayout { + StdLayout::from_size_align_unchecked(self.size, self.align) + } + + /// Get the alignment of this layout. + #[inline] + pub(crate) const fn align(&self) -> usize { + self.align + } + + /// Get the size of this layout. + #[inline] + pub(crate) const fn size(&self) -> usize { + self.size + } + + /// Returns the layout for `a` followed by `b` and the offset of `b`. + /// + /// This function was adapted from the `Layout::extend()`: + /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.extend + #[inline] + pub(crate) const fn extend(self, other: Layout) -> Option<(Layout, usize)> { + let new_align = max(self.align(), other.align()); + let pad = self.padding_needed_for(other.align()); + + let offset = leap!(self.size().checked_add(pad)); + let new_size = leap!(offset.checked_add(other.size())); + + // return None if any of the following are true: + // - align is 0 (implied false by is_power_of_two()) + // - align is not a power of 2 + // - size rounded up to align overflows + if !new_align.is_power_of_two() || new_size > isize::MAX as usize - (new_align - 1) { + return None; + } + + let layout = Layout::from_size_align(new_size, new_align); + Some((layout, offset)) + } + + /// Returns the padding after `layout` that aligns the following address to `align`. + /// + /// This function was adapted from the `Layout::padding_needed_for()`: + /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.padding_needed_for + #[inline] + pub(crate) const fn padding_needed_for(self, align: usize) -> usize { + let len = self.size(); + let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); + len_rounded_up.wrapping_sub(len) + } +} + +#[inline] +pub(crate) const fn max(left: usize, right: usize) -> usize { + if left > right { + left + } else { + right + } +} diff --git a/external/vendor/async-task/tests/basic.rs b/external/vendor/async-task/tests/basic.rs new file mode 100644 index 0000000000..727a05ee1f --- /dev/null +++ b/external/vendor/async-task/tests/basic.rs @@ -0,0 +1,325 @@ +use std::future::Future; +use std::pin::Pin; +use std::ptr::NonNull; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use async_task::Runnable; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, POLL, DROP)` +// +// The future `f` always returns `Poll::Ready`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Box; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + Poll::Ready(Box::new(0)) + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. 
+// When it gets dropped, `DROP` is incremented. +macro_rules! schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |_runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn try_await(f: impl Future) -> Option { + future::block_on(future::poll_once(f)) +} + +#[test] +fn drop_and_detach() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(runnable); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn detach_and_drop() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(runnable); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn detach_and_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn run_and_detach() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn cancel_and_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 
+ assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn run_and_cancel() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn cancel_join() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + assert!(try_await(&mut task).is_none()); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(try_await(&mut task).is_some()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn schedule() { + let (s, r) = flume::unbounded(); + let schedule = move |runnable| s.send(runnable).unwrap(); + let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); + + assert!(r.is_empty()); + runnable.schedule(); + + let runnable = r.recv().unwrap(); + assert!(r.is_empty()); + runnable.schedule(); + + let runnable = r.recv().unwrap(); + assert!(r.is_empty()); + runnable.schedule(); + + r.recv().unwrap(); +} + +#[test] +fn schedule_counter() { + static COUNT: AtomicUsize = AtomicUsize::new(0); + + let (s, r) = flume::unbounded(); + let schedule = move |runnable: Runnable| { + COUNT.fetch_add(1, Ordering::SeqCst); + s.send(runnable).unwrap(); + }; + let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); + runnable.schedule(); + + r.recv().unwrap().schedule(); + r.recv().unwrap().schedule(); + assert_eq!(COUNT.load(Ordering::SeqCst), 3); + r.recv().unwrap(); +} + +#[test] +fn drop_inside_schedule() { + struct DropGuard(AtomicUsize); + impl Drop for DropGuard { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let guard = DropGuard(AtomicUsize::new(0)); + + let (runnable, _) = async_task::spawn(async {}, move |runnable| { + assert_eq!(guard.0.load(Ordering::SeqCst), 0); + drop(runnable); + assert_eq!(guard.0.load(Ordering::SeqCst), 0); + }); + runnable.schedule(); +} + +#[test] +fn waker() { + let (s, r) = flume::unbounded(); + let schedule = move |runnable| s.send(runnable).unwrap(); + let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); + + assert!(r.is_empty()); + let waker = runnable.waker(); + runnable.run(); + waker.wake_by_ref(); + + let runnable = r.recv().unwrap(); + runnable.run(); + waker.wake(); + r.recv().unwrap(); +} + +#[test] +fn raw() { + 
// Dispatch schedules a function for execution at a later point. For tests, we execute it straight away. + fn dispatch(trampoline: extern "C" fn(NonNull<()>), context: NonNull<()>) { + trampoline(context) + } + extern "C" fn trampoline(runnable: NonNull<()>) { + let task = unsafe { Runnable::<()>::from_raw(runnable) }; + task.run(); + } + + let task_got_executed = Arc::new(AtomicBool::new(false)); + let (runnable, _handle) = async_task::spawn( + { + let task_got_executed = task_got_executed.clone(); + async move { task_got_executed.store(true, Ordering::SeqCst) } + }, + |runnable: Runnable<()>| dispatch(trampoline, runnable.into_raw()), + ); + runnable.schedule(); + + assert!(task_got_executed.load(Ordering::SeqCst)); +} diff --git a/external/vendor/async-task/tests/cancel.rs b/external/vendor/async-task/tests/cancel.rs new file mode 100644 index 0000000000..033336762c --- /dev/null +++ b/external/vendor/async-task/tests/cancel.rs @@ -0,0 +1,183 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, POLL, DROP_F, DROP_T)` +// +// The future `f` outputs `Poll::Ready`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP_F` is incremented. +// When the output gets dropped, `DROP_T` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop_f: AtomicUsize = AtomicUsize::new(0); + static $drop_t: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Out; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(400)); + Poll::Ready(Out(Box::new(0), true)) + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop_f.fetch_add(1, Ordering::SeqCst); + } + } + + #[derive(Default)] + struct Out(#[allow(dead_code)] Box, bool); + + impl Drop for Out { + fn drop(&mut self) { + if self.1 { + $drop_t.fetch_add(1, Ordering::SeqCst); + } + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! 
schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |runnable: Runnable| { + let _ = &guard; + runnable.schedule(); + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn run_and_cancel() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(future::block_on(task.cancel()).is_some()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn cancel_and_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(200)); + runnable.run(); + + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + thread::sleep(ms(200)); + + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + assert!(future::block_on(task.cancel()).is_none()); + + thread::sleep(ms(200)); + + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + thread::sleep(ms(200)); + + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn cancel_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + + thread::sleep(ms(200)); + + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + assert!(future::block_on(task.cancel()).is_none()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} diff --git a/external/vendor/async-task/tests/join.rs b/external/vendor/async-task/tests/join.rs new file mode 100644 index 0000000000..089b5c10e5 --- /dev/null +++ b/external/vendor/async-task/tests/join.rs @@ -0,0 +1,386 @@ +use std::cell::Cell; +use std::future::Future; +use std::panic::{catch_unwind, AssertUnwindSafe}; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future 
with event counters. +// +// Usage: `future!(f, POLL, DROP_F, DROP_T)` +// +// The future `f` outputs `Poll::Ready`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP_F` is incremented. +// When the output gets dropped, `DROP_T` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop_f: AtomicUsize = AtomicUsize::new(0); + static $drop_t: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Out; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + Poll::Ready(Out(Box::new(0), true)) + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop_f.fetch_add(1, Ordering::SeqCst); + } + } + + #[derive(Default)] + struct Out(#[allow(dead_code)] Box, bool); + + impl Drop for Out { + fn drop(&mut self) { + if self.1 { + $drop_t.fetch_add(1, Ordering::SeqCst); + } + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |runnable: Runnable| { + let _ = &guard; + runnable.schedule(); + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn drop_and_join() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + drop(runnable); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); +} + +#[test] +fn run_and_join() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| future::block_on(task)).is_ok()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); +} + +#[test] +fn detach_and_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + task.detach(); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + 
assert_eq!(DROP_T.load(Ordering::SeqCst), 1); +} + +#[test] +fn join_twice() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + runnable.run(); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + future::block_on(&mut task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + assert!(catch_unwind(AssertUnwindSafe(|| future::block_on(&mut task))).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + task.detach(); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn join_and_cancel() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(200)); + drop(runnable); + + thread::sleep(ms(400)); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + + thread::sleep(ms(200)); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn join_and_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(400)); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + future::block_on(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn try_join_and_run_and_join() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(400)); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + future::block_on(future::or(&mut task, future::ready(Default::default()))); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + future::block_on(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 
0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn try_join_and_cancel_and_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(200)); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + future::block_on(future::or(&mut task, future::ready(Default::default()))); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + }) + .run(); +} + +#[test] +fn try_join_and_run_and_cancel() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + thread::sleep(ms(200)); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + }) + .add(|| { + future::block_on(future::or(&mut task, future::ready(Default::default()))); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + thread::sleep(ms(400)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn await_output() { + struct Fut(Cell>); + + impl Fut { + fn new(t: T) -> Fut { + Fut(Cell::new(Some(t))) + } + } + + impl Future for Fut { + type Output = T; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + Poll::Ready(self.0.take().unwrap()) + } + } + + for i in 0..10 { + let (runnable, task) = async_task::spawn(Fut::new(i), drop); + runnable.run(); + assert_eq!(future::block_on(task), i); + } + + for i in 0..10 { + let (runnable, task) = async_task::spawn(Fut::new(vec![7; i]), drop); + runnable.run(); + assert_eq!(future::block_on(task), vec![7; i]); + } + + let (runnable, task) = async_task::spawn(Fut::new("foo".to_string()), drop); + runnable.run(); + assert_eq!(future::block_on(task), "foo"); +} diff --git a/external/vendor/async-task/tests/metadata.rs b/external/vendor/async-task/tests/metadata.rs new file mode 100644 index 0000000000..d3d8d53483 --- /dev/null +++ b/external/vendor/async-task/tests/metadata.rs @@ -0,0 +1,58 @@ +use async_task::{Builder, Runnable}; +use flume::unbounded; +use smol::future; + +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[test] +fn metadata_use_case() { + // Each future has a counter that is incremented every time it is scheduled. 
+ let (sender, receiver) = unbounded::>(); + let schedule = move |runnable: Runnable| { + runnable.metadata().fetch_add(1, Ordering::SeqCst); + sender.send(runnable).ok(); + }; + + async fn my_future(counter: &AtomicUsize) { + loop { + // Loop until we've been scheduled five times. + let count = counter.load(Ordering::SeqCst); + if count < 5 { + // Make sure that we are immediately scheduled again. + future::yield_now().await; + continue; + } + + // We've been scheduled five times, so we're done. + break; + } + } + + let make_task = || { + // SAFETY: We are spawning a non-'static future, so we need to use the unsafe API. + // The borrowed variables, in this case the metadata, are guaranteed to outlive the runnable. + let (runnable, task) = unsafe { + Builder::new() + .metadata(AtomicUsize::new(0)) + .spawn_unchecked(my_future, schedule.clone()) + }; + + runnable.schedule(); + task + }; + + // Make tasks. + let t1 = make_task(); + let t2 = make_task(); + + // Run the tasks. + while let Ok(runnable) = receiver.try_recv() { + runnable.run(); + } + + // Unwrap the tasks. + smol::future::block_on(async move { + t1.await; + t2.await; + }); +} diff --git a/external/vendor/async-task/tests/panic.rs b/external/vendor/async-task/tests/panic.rs new file mode 100644 index 0000000000..726e385d46 --- /dev/null +++ b/external/vendor/async-task/tests/panic.rs @@ -0,0 +1,234 @@ +use std::future::Future; +use std::panic::catch_unwind; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, POLL, DROP)` +// +// The future `f` sleeps for 200 ms and then panics. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = (); + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(400)); + panic!() + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! 
schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |_runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn cancel_during_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + }) + .run(); +} + +#[test] +fn run_and_join() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn try_join_and_run_and_join() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + future::block_on(future::or(&mut task, future::ready(()))); + assert_eq!(POLL.load(Ordering::SeqCst), 0); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn join_during_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + assert!(catch_unwind(|| future::block_on(task)).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + 
assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn try_join_during_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + future::block_on(future::or(&mut task, future::ready(()))); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + drop(task); + }) + .run(); +} + +#[test] +fn detach_during_run() { + future!(f, POLL, DROP_F); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + }) + .run(); +} diff --git a/external/vendor/async-task/tests/ready.rs b/external/vendor/async-task/tests/ready.rs new file mode 100644 index 0000000000..aefb36e8f8 --- /dev/null +++ b/external/vendor/async-task/tests/ready.rs @@ -0,0 +1,225 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, POLL, DROP_F, DROP_T)` +// +// The future `f` sleeps for 200 ms and outputs `Poll::Ready`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP_F` is incremented. +// When the output gets dropped, `DROP_T` is incremented. +macro_rules! future { + ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop_f: AtomicUsize = AtomicUsize::new(0); + static $drop_t: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Out; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(400)); + Poll::Ready(Out(Box::new(0), true)) + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop_f.fetch_add(1, Ordering::SeqCst); + } + } + + #[derive(Default)] + struct Out(#[allow(dead_code)] Box, bool); + + impl Drop for Out { + fn drop(&mut self) { + if self.1 { + $drop_t.fetch_add(1, Ordering::SeqCst); + } + } + } + + Fut(Box::new(0)) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, SCHED, DROP)` +// +// The schedule function `s` does nothing. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +macro_rules! 
schedule { + ($name:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let $name = { + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + move |_runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + } + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn cancel_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn join_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + future::block_on(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + + thread::sleep(ms(200)); + + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + }) + .run(); +} + +#[test] +fn try_join_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let (runnable, mut task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + future::block_on(future::or(&mut task, future::ready(Default::default()))); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + drop(task); + }) + .run(); +} + +#[test] +fn detach_during_run() { + future!(f, POLL, DROP_F, DROP_T); + schedule!(s, SCHEDULE, DROP_S); + let 
(runnable, task) = async_task::spawn(f, s); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(DROP_T.load(Ordering::SeqCst), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(DROP_T.load(Ordering::SeqCst), 0); + }) + .run(); +} diff --git a/external/vendor/async-task/tests/waker_panic.rs b/external/vendor/async-task/tests/waker_panic.rs new file mode 100644 index 0000000000..5b54f9dbe9 --- /dev/null +++ b/external/vendor/async-task/tests/waker_panic.rs @@ -0,0 +1,330 @@ +use std::cell::Cell; +use std::future::Future; +use std::panic::{catch_unwind, AssertUnwindSafe}; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use atomic_waker::AtomicWaker; +use easy_parallel::Parallel; +use smol::future; + +// Creates a future with event counters. +// +// Usage: `future!(f, get_waker, POLL, DROP)` +// +// The future `f` always sleeps for 200 ms, and panics the second time it is polled. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Every time the future is run, it stores the waker into a global variable. +// This waker can be extracted using the `get_waker()` function. +macro_rules! future { + ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop: AtomicUsize = AtomicUsize::new(0); + static WAKER: AtomicWaker = AtomicWaker::new(); + + let ($name, $get_waker) = { + struct Fut(Cell, #[allow(dead_code)] Box); + + impl Future for Fut { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + WAKER.register(cx.waker()); + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(400)); + + if self.0.get() { + panic!() + } else { + self.0.set(true); + Poll::Pending + } + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + (Fut(Cell::new(false), Box::new(0)), || WAKER.take().unwrap()) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, chan, SCHED, DROP)` +// +// The schedule function `s` pushes the task into `chan`. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Receiver `chan` extracts the task when it is scheduled. +macro_rules! 
schedule { + ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let ($name, $chan) = { + let (s, r) = flume::unbounded(); + + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + let sched = move |runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + s.send(runnable).unwrap(); + }; + + (sched, r) + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +fn try_await(f: impl Future) -> Option { + future::block_on(future::poll_once(f)) +} + +#[test] +fn wake_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + waker.wake(); + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn cancel_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn wake_and_cancel_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + 
assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + waker.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[flaky_test::flaky_test] +fn cancel_and_wake_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + POLL.store(0, Ordering::SeqCst); + DROP_F.store(0, Ordering::SeqCst); + SCHEDULE.store(0, Ordering::SeqCst); + DROP_S.store(0, Ordering::SeqCst); + + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + assert!(catch_unwind(|| runnable.run()).is_err()); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + waker.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn panic_and_poll() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + get_waker().wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + let mut task = task; + assert!(try_await(&mut task).is_none()); + + let runnable = chan.recv().unwrap(); + assert!(catch_unwind(|| runnable.run()).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + assert!(catch_unwind(AssertUnwindSafe(|| try_await(&mut task))).is_err()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + + drop(get_waker()); + drop(task); + 
assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} diff --git a/external/vendor/async-task/tests/waker_pending.rs b/external/vendor/async-task/tests/waker_pending.rs new file mode 100644 index 0000000000..ccd540b4ae --- /dev/null +++ b/external/vendor/async-task/tests/waker_pending.rs @@ -0,0 +1,365 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use atomic_waker::AtomicWaker; +use easy_parallel::Parallel; + +// Creates a future with event counters. +// +// Usage: `future!(f, get_waker, POLL, DROP)` +// +// The future `f` always sleeps for 200 ms and returns `Poll::Pending`. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Every time the future is run, it stores the waker into a global variable. +// This waker can be extracted using the `get_waker()` function. +macro_rules! future { + ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop: AtomicUsize = AtomicUsize::new(0); + static WAKER: AtomicWaker = AtomicWaker::new(); + + let ($name, $get_waker) = { + struct Fut(#[allow(dead_code)] Box); + + impl Future for Fut { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + WAKER.register(cx.waker()); + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(400)); + Poll::Pending + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + (Fut(Box::new(0)), || WAKER.take().unwrap()) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, chan, SCHED, DROP)` +// +// The schedule function `s` pushes the task into `chan`. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Receiver `chan` extracts the task when it is scheduled. +macro_rules! 
schedule { + ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let ($name, $chan) = { + let (s, r) = flume::unbounded(); + + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + let sched = move |runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + s.send(runnable).unwrap(); + }; + + (sched, r) + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn wake_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, _task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 2); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + }) + .add(|| { + thread::sleep(ms(200)); + + waker.wake_by_ref(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 2); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + }) + .run(); + + chan.recv().unwrap(); + drop(get_waker()); +} + +#[test] +fn cancel_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn wake_and_cancel_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + waker.wake(); + 
assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn cancel_and_wake_during_run() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + waker.wake_by_ref(); + let runnable = chan.recv().unwrap(); + + Parallel::new() + .add(|| { + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .add(|| { + thread::sleep(ms(200)); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + waker.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + thread::sleep(ms(400)); + + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); + }) + .run(); +} + +#[test] +fn drop_last_waker() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + let waker = get_waker(); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(waker); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + + chan.recv().unwrap().run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[test] +fn cancel_last_task() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(task); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + 
assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + + chan.recv().unwrap().run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[test] +fn drop_last_task() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + + runnable.run(); + drop(get_waker()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + task.detach(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 1); + + chan.recv().unwrap().run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} diff --git a/external/vendor/async-task/tests/waker_ready.rs b/external/vendor/async-task/tests/waker_ready.rs new file mode 100644 index 0000000000..b6f6b5fdf8 --- /dev/null +++ b/external/vendor/async-task/tests/waker_ready.rs @@ -0,0 +1,279 @@ +use std::cell::Cell; +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::task::{Context, Poll}; +use std::thread; +use std::time::Duration; + +use async_task::Runnable; +use atomic_waker::AtomicWaker; + +// Creates a future with event counters. +// +// Usage: `future!(f, get_waker, POLL, DROP)` +// +// The future `f` always sleeps for 200 ms, and returns `Poll::Ready` the second time it is polled. +// When it gets polled, `POLL` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Every time the future is run, it stores the waker into a global variable. +// This waker can be extracted using the `get_waker()` function. +macro_rules! future { + ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { + static $poll: AtomicUsize = AtomicUsize::new(0); + static $drop: AtomicUsize = AtomicUsize::new(0); + static WAKER: AtomicWaker = AtomicWaker::new(); + + let ($name, $get_waker) = { + struct Fut(Cell, #[allow(dead_code)] Box); + + impl Future for Fut { + type Output = Box; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + WAKER.register(cx.waker()); + $poll.fetch_add(1, Ordering::SeqCst); + thread::sleep(ms(200)); + + if self.0.get() { + Poll::Ready(Box::new(0)) + } else { + self.0.set(true); + Poll::Pending + } + } + } + + impl Drop for Fut { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + (Fut(Cell::new(false), Box::new(0)), || WAKER.take().unwrap()) + }; + }; +} + +// Creates a schedule function with event counters. +// +// Usage: `schedule!(s, chan, SCHED, DROP)` +// +// The schedule function `s` pushes the task into `chan`. +// When it gets invoked, `SCHED` is incremented. +// When it gets dropped, `DROP` is incremented. +// +// Receiver `chan` extracts the task when it is scheduled. +macro_rules! 
schedule { + ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { + static $drop: AtomicUsize = AtomicUsize::new(0); + static $sched: AtomicUsize = AtomicUsize::new(0); + + let ($name, $chan) = { + let (s, r) = flume::unbounded(); + + struct Guard(#[allow(dead_code)] Box); + + impl Drop for Guard { + fn drop(&mut self) { + $drop.fetch_add(1, Ordering::SeqCst); + } + } + + let guard = Guard(Box::new(0)); + let sched = move |runnable: Runnable| { + let _ = &guard; + $sched.fetch_add(1, Ordering::SeqCst); + s.send(runnable).unwrap(); + }; + + (sched, r) + }; + }; +} + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn wake() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (mut runnable, task) = async_task::spawn(f, s); + task.detach(); + + assert!(chan.is_empty()); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake(); + runnable = chan.recv().unwrap(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[test] +fn wake_by_ref() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (mut runnable, task) = async_task::spawn(f, s); + task.detach(); + + assert!(chan.is_empty()); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake_by_ref(); + runnable = chan.recv().unwrap(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake_by_ref(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[allow(clippy::redundant_clone)] // This is intentional +#[test] +fn clone() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (mut runnable, task) = async_task::spawn(f, s); + task.detach(); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 
0); + + let w2 = get_waker().clone(); + let w3 = w2.clone(); + let w4 = w3.clone(); + w4.wake(); + + runnable = chan.recv().unwrap(); + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + w3.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + drop(w2); + drop(get_waker()); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); +} + +#[test] +fn wake_dropped() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + task.detach(); + + runnable.run(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + let waker = get_waker(); + + waker.wake_by_ref(); + drop(chan.recv().unwrap()); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + waker.wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} + +#[test] +fn wake_completed() { + future!(f, get_waker, POLL, DROP_F); + schedule!(s, chan, SCHEDULE, DROP_S); + let (runnable, task) = async_task::spawn(f, s); + task.detach(); + + runnable.run(); + let waker = get_waker(); + assert_eq!(POLL.load(Ordering::SeqCst), 1); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); + assert_eq!(DROP_F.load(Ordering::SeqCst), 0); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + waker.wake(); + chan.recv().unwrap().run(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 0); + assert_eq!(chan.len(), 0); + + get_waker().wake(); + assert_eq!(POLL.load(Ordering::SeqCst), 2); + assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); + assert_eq!(DROP_F.load(Ordering::SeqCst), 1); + assert_eq!(DROP_S.load(Ordering::SeqCst), 1); + assert_eq!(chan.len(), 0); +} diff --git a/external/vendor/concurrent-queue/.cargo-checksum.json b/external/vendor/concurrent-queue/.cargo-checksum.json new file mode 100644 index 0000000000..ec22307e94 --- /dev/null +++ b/external/vendor/concurrent-queue/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo_vcs_info.json":"a70c1490e6daa375beb4928e5f4f97a6d9d6ba215fb4ff8b03ebc02a58a16439","CHANGELOG.md":"e9a4a11edce8b62146fdade24e1a74ee624601b2efcaa7035359c464a1ff7ff7","Cargo.toml":"d14f713829a83746178dd8a52732e1d106c895b3b4370bb9436fb190a2d763b2","Cargo.toml.orig":"18cfe5d32e53cdd29e2566612323ba36657b9cf2de41f7fae2a13e3880b9f458","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"d7a326783ef72b063a5a237b8c64e209e80fe61b9dda20e4686b5d8b19b16fb1","benches/bench.rs":"6bac7fcdfbd1b1caa2b36089a347fb120091b95ca7bd399249a96f1271e1bf08","src/bounded.rs":"f161cc0e03f59cc764a44dc0782f7fcef7325fc328dfc8cb8c7fd608fc259cc8","src/lib.rs":"dc69f8a48cc28fe73ea1be88d77cd1aba98947d5a673019e61c630cc04c537ad","src/single.rs":"610671ffb6f3b3bc9d375b99f4e004c61eece74caa29c2a3af6977d4764185f4","src/sync.rs":"7dc9bba96eda875ee3a1e5b808e4e2317cdd03293a38492a214e26e538159eef","src/unbounded.rs":"e90ea841f3f1eac5503b1c3cd2949de64956fc6a164ca65150c6c2bba02d0e16","tests/bounded.rs":"07a357eae995a79c5b6ac586037a86ed49df754ef3893d16891dc3c686299c6b","tests/loom.rs":"63e40d2598f80c97cada351c8db9c8d5e79d97bae870bdf9fe510d2b21510616","tests/single.rs":"7866f94d1c350e9a860aab550165806a8422649845ac6e9c95045886ce3e7659","tests/unbounded.rs":"3f49e41c33c14ab7ac255ef48c0af4f0f1cfcc9352fc73f21918df3039ff10d9"},"package":"4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"} \ No newline at end of file diff --git a/external/vendor/concurrent-queue/.cargo_vcs_info.json b/external/vendor/concurrent-queue/.cargo_vcs_info.json new file mode 100644 index 0000000000..33d501db65 --- /dev/null +++ b/external/vendor/concurrent-queue/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "e874f701f8413de01948c4903f894e5c845d8950" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/external/vendor/concurrent-queue/CHANGELOG.md b/external/vendor/concurrent-queue/CHANGELOG.md new file mode 100644 index 0000000000..f4f8bfd27f --- /dev/null +++ b/external/vendor/concurrent-queue/CHANGELOG.md @@ -0,0 +1,69 @@ +# Version 2.5.0 + +- Add a `force_push` method that can be used to add an element to the queue by displacing another. (#58) +- Make `ConcurrentQueue::unbounded()` into a `const` function. (#67) +- Fix a compilation error in the Loom implementation. (#65) + +# Version 2.4.0 + +- Remove unnecessary heap allocations from inside of the `ConcurrentQueue` type. (#53) + +# Version 2.3.0 + +- Implement `UnwindSafe` without libstd. (#49) +- Bump `fastrand` to `v2.0.0`. (#43) +- Use inline assembly in the `full_fence` funtion. (#47) + +# Version 2.2.0 + +- Add the try_iter method. (#36) + +# Version 2.1.0 + +- Update `portable-atomic` to 1.0. (#33) + +# Version 2.0.0 + +- Add support for the `portable-atomic` and `loom` crates. (#27) +- **Breaking:** Add an `std` feature that can be disabled to use this crate on `no_std` platforms. (#22) +- Replace usage of `cache-padded` with `crossbeam-utils`. (#26) + +# Version 1.2.4 + +- Fix fence on x86 and miri. (#18) +- Revert 1.2.3. (#18) + +# Version 1.2.3 + +**Note:** This release has been yanked, see #17 for details. + +- Fix fence on non-x86 architectures and miri. (#16) + +# Version 1.2.2 + +- Add a special, efficient `bounded(1)` implementation. + +# Version 1.2.1 + +- In the bounded queue, use boxed slice instead of raw pointers. + +# Version 1.2.0 + +- Update dependencies. 
+- Implement `UnwindSafe` and `RefUnwindSafe` for `ConcurrentQueue`. + +# Version 1.1.2 + +- Optimize `SeqCst` fences. + +# Version 1.1.1 + +- Clarify errors in docs. + +# Version 1.1.0 + +- Add extra methods to error types. + +# Version 1.0.0 + +- Initial version diff --git a/external/vendor/concurrent-queue/Cargo.toml b/external/vendor/concurrent-queue/Cargo.toml new file mode 100644 index 0000000000..cdce2b4b6b --- /dev/null +++ b/external/vendor/concurrent-queue/Cargo.toml @@ -0,0 +1,72 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60" +name = "concurrent-queue" +version = "2.5.0" +authors = [ + "Stjepan Glavina ", + "Taiki Endo ", + "John Nunley ", +] +exclude = ["/.*"] +description = "Concurrent multi-producer multi-consumer queue" +readme = "README.md" +keywords = [ + "channel", + "mpmc", + "spsc", + "spmc", + "mpsc", +] +categories = ["concurrency"] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/concurrent-queue" + +[lib] +bench = false + +[[bench]] +name = "bench" +harness = false + +[dependencies.crossbeam-utils] +version = "0.8.11" +default-features = false + +[dependencies.portable-atomic] +version = "1" +optional = true +default-features = false + +[dev-dependencies.criterion] +version = "0.5" +features = ["cargo_bench_support"] +default-features = false + +[dev-dependencies.easy-parallel] +version = "3.1.0" + +[dev-dependencies.fastrand] +version = "2.0.0" + +[features] +default = ["std"] +std = [] + +[target."cfg(loom)".dependencies.loom] +version = "0.7" +optional = true + +[target."cfg(target_family = \"wasm\")".dev-dependencies.wasm-bindgen-test] +version = "0.3" diff --git a/external/vendor/concurrent-queue/Cargo.toml.orig b/external/vendor/concurrent-queue/Cargo.toml.orig new file mode 100644 index 0000000000..462958cb74 --- /dev/null +++ b/external/vendor/concurrent-queue/Cargo.toml.orig @@ -0,0 +1,47 @@ +[package] +name = "concurrent-queue" +# When publishing a new version: +# - Update CHANGELOG.md +# - Create "v2.x.y" git tag +version = "2.5.0" +authors = [ + "Stjepan Glavina ", + "Taiki Endo ", + "John Nunley " +] +edition = "2021" +rust-version = "1.60" +description = "Concurrent multi-producer multi-consumer queue" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/concurrent-queue" +keywords = ["channel", "mpmc", "spsc", "spmc", "mpsc"] +categories = ["concurrency"] +exclude = ["/.*"] + +[lib] +bench = false + +[dependencies] +crossbeam-utils = { version = "0.8.11", default-features = false } +portable-atomic = { version = "1", default-features = false, optional = true } + +# Enables loom testing. This feature is permanently unstable and the API may +# change at any time. 
+[target.'cfg(loom)'.dependencies] +loom = { version = "0.7", optional = true } + +[[bench]] +name = "bench" +harness = false + +[dev-dependencies] +criterion = { version = "0.5", features = ["cargo_bench_support"], default-features = false } +easy-parallel = "3.1.0" +fastrand = "2.0.0" + +[target.'cfg(target_family = "wasm")'.dev-dependencies] +wasm-bindgen-test = "0.3" + +[features] +default = ["std"] +std = [] diff --git a/external/vendor/concurrent-queue/LICENSE-APACHE b/external/vendor/concurrent-queue/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/external/vendor/concurrent-queue/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/external/vendor/concurrent-queue/LICENSE-MIT b/external/vendor/concurrent-queue/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/external/vendor/concurrent-queue/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/external/vendor/concurrent-queue/README.md b/external/vendor/concurrent-queue/README.md new file mode 100644 index 0000000000..dfa9871d99 --- /dev/null +++ b/external/vendor/concurrent-queue/README.md @@ -0,0 +1,51 @@ +# concurrent-queue + +[![Build](https://github.com/smol-rs/concurrent-queue/workflows/Build%20and%20test/badge.svg)]( +https://github.com/smol-rs/concurrent-queue/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/concurrent-queue) +[![Cargo](https://img.shields.io/crates/v/concurrent-queue.svg)]( +https://crates.io/crates/concurrent-queue) +[![Documentation](https://docs.rs/concurrent-queue/badge.svg)]( +https://docs.rs/concurrent-queue) + +A concurrent multi-producer multi-consumer queue. + +There are two kinds of queues: + +1. Bounded queue with limited capacity. +2. Unbounded queue with unlimited capacity. + +Queues also have the capability to get closed at any point. When closed, no more items can be +pushed into the queue, although the remaining items can still be popped. 
+ +These features make it easy to build channels similar to `std::sync::mpsc` on top of this +crate. + +## Examples + +```rust +use concurrent_queue::ConcurrentQueue; + +let q = ConcurrentQueue::unbounded(); +q.push(1).unwrap(); +q.push(2).unwrap(); + +assert_eq!(q.pop(), Ok(1)); +assert_eq!(q.pop(), Ok(2)); +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/external/vendor/concurrent-queue/benches/bench.rs b/external/vendor/concurrent-queue/benches/bench.rs new file mode 100644 index 0000000000..6e82019dda --- /dev/null +++ b/external/vendor/concurrent-queue/benches/bench.rs @@ -0,0 +1,93 @@ +use std::{any::type_name, fmt::Debug}; + +use concurrent_queue::{ConcurrentQueue, PopError}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use easy_parallel::Parallel; + +const COUNT: usize = 100_000; +const THREADS: usize = 7; + +fn spsc(recv: &ConcurrentQueue, send: &ConcurrentQueue) { + Parallel::new() + .add(|| loop { + match recv.pop() { + Ok(_) => (), + Err(PopError::Empty) => (), + Err(PopError::Closed) => break, + } + }) + .add(|| { + for _ in 0..COUNT { + send.push(T::default()).unwrap(); + } + send.close(); + }) + .run(); +} + +fn mpsc(recv: &ConcurrentQueue, send: &ConcurrentQueue) { + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + send.push(T::default()).unwrap(); + } + }) + .add(|| { + let mut recieved = 0; + while recieved < THREADS * COUNT { + match recv.pop() { + Ok(_) => recieved += 1, + Err(PopError::Empty) => (), + Err(PopError::Closed) => unreachable!(), + } + } + }) + .run(); +} + +fn single_thread( + recv: &ConcurrentQueue, + send: &ConcurrentQueue, +) { + for _ in 0..COUNT { + send.push(T::default()).unwrap(); + } + for _ in 0..COUNT { + recv.pop().unwrap(); + } +} + +// Because we can't pass generic functions as const parameters. +macro_rules! 
bench_all( + ($name:ident, $f:ident) => { + fn $name(c: &mut Criterion) { + fn helper(c: &mut Criterion) { + let name = format!("unbounded_{}_{}", stringify!($f), type_name::()); + + c.bench_function(&name, |b| b.iter(|| { + let q = ConcurrentQueue::unbounded(); + $f::(black_box(&q), black_box(&q)); + })); + + let name = format!("bounded_{}_{}", stringify!($f), type_name::()); + + c.bench_function(&name, |b| b.iter(|| { + let q = ConcurrentQueue::bounded(THREADS * COUNT); + $f::(black_box(&q), black_box(&q)); + })); + } + helper::(c); + helper::(c); + helper::(c); + helper::(c); + helper::(c); + } + } +); + +bench_all!(bench_spsc, spsc); +bench_all!(bench_mpsc, mpsc); +bench_all!(bench_single_thread, single_thread); + +criterion_group!(generic_group, bench_single_thread, bench_spsc, bench_mpsc); +criterion_main!(generic_group); diff --git a/external/vendor/concurrent-queue/src/bounded.rs b/external/vendor/concurrent-queue/src/bounded.rs new file mode 100644 index 0000000000..dab3a2953b --- /dev/null +++ b/external/vendor/concurrent-queue/src/bounded.rs @@ -0,0 +1,408 @@ +use alloc::{boxed::Box, vec::Vec}; +use core::mem::MaybeUninit; + +use crossbeam_utils::CachePadded; + +use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::cell::UnsafeCell; +#[allow(unused_imports)] +use crate::sync::prelude::*; +use crate::{busy_wait, ForcePushError, PopError, PushError}; + +/// A slot in a queue. +struct Slot { + /// The current stamp. + stamp: AtomicUsize, + + /// The value in this slot. + value: UnsafeCell>, +} + +/// A bounded queue. +pub struct Bounded { + /// The head of the queue. + /// + /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but + /// packed into a single `usize`. The lower bits represent the index, while the upper bits + /// represent the lap. The mark bit in the head is always zero. + /// + /// Values are popped from the head of the queue. + head: CachePadded, + + /// The tail of the queue. + /// + /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but + /// packed into a single `usize`. The lower bits represent the index, while the upper bits + /// represent the lap. The mark bit indicates that the queue is closed. + /// + /// Values are pushed into the tail of the queue. + tail: CachePadded, + + /// The buffer holding slots. + buffer: Box<[Slot]>, + + /// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`. + one_lap: usize, + + /// If this bit is set in the tail, that means the queue is closed. + mark_bit: usize, +} + +impl Bounded { + /// Creates a new bounded queue. + pub fn new(cap: usize) -> Bounded { + assert!(cap > 0, "capacity must be positive"); + + // Head is initialized to `{ lap: 0, mark: 0, index: 0 }`. + let head = 0; + // Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`. + let tail = 0; + + // Allocate a buffer of `cap` slots initialized with stamps. + let mut buffer = Vec::with_capacity(cap); + for i in 0..cap { + // Set the stamp to `{ lap: 0, mark: 0, index: i }`. + buffer.push(Slot { + stamp: AtomicUsize::new(i), + value: UnsafeCell::new(MaybeUninit::uninit()), + }); + } + + // Compute constants `mark_bit` and `one_lap`. + let mark_bit = (cap + 1).next_power_of_two(); + let one_lap = mark_bit * 2; + + Bounded { + buffer: buffer.into(), + one_lap, + mark_bit, + head: CachePadded::new(AtomicUsize::new(head)), + tail: CachePadded::new(AtomicUsize::new(tail)), + } + } + + /// Attempts to push an item into the queue. 
+ pub fn push(&self, value: T) -> Result<(), PushError> { + self.push_or_else(value, |value, tail, _, _| { + let head = self.head.load(Ordering::Relaxed); + + // If the head lags one lap behind the tail as well... + if head.wrapping_add(self.one_lap) == tail { + // ...then the queue is full. + Err(PushError::Full(value)) + } else { + Ok(value) + } + }) + } + + /// Pushes an item into the queue, displacing another item if needed. + pub fn force_push(&self, value: T) -> Result, ForcePushError> { + let result = self.push_or_else(value, |value, tail, new_tail, slot| { + let head = tail.wrapping_sub(self.one_lap); + let new_head = new_tail.wrapping_sub(self.one_lap); + + // Try to move the head. + if self + .head + .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Relaxed) + .is_ok() + { + // Move the tail. + self.tail.store(new_tail, Ordering::SeqCst); + + // Swap out the old value. + // SAFETY: We know this is initialized, since it's covered by the current queue. + let old = unsafe { + slot.value + .with_mut(|slot| slot.replace(MaybeUninit::new(value)).assume_init()) + }; + + // Update the stamp. + slot.stamp.store(tail + 1, Ordering::Release); + + // Return a PushError. + Err(PushError::Full(old)) + } else { + Ok(value) + } + }); + + match result { + Ok(()) => Ok(None), + Err(PushError::Full(old_value)) => Ok(Some(old_value)), + Err(PushError::Closed(value)) => Err(ForcePushError(value)), + } + } + + /// Attempts to push an item into the queue, running a closure on failure. + /// + /// `fail` is run when there is no more room left in the tail of the queue. The parameters of + /// this function are as follows: + /// + /// - The item that failed to push. + /// - The value of `self.tail` before the new value would be inserted. + /// - The value of `self.tail` after the new value would be inserted. + /// - The slot that we attempted to push into. + /// + /// If `fail` returns `Ok(val)`, we will try pushing `val` to the head of the queue. Otherwise, + /// this function will return the error. + fn push_or_else(&self, mut value: T, mut fail: F) -> Result<(), PushError> + where + F: FnMut(T, usize, usize, &Slot) -> Result>, + { + let mut tail = self.tail.load(Ordering::Relaxed); + + loop { + // Check if the queue is closed. + if tail & self.mark_bit != 0 { + return Err(PushError::Closed(value)); + } + + // Deconstruct the tail. + let index = tail & (self.mark_bit - 1); + let lap = tail & !(self.one_lap - 1); + + // Calculate the new location of the tail. + let new_tail = if index + 1 < self.buffer.len() { + // Same lap, incremented index. + // Set to `{ lap: lap, mark: 0, index: index + 1 }`. + tail + 1 + } else { + // One lap forward, index wraps around to zero. + // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`. + lap.wrapping_add(self.one_lap) + }; + + // Inspect the corresponding slot. + let slot = &self.buffer[index]; + let stamp = slot.stamp.load(Ordering::Acquire); + + // If the tail and the stamp match, we may attempt to push. + if tail == stamp { + // Try moving the tail. + match self.tail.compare_exchange_weak( + tail, + new_tail, + Ordering::SeqCst, + Ordering::Relaxed, + ) { + Ok(_) => { + // Write the value into the slot and update the stamp. + slot.value.with_mut(|slot| unsafe { + slot.write(MaybeUninit::new(value)); + }); + slot.stamp.store(tail + 1, Ordering::Release); + return Ok(()); + } + Err(t) => { + tail = t; + } + } + } else if stamp.wrapping_add(self.one_lap) == tail + 1 { + crate::full_fence(); + + // We've failed to push; run our failure closure. 
+ value = fail(value, tail, new_tail, slot)?; + + // Loom complains if there isn't an explicit busy wait here. + #[cfg(loom)] + busy_wait(); + + tail = self.tail.load(Ordering::Relaxed); + } else { + // Yield because we need to wait for the stamp to get updated. + busy_wait(); + tail = self.tail.load(Ordering::Relaxed); + } + } + } + + /// Attempts to pop an item from the queue. + pub fn pop(&self) -> Result { + let mut head = self.head.load(Ordering::Relaxed); + + loop { + // Deconstruct the head. + let index = head & (self.mark_bit - 1); + let lap = head & !(self.one_lap - 1); + + // Inspect the corresponding slot. + let slot = &self.buffer[index]; + let stamp = slot.stamp.load(Ordering::Acquire); + + // If the the stamp is ahead of the head by 1, we may attempt to pop. + if head + 1 == stamp { + let new = if index + 1 < self.buffer.len() { + // Same lap, incremented index. + // Set to `{ lap: lap, mark: 0, index: index + 1 }`. + head + 1 + } else { + // One lap forward, index wraps around to zero. + // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`. + lap.wrapping_add(self.one_lap) + }; + + // Try moving the head. + match self.head.compare_exchange_weak( + head, + new, + Ordering::SeqCst, + Ordering::Relaxed, + ) { + Ok(_) => { + // Read the value from the slot and update the stamp. + let value = slot + .value + .with_mut(|slot| unsafe { slot.read().assume_init() }); + slot.stamp + .store(head.wrapping_add(self.one_lap), Ordering::Release); + return Ok(value); + } + Err(h) => { + head = h; + } + } + } else if stamp == head { + crate::full_fence(); + let tail = self.tail.load(Ordering::Relaxed); + + // If the tail equals the head, that means the queue is empty. + if (tail & !self.mark_bit) == head { + // Check if the queue is closed. + if tail & self.mark_bit != 0 { + return Err(PopError::Closed); + } else { + return Err(PopError::Empty); + } + } + + // Loom complains if there isn't a busy-wait here. + #[cfg(loom)] + busy_wait(); + + head = self.head.load(Ordering::Relaxed); + } else { + // Yield because we need to wait for the stamp to get updated. + busy_wait(); + head = self.head.load(Ordering::Relaxed); + } + } + } + + /// Returns the number of items in the queue. + pub fn len(&self) -> usize { + loop { + // Load the tail, then load the head. + let tail = self.tail.load(Ordering::SeqCst); + let head = self.head.load(Ordering::SeqCst); + + // If the tail didn't change, we've got consistent values to work with. + if self.tail.load(Ordering::SeqCst) == tail { + let hix = head & (self.mark_bit - 1); + let tix = tail & (self.mark_bit - 1); + + return if hix < tix { + tix - hix + } else if hix > tix { + self.buffer.len() - hix + tix + } else if (tail & !self.mark_bit) == head { + 0 + } else { + self.buffer.len() + }; + } + } + } + + /// Returns `true` if the queue is empty. + pub fn is_empty(&self) -> bool { + let head = self.head.load(Ordering::SeqCst); + let tail = self.tail.load(Ordering::SeqCst); + + // Is the tail equal to the head? + // + // Note: If the head changes just before we load the tail, that means there was a moment + // when the queue was not empty, so it is safe to just return `false`. + (tail & !self.mark_bit) == head + } + + /// Returns `true` if the queue is full. + pub fn is_full(&self) -> bool { + let tail = self.tail.load(Ordering::SeqCst); + let head = self.head.load(Ordering::SeqCst); + + // Is the head lagging one lap behind tail? 
+ // + // Note: If the tail changes just before we load the head, that means there was a moment + // when the queue was not full, so it is safe to just return `false`. + head.wrapping_add(self.one_lap) == tail & !self.mark_bit + } + + /// Returns the capacity of the queue. + pub fn capacity(&self) -> usize { + self.buffer.len() + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue. + pub fn close(&self) -> bool { + let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst); + tail & self.mark_bit == 0 + } + + /// Returns `true` if the queue is closed. + pub fn is_closed(&self) -> bool { + self.tail.load(Ordering::SeqCst) & self.mark_bit != 0 + } +} + +impl Drop for Bounded { + fn drop(&mut self) { + // Get the index of the head. + let Self { + head, + tail, + buffer, + mark_bit, + .. + } = self; + + let mark_bit = *mark_bit; + + head.with_mut(|&mut head| { + tail.with_mut(|&mut tail| { + let hix = head & (mark_bit - 1); + let tix = tail & (mark_bit - 1); + + let len = if hix < tix { + tix - hix + } else if hix > tix { + buffer.len() - hix + tix + } else if (tail & !mark_bit) == head { + 0 + } else { + buffer.len() + }; + + // Loop over all slots that hold a value and drop them. + for i in 0..len { + // Compute the index of the next slot holding a value. + let index = if hix + i < buffer.len() { + hix + i + } else { + hix + i - buffer.len() + }; + + // Drop the value in the slot. + let slot = &buffer[index]; + slot.value.with_mut(|slot| unsafe { + let value = &mut *slot; + value.as_mut_ptr().drop_in_place(); + }); + } + }); + }); + } +} diff --git a/external/vendor/concurrent-queue/src/lib.rs b/external/vendor/concurrent-queue/src/lib.rs new file mode 100644 index 0000000000..a4d26b501f --- /dev/null +++ b/external/vendor/concurrent-queue/src/lib.rs @@ -0,0 +1,660 @@ +//! A concurrent multi-producer multi-consumer queue. +//! +//! There are two kinds of queues: +//! +//! 1. [Bounded] queue with limited capacity. +//! 2. [Unbounded] queue with unlimited capacity. +//! +//! Queues also have the capability to get [closed] at any point. When closed, no more items can be +//! pushed into the queue, although the remaining items can still be popped. +//! +//! These features make it easy to build channels similar to [`std::sync::mpsc`] on top of this +//! crate. +//! +//! # Examples +//! +//! ``` +//! use concurrent_queue::ConcurrentQueue; +//! +//! let q = ConcurrentQueue::unbounded(); +//! q.push(1).unwrap(); +//! q.push(2).unwrap(); +//! +//! assert_eq!(q.pop(), Ok(1)); +//! assert_eq!(q.pop(), Ok(2)); +//! ``` +//! +//! # Features +//! +//! `concurrent-queue` uses an `std` default feature. With this feature enabled, this crate will +//! use [`std::thread::yield_now`] to avoid busy waiting in tight loops. However, with this +//! feature disabled, [`core::hint::spin_loop`] will be used instead. Disabling `std` will allow +//! this crate to be used on `no_std` platforms at the potential expense of more busy waiting. +//! +//! There is also a `portable-atomic` feature, which uses a polyfill from the +//! [`portable-atomic`] crate to provide atomic operations on platforms that do not support them. +//! See the [`README`] for the [`portable-atomic`] crate for more information on how to use it. +//! Note that even with this feature enabled, `concurrent-queue` still requires a global allocator +//! to be available. See the documentation for the [`std::alloc::GlobalAlloc`] trait for more +//! information. +//! +//! [Bounded]: `ConcurrentQueue::bounded()` +//! 
[Unbounded]: `ConcurrentQueue::unbounded()` +//! [closed]: `ConcurrentQueue::close()` +//! [`portable-atomic`]: https://crates.io/crates/portable-atomic +//! [`README`]: https://github.com/taiki-e/portable-atomic/blob/main/README.md#optional-cfg + +#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![no_std] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] + +extern crate alloc; +#[cfg(feature = "std")] +extern crate std; + +use core::fmt; +use core::panic::{RefUnwindSafe, UnwindSafe}; +use sync::atomic::{self, Ordering}; + +#[cfg(feature = "std")] +use std::error; + +use crate::bounded::Bounded; +use crate::single::Single; +use crate::sync::busy_wait; +use crate::unbounded::Unbounded; + +mod bounded; +mod single; +mod unbounded; + +mod sync; + +/// Make the given function const if the given condition is true. +macro_rules! const_fn { + ( + const_if: #[cfg($($cfg:tt)+)]; + $(#[$($attr:tt)*])* + $vis:vis const fn $($rest:tt)* + ) => { + #[cfg($($cfg)+)] + $(#[$($attr)*])* + $vis const fn $($rest)* + #[cfg(not($($cfg)+))] + $(#[$($attr)*])* + $vis fn $($rest)* + }; +} + +pub(crate) use const_fn; + +/// A concurrent queue. +/// +/// # Examples +/// +/// ``` +/// use concurrent_queue::{ConcurrentQueue, PopError, PushError}; +/// +/// let q = ConcurrentQueue::bounded(2); +/// +/// assert_eq!(q.push('a'), Ok(())); +/// assert_eq!(q.push('b'), Ok(())); +/// assert_eq!(q.push('c'), Err(PushError::Full('c'))); +/// +/// assert_eq!(q.pop(), Ok('a')); +/// assert_eq!(q.pop(), Ok('b')); +/// assert_eq!(q.pop(), Err(PopError::Empty)); +/// ``` +pub struct ConcurrentQueue(Inner); + +unsafe impl Send for ConcurrentQueue {} +unsafe impl Sync for ConcurrentQueue {} + +impl UnwindSafe for ConcurrentQueue {} +impl RefUnwindSafe for ConcurrentQueue {} + +#[allow(clippy::large_enum_variant)] +enum Inner { + Single(Single), + Bounded(Bounded), + Unbounded(Unbounded), +} + +impl ConcurrentQueue { + /// Creates a new bounded queue. + /// + /// The queue allocates enough space for `cap` items. + /// + /// # Panics + /// + /// If the capacity is zero, this constructor will panic. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::bounded(100); + /// ``` + pub fn bounded(cap: usize) -> ConcurrentQueue { + if cap == 1 { + ConcurrentQueue(Inner::Single(Single::new())) + } else { + ConcurrentQueue(Inner::Bounded(Bounded::new(cap))) + } + } + + const_fn!( + const_if: #[cfg(not(loom))]; + /// Creates a new unbounded queue. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::unbounded(); + /// ``` + pub const fn unbounded() -> ConcurrentQueue { + ConcurrentQueue(Inner::Unbounded(Unbounded::new())) + } + ); + + /// Attempts to push an item into the queue. + /// + /// If the queue is full or closed, the item is returned back as an error. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::{ConcurrentQueue, PushError}; + /// + /// let q = ConcurrentQueue::bounded(1); + /// + /// // Push succeeds because there is space in the queue. + /// assert_eq!(q.push(10), Ok(())); + /// + /// // Push errors because the queue is now full. 
+ /// assert_eq!(q.push(20), Err(PushError::Full(20))); + /// + /// // Close the queue, which will prevent further pushes. + /// q.close(); + /// + /// // Pushing now errors indicating the queue is closed. + /// assert_eq!(q.push(20), Err(PushError::Closed(20))); + /// + /// // Pop the single item in the queue. + /// assert_eq!(q.pop(), Ok(10)); + /// + /// // Even though there is space, no more items can be pushed. + /// assert_eq!(q.push(20), Err(PushError::Closed(20))); + /// ``` + pub fn push(&self, value: T) -> Result<(), PushError> { + match &self.0 { + Inner::Single(q) => q.push(value), + Inner::Bounded(q) => q.push(value), + Inner::Unbounded(q) => q.push(value), + } + } + + /// Push an element into the queue, potentially displacing another element. + /// + /// Attempts to push an element into the queue. If the queue is full, one item from the + /// queue is replaced with the provided item. The displaced item is returned as `Some(T)`. + /// If the queue is closed, an error is returned. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::{ConcurrentQueue, ForcePushError, PushError}; + /// + /// let q = ConcurrentQueue::bounded(3); + /// + /// // We can push to the queue. + /// for i in 1..=3 { + /// assert_eq!(q.force_push(i), Ok(None)); + /// } + /// + /// // Push errors because the queue is now full. + /// assert_eq!(q.push(4), Err(PushError::Full(4))); + /// + /// // Pushing a new value replaces the old ones. + /// assert_eq!(q.force_push(5), Ok(Some(1))); + /// assert_eq!(q.force_push(6), Ok(Some(2))); + /// + /// // Close the queue to stop further pushes. + /// q.close(); + /// + /// // Pushing will return an error. + /// assert_eq!(q.force_push(7), Err(ForcePushError(7))); + /// + /// // Popping items will return the force-pushed ones. + /// assert_eq!(q.pop(), Ok(3)); + /// assert_eq!(q.pop(), Ok(5)); + /// assert_eq!(q.pop(), Ok(6)); + /// ``` + pub fn force_push(&self, value: T) -> Result, ForcePushError> { + match &self.0 { + Inner::Single(q) => q.force_push(value), + Inner::Bounded(q) => q.force_push(value), + Inner::Unbounded(q) => match q.push(value) { + Ok(()) => Ok(None), + Err(PushError::Closed(value)) => Err(ForcePushError(value)), + Err(PushError::Full(_)) => unreachable!(), + }, + } + } + + /// Attempts to pop an item from the queue. + /// + /// If the queue is empty, an error is returned. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::{ConcurrentQueue, PopError}; + /// + /// let q = ConcurrentQueue::bounded(1); + /// + /// // Pop errors when the queue is empty. + /// assert_eq!(q.pop(), Err(PopError::Empty)); + /// + /// // Push one item and close the queue. + /// assert_eq!(q.push(10), Ok(())); + /// q.close(); + /// + /// // Remaining items can be popped. + /// assert_eq!(q.pop(), Ok(10)); + /// + /// // Again, pop errors when the queue is empty, + /// // but now also indicates that the queue is closed. + /// assert_eq!(q.pop(), Err(PopError::Closed)); + /// ``` + pub fn pop(&self) -> Result { + match &self.0 { + Inner::Single(q) => q.pop(), + Inner::Bounded(q) => q.pop(), + Inner::Unbounded(q) => q.pop(), + } + } + + /// Get an iterator over the items in the queue. + /// + /// The iterator will continue until the queue is empty or closed. It will never block; + /// if the queue is empty, the iterator will return `None`. If new items are pushed into + /// the queue, the iterator may return `Some` in the future after returning `None`. 
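+ /// Each call to `next` simply attempts a `pop` and converts the error case into `None`,
+ /// so the iterator observes whatever happens to be in the queue at that moment.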
+ /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::bounded(5); + /// q.push(1).unwrap(); + /// q.push(2).unwrap(); + /// q.push(3).unwrap(); + /// + /// let mut iter = q.try_iter(); + /// assert_eq!(iter.by_ref().sum::(), 6); + /// assert_eq!(iter.next(), None); + /// + /// // Pushing more items will make them available to the iterator. + /// q.push(4).unwrap(); + /// assert_eq!(iter.next(), Some(4)); + /// assert_eq!(iter.next(), None); + /// ``` + pub fn try_iter(&self) -> TryIter<'_, T> { + TryIter { queue: self } + } + + /// Returns `true` if the queue is empty. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::unbounded(); + /// + /// assert!(q.is_empty()); + /// q.push(1).unwrap(); + /// assert!(!q.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + match &self.0 { + Inner::Single(q) => q.is_empty(), + Inner::Bounded(q) => q.is_empty(), + Inner::Unbounded(q) => q.is_empty(), + } + } + + /// Returns `true` if the queue is full. + /// + /// An unbounded queue is never full. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::bounded(1); + /// + /// assert!(!q.is_full()); + /// q.push(1).unwrap(); + /// assert!(q.is_full()); + /// ``` + pub fn is_full(&self) -> bool { + match &self.0 { + Inner::Single(q) => q.is_full(), + Inner::Bounded(q) => q.is_full(), + Inner::Unbounded(q) => q.is_full(), + } + } + + /// Returns the number of items in the queue. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::unbounded(); + /// assert_eq!(q.len(), 0); + /// + /// assert_eq!(q.push(10), Ok(())); + /// assert_eq!(q.len(), 1); + /// + /// assert_eq!(q.push(20), Ok(())); + /// assert_eq!(q.len(), 2); + /// ``` + pub fn len(&self) -> usize { + match &self.0 { + Inner::Single(q) => q.len(), + Inner::Bounded(q) => q.len(), + Inner::Unbounded(q) => q.len(), + } + } + + /// Returns the capacity of the queue. + /// + /// Unbounded queues have infinite capacity, represented as [`None`]. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::bounded(7); + /// assert_eq!(q.capacity(), Some(7)); + /// + /// let q = ConcurrentQueue::::unbounded(); + /// assert_eq!(q.capacity(), None); + /// ``` + pub fn capacity(&self) -> Option { + match &self.0 { + Inner::Single(_) => Some(1), + Inner::Bounded(q) => Some(q.capacity()), + Inner::Unbounded(_) => None, + } + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue, or `false` if it was already closed. + /// + /// When a queue is closed, no more items can be pushed but the remaining items can still be + /// popped. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::{ConcurrentQueue, PopError, PushError}; + /// + /// let q = ConcurrentQueue::unbounded(); + /// assert_eq!(q.push(10), Ok(())); + /// + /// assert!(q.close()); // `true` because this call closes the queue. + /// assert!(!q.close()); // `false` because the queue is already closed. + /// + /// // Cannot push any more items when closed. + /// assert_eq!(q.push(20), Err(PushError::Closed(20))); + /// + /// // Remaining items can still be popped. + /// assert_eq!(q.pop(), Ok(10)); + /// + /// // When no more items are present, the error is `Closed`. 
+ /// assert_eq!(q.pop(), Err(PopError::Closed)); + /// ``` + pub fn close(&self) -> bool { + match &self.0 { + Inner::Single(q) => q.close(), + Inner::Bounded(q) => q.close(), + Inner::Unbounded(q) => q.close(), + } + } + + /// Returns `true` if the queue is closed. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::unbounded(); + /// + /// assert!(!q.is_closed()); + /// q.close(); + /// assert!(q.is_closed()); + /// ``` + pub fn is_closed(&self) -> bool { + match &self.0 { + Inner::Single(q) => q.is_closed(), + Inner::Bounded(q) => q.is_closed(), + Inner::Unbounded(q) => q.is_closed(), + } + } +} + +impl fmt::Debug for ConcurrentQueue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ConcurrentQueue") + .field("len", &self.len()) + .field("capacity", &self.capacity()) + .field("is_closed", &self.is_closed()) + .finish() + } +} + +/// An iterator that pops items from a [`ConcurrentQueue`]. +/// +/// This iterator will never block; it will return `None` once the queue has +/// been exhausted. Calling `next` after `None` may yield `Some(item)` if more items +/// are pushed to the queue. +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[derive(Clone)] +pub struct TryIter<'a, T> { + queue: &'a ConcurrentQueue, +} + +impl fmt::Debug for TryIter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("Iter").field(&self.queue).finish() + } +} + +impl Iterator for TryIter<'_, T> { + type Item = T; + + fn next(&mut self) -> Option { + self.queue.pop().ok() + } +} + +/// Error which occurs when popping from an empty queue. +#[derive(Clone, Copy, Eq, PartialEq)] +pub enum PopError { + /// The queue is empty but not closed. + Empty, + + /// The queue is empty and closed. + Closed, +} + +impl PopError { + /// Returns `true` if the queue is empty but not closed. + pub fn is_empty(&self) -> bool { + match self { + PopError::Empty => true, + PopError::Closed => false, + } + } + + /// Returns `true` if the queue is empty and closed. + pub fn is_closed(&self) -> bool { + match self { + PopError::Empty => false, + PopError::Closed => true, + } + } +} + +#[cfg(feature = "std")] +impl error::Error for PopError {} + +impl fmt::Debug for PopError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PopError::Empty => write!(f, "Empty"), + PopError::Closed => write!(f, "Closed"), + } + } +} + +impl fmt::Display for PopError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PopError::Empty => write!(f, "Empty"), + PopError::Closed => write!(f, "Closed"), + } + } +} + +/// Error which occurs when pushing into a full or closed queue. +#[derive(Clone, Copy, Eq, PartialEq)] +pub enum PushError { + /// The queue is full but not closed. + Full(T), + + /// The queue is closed. + Closed(T), +} + +impl PushError { + /// Unwraps the item that couldn't be pushed. + pub fn into_inner(self) -> T { + match self { + PushError::Full(t) => t, + PushError::Closed(t) => t, + } + } + + /// Returns `true` if the queue is full but not closed. + pub fn is_full(&self) -> bool { + match self { + PushError::Full(_) => true, + PushError::Closed(_) => false, + } + } + + /// Returns `true` if the queue is closed. 
+ pub fn is_closed(&self) -> bool { + match self { + PushError::Full(_) => false, + PushError::Closed(_) => true, + } + } +} + +#[cfg(feature = "std")] +impl error::Error for PushError {} + +impl fmt::Debug for PushError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PushError::Full(t) => f.debug_tuple("Full").field(t).finish(), + PushError::Closed(t) => f.debug_tuple("Closed").field(t).finish(), + } + } +} + +impl fmt::Display for PushError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PushError::Full(_) => write!(f, "Full"), + PushError::Closed(_) => write!(f, "Closed"), + } + } +} + +/// Error that occurs when force-pushing into a full queue. +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct ForcePushError(pub T); + +impl ForcePushError { + /// Return the inner value that failed to be force-pushed. + pub fn into_inner(self) -> T { + self.0 + } +} + +impl fmt::Debug for ForcePushError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("ForcePushError").field(&self.0).finish() + } +} + +impl fmt::Display for ForcePushError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Closed") + } +} + +#[cfg(feature = "std")] +impl error::Error for ForcePushError {} + +/// Equivalent to `atomic::fence(Ordering::SeqCst)`, but in some cases faster. +#[inline] +fn full_fence() { + #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri), not(loom)))] + { + use core::{arch::asm, cell::UnsafeCell}; + // HACK(stjepang): On x86 architectures there are two different ways of executing + // a `SeqCst` fence. + // + // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction. + // 2. A `lock ` instruction. + // + // Both instructions have the effect of a full barrier, but empirical benchmarks have shown + // that the second one is sometimes a bit faster. + let a = UnsafeCell::new(0_usize); + // It is common to use `lock or` here, but when using a local variable, `lock not`, which + // does not change the flag, should be slightly more efficient. + // Refs: https://www.felixcloutier.com/x86/not + unsafe { + #[cfg(target_pointer_width = "64")] + asm!("lock not qword ptr [{0}]", in(reg) a.get(), options(nostack, preserves_flags)); + #[cfg(target_pointer_width = "32")] + asm!("lock not dword ptr [{0:e}]", in(reg) a.get(), options(nostack, preserves_flags)); + } + return; + } + #[allow(unreachable_code)] + { + atomic::fence(Ordering::SeqCst); + } +} diff --git a/external/vendor/concurrent-queue/src/single.rs b/external/vendor/concurrent-queue/src/single.rs new file mode 100644 index 0000000000..f88c4783a0 --- /dev/null +++ b/external/vendor/concurrent-queue/src/single.rs @@ -0,0 +1,187 @@ +use core::mem::MaybeUninit; +use core::ptr; + +use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::cell::UnsafeCell; +#[allow(unused_imports)] +use crate::sync::prelude::*; +use crate::{busy_wait, ForcePushError, PopError, PushError}; + +const LOCKED: usize = 1 << 0; +const PUSHED: usize = 1 << 1; +const CLOSED: usize = 1 << 2; + +/// A single-element queue. +pub struct Single { + state: AtomicUsize, + slot: UnsafeCell>, +} + +impl Single { + /// Creates a new single-element queue. + pub fn new() -> Single { + Single { + state: AtomicUsize::new(0), + slot: UnsafeCell::new(MaybeUninit::uninit()), + } + } + + /// Attempts to push an item into the queue. + pub fn push(&self, value: T) -> Result<(), PushError> { + // Lock and fill the slot. 
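+ // The whole queue state lives in a single `AtomicUsize`: `PUSHED` means the slot holds
+ // a value, `LOCKED` means some thread is currently writing or reading the slot, and
+ // `CLOSED` marks the queue as closed. An empty, unlocked, open queue is state `0`, so a
+ // single compare-exchange from `0` to `LOCKED | PUSHED` both claims the slot and records
+ // that a value is about to be written.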
+ let state = self + .state + .compare_exchange(0, LOCKED | PUSHED, Ordering::SeqCst, Ordering::SeqCst) + .unwrap_or_else(|x| x); + + if state == 0 { + // Write the value and unlock. + self.slot.with_mut(|slot| unsafe { + slot.write(MaybeUninit::new(value)); + }); + self.state.fetch_and(!LOCKED, Ordering::Release); + Ok(()) + } else if state & CLOSED != 0 { + Err(PushError::Closed(value)) + } else { + Err(PushError::Full(value)) + } + } + + /// Attempts to push an item into the queue, displacing another if necessary. + pub fn force_push(&self, value: T) -> Result, ForcePushError> { + // Attempt to lock the slot. + let mut state = 0; + + loop { + // Lock the slot. + let prev = self + .state + .compare_exchange(state, LOCKED | PUSHED, Ordering::SeqCst, Ordering::SeqCst) + .unwrap_or_else(|x| x); + + if prev & CLOSED != 0 { + return Err(ForcePushError(value)); + } + + if prev == state { + // If the value was pushed, swap out the value. + let prev_value = if prev & PUSHED == 0 { + // SAFETY: write is safe because we have locked the state. + self.slot.with_mut(|slot| unsafe { + slot.write(MaybeUninit::new(value)); + }); + None + } else { + // SAFETY: replace is safe because we have locked the state, and + // assume_init is safe because we have checked that the value was pushed. + let prev_value = unsafe { + self.slot.with_mut(move |slot| { + ptr::replace(slot, MaybeUninit::new(value)).assume_init() + }) + }; + Some(prev_value) + }; + + // We can unlock the slot now. + self.state.fetch_and(!LOCKED, Ordering::Release); + + // Return the old value. + return Ok(prev_value); + } + + // Try to go for the current (pushed) state. + if prev & LOCKED == 0 { + state = prev; + } else { + // State is locked. + busy_wait(); + state = prev & !LOCKED; + } + } + } + + /// Attempts to pop an item from the queue. + pub fn pop(&self) -> Result { + let mut state = PUSHED; + loop { + // Lock and empty the slot. + let prev = self + .state + .compare_exchange( + state, + (state | LOCKED) & !PUSHED, + Ordering::SeqCst, + Ordering::SeqCst, + ) + .unwrap_or_else(|x| x); + + if prev == state { + // Read the value and unlock. + let value = self + .slot + .with_mut(|slot| unsafe { slot.read().assume_init() }); + self.state.fetch_and(!LOCKED, Ordering::Release); + return Ok(value); + } + + if prev & PUSHED == 0 { + if prev & CLOSED == 0 { + return Err(PopError::Empty); + } else { + return Err(PopError::Closed); + } + } + + if prev & LOCKED == 0 { + state = prev; + } else { + busy_wait(); + state = prev & !LOCKED; + } + } + } + + /// Returns the number of items in the queue. + pub fn len(&self) -> usize { + usize::from(self.state.load(Ordering::SeqCst) & PUSHED != 0) + } + + /// Returns `true` if the queue is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns `true` if the queue is full. + pub fn is_full(&self) -> bool { + self.len() == 1 + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue. + pub fn close(&self) -> bool { + let state = self.state.fetch_or(CLOSED, Ordering::SeqCst); + state & CLOSED == 0 + } + + /// Returns `true` if the queue is closed. + pub fn is_closed(&self) -> bool { + self.state.load(Ordering::SeqCst) & CLOSED != 0 + } +} + +impl Drop for Single { + fn drop(&mut self) { + // Drop the value in the slot. 
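+ // `drop` takes `&mut self`, so no other thread can access the queue any more and the
+ // state can be read without atomic synchronization. The value is dropped only if the
+ // `PUSHED` bit is still set, i.e. it was pushed but never popped.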
+ let Self { state, slot } = self; + state.with_mut(|state| { + if *state & PUSHED != 0 { + slot.with_mut(|slot| unsafe { + let value = &mut *slot; + value.as_mut_ptr().drop_in_place(); + }); + } + }); + } +} diff --git a/external/vendor/concurrent-queue/src/sync.rs b/external/vendor/concurrent-queue/src/sync.rs new file mode 100644 index 0000000000..d1b0a89a1b --- /dev/null +++ b/external/vendor/concurrent-queue/src/sync.rs @@ -0,0 +1,114 @@ +//! Synchronization facade to choose between `core` primitives and `loom` primitives. + +#[cfg(all(feature = "portable-atomic", not(loom)))] +mod sync_impl { + pub(crate) use core::cell; + pub(crate) use portable_atomic as atomic; + + #[cfg(not(feature = "std"))] + pub(crate) use atomic::hint::spin_loop; + + #[cfg(feature = "std")] + pub(crate) use std::thread::yield_now; +} + +#[cfg(all(not(feature = "portable-atomic"), not(loom)))] +mod sync_impl { + pub(crate) use core::cell; + pub(crate) use core::sync::atomic; + + #[cfg(not(feature = "std"))] + #[inline] + pub(crate) fn spin_loop() { + #[allow(deprecated)] + atomic::spin_loop_hint(); + } + + #[cfg(feature = "std")] + pub(crate) use std::thread::yield_now; +} + +#[cfg(loom)] +mod sync_impl { + pub(crate) use loom::cell; + + pub(crate) mod atomic { + pub(crate) use loom::sync::atomic::*; + } + + #[cfg(not(feature = "std"))] + pub(crate) use loom::hint::spin_loop; + #[cfg(feature = "std")] + pub(crate) use loom::thread::yield_now; +} + +pub(crate) use sync_impl::*; + +/// Notify the CPU that we are currently busy-waiting. +#[inline] +pub(crate) fn busy_wait() { + #[cfg(feature = "std")] + yield_now(); + + #[cfg(not(feature = "std"))] + spin_loop(); +} + +#[cfg(loom)] +pub(crate) mod prelude {} + +#[cfg(not(loom))] +pub(crate) mod prelude { + use super::{atomic, cell}; + + /// Emulate `loom::UnsafeCell`'s API. + pub(crate) trait UnsafeCellExt { + type Value; + + fn with_mut(&self, f: F) -> R + where + F: FnOnce(*mut Self::Value) -> R; + } + + impl UnsafeCellExt for cell::UnsafeCell { + type Value = T; + + fn with_mut(&self, f: F) -> R + where + F: FnOnce(*mut Self::Value) -> R, + { + f(self.get()) + } + } + + /// Emulate `loom::Atomic*`'s API. + pub(crate) trait AtomicExt { + type Value; + + fn with_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut Self::Value) -> R; + } + + impl AtomicExt for atomic::AtomicUsize { + type Value = usize; + + fn with_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut Self::Value) -> R, + { + f(self.get_mut()) + } + } + + impl AtomicExt for atomic::AtomicPtr { + type Value = *mut T; + + fn with_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut Self::Value) -> R, + { + f(self.get_mut()) + } + } +} diff --git a/external/vendor/concurrent-queue/src/unbounded.rs b/external/vendor/concurrent-queue/src/unbounded.rs new file mode 100644 index 0000000000..8e1c40d192 --- /dev/null +++ b/external/vendor/concurrent-queue/src/unbounded.rs @@ -0,0 +1,452 @@ +use alloc::boxed::Box; +use core::mem::MaybeUninit; +use core::ptr; + +use crossbeam_utils::CachePadded; + +use crate::const_fn; +use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::sync::cell::UnsafeCell; +#[allow(unused_imports)] +use crate::sync::prelude::*; +use crate::{busy_wait, PopError, PushError}; + +// Bits indicating the state of a slot: +// * If a value has been written into the slot, `WRITE` is set. +// * If a value has been read from the slot, `READ` is set. +// * If the block is being destroyed, `DESTROY` is set. 
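+ // Illustrative sketch, not part of the upstream sources: indices in this file advance
+ // by `1 << SHIFT` per item, the low bit doubles as `MARK_BIT`, and the offset of an
+ // index within its block is `(index >> SHIFT) % LAP`. Offsets `0..BLOCK_CAP` hold
+ // values; the final offset of each lap is the sentinel that threads spin on while the
+ // next block is being installed.
+ #[cfg(test)]
+ mod index_layout_sketch {
+     #[test]
+     fn offsets_wrap_per_block() {
+         // Local copies of the constants so the sketch stands on its own.
+         const LAP: usize = 32;
+         const BLOCK_CAP: usize = LAP - 1;
+         const SHIFT: usize = 1;
+
+         let offset = |index: usize| (index >> SHIFT) % LAP;
+
+         assert_eq!(offset(0), 0); // first value slot of the first block
+         assert_eq!(offset(30 << SHIFT), BLOCK_CAP - 1); // last value slot of a block
+         assert_eq!(offset(31 << SHIFT), BLOCK_CAP); // end-of-block sentinel
+         assert_eq!(offset(32 << SHIFT), 0); // first value slot of the next block
+     }
+ }
+
+ // The slot-state bits described above: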
+const WRITE: usize = 1; +const READ: usize = 2; +const DESTROY: usize = 4; + +// Each block covers one "lap" of indices. +const LAP: usize = 32; +// The maximum number of items a block can hold. +const BLOCK_CAP: usize = LAP - 1; +// How many lower bits are reserved for metadata. +const SHIFT: usize = 1; +// Has two different purposes: +// * If set in head, indicates that the block is not the last one. +// * If set in tail, indicates that the queue is closed. +const MARK_BIT: usize = 1; + +/// A slot in a block. +struct Slot { + /// The value. + value: UnsafeCell>, + + /// The state of the slot. + state: AtomicUsize, +} + +impl Slot { + #[cfg(not(loom))] + const UNINIT: Slot = Slot { + value: UnsafeCell::new(MaybeUninit::uninit()), + state: AtomicUsize::new(0), + }; + + #[cfg(not(loom))] + fn uninit_block() -> [Slot; BLOCK_CAP] { + [Self::UNINIT; BLOCK_CAP] + } + + #[cfg(loom)] + fn uninit_block() -> [Slot; BLOCK_CAP] { + // Repeat this expression 31 times. + // Update if we change BLOCK_CAP + macro_rules! repeat_31 { + ($e: expr) => { + [ + $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, + $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, + ] + }; + } + + repeat_31!(Slot { + value: UnsafeCell::new(MaybeUninit::uninit()), + state: AtomicUsize::new(0), + }) + } + + /// Waits until a value is written into the slot. + fn wait_write(&self) { + while self.state.load(Ordering::Acquire) & WRITE == 0 { + busy_wait(); + } + } +} + +/// A block in a linked list. +/// +/// Each block in the list can hold up to `BLOCK_CAP` values. +struct Block { + /// The next block in the linked list. + next: AtomicPtr>, + + /// Slots for values. + slots: [Slot; BLOCK_CAP], +} + +impl Block { + /// Creates an empty block. + fn new() -> Block { + Block { + next: AtomicPtr::new(ptr::null_mut()), + slots: Slot::uninit_block(), + } + } + + /// Waits until the next pointer is set. + fn wait_next(&self) -> *mut Block { + loop { + let next = self.next.load(Ordering::Acquire); + if !next.is_null() { + return next; + } + busy_wait(); + } + } + + /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block. + unsafe fn destroy(this: *mut Block, start: usize) { + // It is not necessary to set the `DESTROY` bit in the last slot because that slot has + // begun destruction of the block. + for i in start..BLOCK_CAP - 1 { + let slot = (*this).slots.get_unchecked(i); + + // Mark the `DESTROY` bit if a thread is still using the slot. + if slot.state.load(Ordering::Acquire) & READ == 0 + && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0 + { + // If a thread is still using the slot, it will continue destruction of the block. + return; + } + } + + // No thread is using the block, now it is safe to destroy it. + drop(Box::from_raw(this)); + } +} + +/// A position in a queue. +struct Position { + /// The index in the queue. + index: AtomicUsize, + + /// The block in the linked list. + block: AtomicPtr>, +} + +/// An unbounded queue. +pub struct Unbounded { + /// The head of the queue. + head: CachePadded>, + + /// The tail of the queue. + tail: CachePadded>, +} + +impl Unbounded { + const_fn!( + const_if: #[cfg(not(loom))]; + /// Creates a new unbounded queue. 
+ pub const fn new() -> Unbounded { + Unbounded { + head: CachePadded::new(Position { + block: AtomicPtr::new(ptr::null_mut()), + index: AtomicUsize::new(0), + }), + tail: CachePadded::new(Position { + block: AtomicPtr::new(ptr::null_mut()), + index: AtomicUsize::new(0), + }), + } + } + ); + + /// Pushes an item into the queue. + pub fn push(&self, value: T) -> Result<(), PushError> { + let mut tail = self.tail.index.load(Ordering::Acquire); + let mut block = self.tail.block.load(Ordering::Acquire); + let mut next_block = None; + + loop { + // Check if the queue is closed. + if tail & MARK_BIT != 0 { + return Err(PushError::Closed(value)); + } + + // Calculate the offset of the index into the block. + let offset = (tail >> SHIFT) % LAP; + + // If we reached the end of the block, wait until the next one is installed. + if offset == BLOCK_CAP { + busy_wait(); + tail = self.tail.index.load(Ordering::Acquire); + block = self.tail.block.load(Ordering::Acquire); + continue; + } + + // If we're going to have to install the next block, allocate it in advance in order to + // make the wait for other threads as short as possible. + if offset + 1 == BLOCK_CAP && next_block.is_none() { + next_block = Some(Box::new(Block::::new())); + } + + // If this is the first value to be pushed into the queue, we need to allocate the + // first block and install it. + if block.is_null() { + let new = Box::into_raw(Box::new(Block::::new())); + + if self + .tail + .block + .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed) + .is_ok() + { + self.head.block.store(new, Ordering::Release); + block = new; + } else { + next_block = unsafe { Some(Box::from_raw(new)) }; + tail = self.tail.index.load(Ordering::Acquire); + block = self.tail.block.load(Ordering::Acquire); + continue; + } + } + + let new_tail = tail + (1 << SHIFT); + + // Try advancing the tail forward. + match self.tail.index.compare_exchange_weak( + tail, + new_tail, + Ordering::SeqCst, + Ordering::Acquire, + ) { + Ok(_) => unsafe { + // If we've reached the end of the block, install the next one. + if offset + 1 == BLOCK_CAP { + let next_block = Box::into_raw(next_block.unwrap()); + self.tail.block.store(next_block, Ordering::Release); + self.tail.index.fetch_add(1 << SHIFT, Ordering::Release); + (*block).next.store(next_block, Ordering::Release); + } + + // Write the value into the slot. + let slot = (*block).slots.get_unchecked(offset); + slot.value.with_mut(|slot| { + slot.write(MaybeUninit::new(value)); + }); + slot.state.fetch_or(WRITE, Ordering::Release); + return Ok(()); + }, + Err(t) => { + tail = t; + block = self.tail.block.load(Ordering::Acquire); + } + } + } + } + + /// Pops an item from the queue. + pub fn pop(&self) -> Result { + let mut head = self.head.index.load(Ordering::Acquire); + let mut block = self.head.block.load(Ordering::Acquire); + + loop { + // Calculate the offset of the index into the block. + let offset = (head >> SHIFT) % LAP; + + // If we reached the end of the block, wait until the next one is installed. + if offset == BLOCK_CAP { + busy_wait(); + head = self.head.index.load(Ordering::Acquire); + block = self.head.block.load(Ordering::Acquire); + continue; + } + + let mut new_head = head + (1 << SHIFT); + + if new_head & MARK_BIT == 0 { + crate::full_fence(); + let tail = self.tail.index.load(Ordering::Relaxed); + + // If the tail equals the head, that means the queue is empty. + if head >> SHIFT == tail >> SHIFT { + // Check if the queue is closed. 
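+ // (An empty queue and a closed, fully drained queue both end up with `head == tail`;
+ // only the mark bit on the tail distinguishes the two cases.)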
+ if tail & MARK_BIT != 0 { + return Err(PopError::Closed); + } else { + return Err(PopError::Empty); + } + } + + // If head and tail are not in the same block, set `MARK_BIT` in head. + if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { + new_head |= MARK_BIT; + } + } + + // The block can be null here only if the first push operation is in progress. + if block.is_null() { + busy_wait(); + head = self.head.index.load(Ordering::Acquire); + block = self.head.block.load(Ordering::Acquire); + continue; + } + + // Try moving the head index forward. + match self.head.index.compare_exchange_weak( + head, + new_head, + Ordering::SeqCst, + Ordering::Acquire, + ) { + Ok(_) => unsafe { + // If we've reached the end of the block, move to the next one. + if offset + 1 == BLOCK_CAP { + let next = (*block).wait_next(); + let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT); + if !(*next).next.load(Ordering::Relaxed).is_null() { + next_index |= MARK_BIT; + } + + self.head.block.store(next, Ordering::Release); + self.head.index.store(next_index, Ordering::Release); + } + + // Read the value. + let slot = (*block).slots.get_unchecked(offset); + slot.wait_write(); + let value = slot.value.with_mut(|slot| slot.read().assume_init()); + + // Destroy the block if we've reached the end, or if another thread wanted to + // destroy but couldn't because we were busy reading from the slot. + if offset + 1 == BLOCK_CAP { + Block::destroy(block, 0); + } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { + Block::destroy(block, offset + 1); + } + + return Ok(value); + }, + Err(h) => { + head = h; + block = self.head.block.load(Ordering::Acquire); + } + } + } + } + + /// Returns the number of items in the queue. + pub fn len(&self) -> usize { + loop { + // Load the tail index, then load the head index. + let mut tail = self.tail.index.load(Ordering::SeqCst); + let mut head = self.head.index.load(Ordering::SeqCst); + + // If the tail index didn't change, we've got consistent indices to work with. + if self.tail.index.load(Ordering::SeqCst) == tail { + // Erase the lower bits. + tail &= !((1 << SHIFT) - 1); + head &= !((1 << SHIFT) - 1); + + // Fix up indices if they fall onto block ends. + if (tail >> SHIFT) & (LAP - 1) == LAP - 1 { + tail = tail.wrapping_add(1 << SHIFT); + } + if (head >> SHIFT) & (LAP - 1) == LAP - 1 { + head = head.wrapping_add(1 << SHIFT); + } + + // Rotate indices so that head falls into the first block. + let lap = (head >> SHIFT) / LAP; + tail = tail.wrapping_sub((lap * LAP) << SHIFT); + head = head.wrapping_sub((lap * LAP) << SHIFT); + + // Remove the lower bits. + tail >>= SHIFT; + head >>= SHIFT; + + // Return the difference minus the number of blocks between tail and head. + return tail - head - tail / LAP; + } + } + } + + /// Returns `true` if the queue is empty. + pub fn is_empty(&self) -> bool { + let head = self.head.index.load(Ordering::SeqCst); + let tail = self.tail.index.load(Ordering::SeqCst); + head >> SHIFT == tail >> SHIFT + } + + /// Returns `true` if the queue is full. + pub fn is_full(&self) -> bool { + false + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue. + pub fn close(&self) -> bool { + let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst); + tail & MARK_BIT == 0 + } + + /// Returns `true` if the queue is closed. 
+ pub fn is_closed(&self) -> bool { + self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0 + } +} + +impl Drop for Unbounded { + fn drop(&mut self) { + let Self { head, tail } = self; + let Position { index: head, block } = &mut **head; + + head.with_mut(|&mut mut head| { + tail.index.with_mut(|&mut mut tail| { + // Erase the lower bits. + head &= !((1 << SHIFT) - 1); + tail &= !((1 << SHIFT) - 1); + + unsafe { + // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. + while head != tail { + let offset = (head >> SHIFT) % LAP; + + if offset < BLOCK_CAP { + // Drop the value in the slot. + block.with_mut(|block| { + let slot = (**block).slots.get_unchecked(offset); + slot.value.with_mut(|slot| { + let value = &mut *slot; + value.as_mut_ptr().drop_in_place(); + }); + }); + } else { + // Deallocate the block and move to the next one. + block.with_mut(|block| { + let next_block = (**block).next.with_mut(|next| *next); + drop(Box::from_raw(*block)); + *block = next_block; + }); + } + + head = head.wrapping_add(1 << SHIFT); + } + + // Deallocate the last remaining block. + block.with_mut(|block| { + if !block.is_null() { + drop(Box::from_raw(*block)); + } + }); + } + }); + }); + } +} diff --git a/external/vendor/concurrent-queue/tests/bounded.rs b/external/vendor/concurrent-queue/tests/bounded.rs new file mode 100644 index 0000000000..6f402b7f8b --- /dev/null +++ b/external/vendor/concurrent-queue/tests/bounded.rs @@ -0,0 +1,371 @@ +#![allow(clippy::bool_assert_comparison)] + +use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError}; + +#[cfg(not(target_family = "wasm"))] +use easy_parallel::Parallel; +#[cfg(not(target_family = "wasm"))] +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn smoke() { + let q = ConcurrentQueue::bounded(2); + + q.push(7).unwrap(); + assert_eq!(q.pop(), Ok(7)); + + q.push(8).unwrap(); + assert_eq!(q.pop(), Ok(8)); + assert!(q.pop().is_err()); +} + +#[test] +fn capacity() { + for i in 1..10 { + let q = ConcurrentQueue::::bounded(i); + assert_eq!(q.capacity(), Some(i)); + } +} + +#[test] +#[should_panic(expected = "capacity must be positive")] +fn zero_capacity() { + let _ = ConcurrentQueue::::bounded(0); +} + +#[test] +fn len_empty_full() { + let q = ConcurrentQueue::bounded(2); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); + assert_eq!(q.is_full(), false); + + q.push(()).unwrap(); + + assert_eq!(q.len(), 1); + assert_eq!(q.is_empty(), false); + assert_eq!(q.is_full(), false); + + q.push(()).unwrap(); + + assert_eq!(q.len(), 2); + assert_eq!(q.is_empty(), false); + assert_eq!(q.is_full(), true); + + q.pop().unwrap(); + + assert_eq!(q.len(), 1); + assert_eq!(q.is_empty(), false); + assert_eq!(q.is_full(), false); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn len() { + const COUNT: usize = if cfg!(miri) { 50 } else { 25_000 }; + const CAP: usize = if cfg!(miri) { 50 } else { 1000 }; + + let q = ConcurrentQueue::bounded(CAP); + assert_eq!(q.len(), 0); + + for _ in 0..CAP / 10 { + for i in 0..50 { + q.push(i).unwrap(); + assert_eq!(q.len(), i + 1); + } + + for i in 0..50 { + q.pop().unwrap(); + assert_eq!(q.len(), 50 - i - 1); + } + } + assert_eq!(q.len(), 0); + + for i in 0..CAP { + q.push(i).unwrap(); + assert_eq!(q.len(), i + 1); + } + + for _ in 0..CAP { + q.pop().unwrap(); + } + assert_eq!(q.len(), 0); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + loop { + if let Ok(x) = q.pop() { + 
assert_eq!(x, i); + break; + } + } + let len = q.len(); + assert!(len <= CAP); + } + }) + .add(|| { + for i in 0..COUNT { + while q.push(i).is_err() {} + let len = q.len(); + assert!(len <= CAP); + } + }) + .run(); + + assert_eq!(q.len(), 0); +} + +#[test] +fn close() { + let q = ConcurrentQueue::bounded(2); + assert_eq!(q.push(10), Ok(())); + + assert!(!q.is_closed()); + assert!(q.close()); + + assert!(q.is_closed()); + assert!(!q.close()); + + assert_eq!(q.push(20), Err(PushError::Closed(20))); + assert_eq!(q.pop(), Ok(10)); + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[test] +fn force_push() { + let q = ConcurrentQueue::::bounded(5); + + for i in 1..=5 { + assert_eq!(q.force_push(i), Ok(None)); + } + + assert!(!q.is_closed()); + for i in 6..=10 { + assert_eq!(q.force_push(i), Ok(Some(i - 5))); + } + assert_eq!(q.pop(), Ok(6)); + assert_eq!(q.force_push(11), Ok(None)); + for i in 12..=15 { + assert_eq!(q.force_push(i), Ok(Some(i - 5))); + } + + assert!(q.close()); + assert_eq!(q.force_push(40), Err(ForcePushError(40))); + for i in 11..=15 { + assert_eq!(q.pop(), Ok(i)); + } + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 }; + + let q = ConcurrentQueue::bounded(3); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + loop { + if let Ok(x) = q.pop() { + assert_eq!(x, i); + break; + } + } + } + assert!(q.pop().is_err()); + }) + .add(|| { + for i in 0..COUNT { + while q.push(i).is_err() {} + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 4; + + let q = ConcurrentQueue::::bounded(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + let n = loop { + if let Ok(x) = q.pop() { + break x; + } + }; + v[n].fetch_add(1, Ordering::SeqCst); + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + while q.push(i).is_err() {} + } + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn drops() { + const RUNS: usize = if cfg!(miri) { 10 } else { 100 }; + const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 }; + + static DROPS: AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + for _ in 0..RUNS { + let steps = fastrand::usize(..STEPS); + let additional = fastrand::usize(..50); + + DROPS.store(0, Ordering::SeqCst); + let q = ConcurrentQueue::bounded(50); + + Parallel::new() + .add(|| { + for _ in 0..steps { + while q.pop().is_err() {} + } + }) + .add(|| { + for _ in 0..steps { + while q.push(DropCounter).is_err() { + DROPS.fetch_sub(1, Ordering::SeqCst); + } + } + }) + .run(); + + for _ in 0..additional { + q.push(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(q); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn linearizable() { + const COUNT: usize = if cfg!(miri) { 500 } else { 25_000 }; + const THREADS: usize = 4; + + let q = ConcurrentQueue::bounded(THREADS); + + Parallel::new() + .each(0..THREADS / 2, |_| { + for _ in 0..COUNT { + while q.push(0).is_err() {} + q.pop().unwrap(); + } + }) + .each(0..THREADS / 2, |_| { + for _ in 
0..COUNT { + if q.force_push(0).unwrap().is_none() { + q.pop().unwrap(); + } + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc_ring_buffer() { + const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 }; + + let t = AtomicUsize::new(1); + let q = ConcurrentQueue::::bounded(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .add(|| loop { + match t.load(Ordering::SeqCst) { + 0 if q.is_empty() => break, + + _ => { + while let Ok(n) = q.pop() { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + } + }) + .add(|| { + for i in 0..COUNT { + if let Ok(Some(n)) = q.force_push(i) { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + + t.fetch_sub(1, Ordering::SeqCst); + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), 1); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc_ring_buffer() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 4; + + let t = AtomicUsize::new(THREADS); + let q = ConcurrentQueue::::bounded(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| loop { + match t.load(Ordering::SeqCst) { + 0 if q.is_empty() => break, + + _ => { + while let Ok(n) = q.pop() { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + if let Ok(Some(n)) = q.force_push(i) { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + + t.fetch_sub(1, Ordering::SeqCst); + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} diff --git a/external/vendor/concurrent-queue/tests/loom.rs b/external/vendor/concurrent-queue/tests/loom.rs new file mode 100644 index 0000000000..77f99d4945 --- /dev/null +++ b/external/vendor/concurrent-queue/tests/loom.rs @@ -0,0 +1,307 @@ +#![cfg(loom)] + +use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError}; +use loom::sync::atomic::{AtomicUsize, Ordering}; +use loom::sync::{Arc, Condvar, Mutex}; +use loom::thread; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +/// A basic MPMC channel based on a ConcurrentQueue and loom primitives. +struct Channel { + /// The queue used to contain items. + queue: ConcurrentQueue, + + /// The number of senders. + senders: AtomicUsize, + + /// The number of receivers. + receivers: AtomicUsize, + + /// The event that is signaled when a new item is pushed. + push_event: Event, + + /// The event that is signaled when a new item is popped. + pop_event: Event, +} + +/// The sending side of a channel. +struct Sender { + /// The channel. + channel: Arc>, +} + +/// The receiving side of a channel. +struct Receiver { + /// The channel. + channel: Arc>, +} + +/// Create a new pair of senders/receivers based on a queue. +fn pair(queue: ConcurrentQueue) -> (Sender, Receiver) { + let channel = Arc::new(Channel { + queue, + senders: AtomicUsize::new(1), + receivers: AtomicUsize::new(1), + push_event: Event::new(), + pop_event: Event::new(), + }); + + ( + Sender { + channel: channel.clone(), + }, + Receiver { channel }, + ) +} + +impl Clone for Sender { + fn clone(&self) -> Self { + self.channel.senders.fetch_add(1, Ordering::SeqCst); + Sender { + channel: self.channel.clone(), + } + } +} + +impl Drop for Sender { + fn drop(&mut self) { + if self.channel.senders.fetch_sub(1, Ordering::SeqCst) == 1 { + // Close the channel and notify the receivers. 
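+ // Closing still lets receivers drain whatever is queued; once the queue is empty,
+ // `pop` returns `PopError::Closed`, which `recv` maps to an error. `signal_all` wakes
+ // every receiver currently blocked in `wait`.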
+ self.channel.queue.close(); + self.channel.push_event.signal_all(); + } + } +} + +impl Clone for Receiver { + fn clone(&self) -> Self { + self.channel.receivers.fetch_add(1, Ordering::SeqCst); + Receiver { + channel: self.channel.clone(), + } + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + if self.channel.receivers.fetch_sub(1, Ordering::SeqCst) == 1 { + // Close the channel and notify the senders. + self.channel.queue.close(); + self.channel.pop_event.signal_all(); + } + } +} + +impl Sender { + /// Send a value. + /// + /// Returns an error with the value if the channel is closed. + fn send(&self, mut value: T) -> Result<(), T> { + loop { + match self.channel.queue.push(value) { + Ok(()) => { + // Notify a single receiver. + self.channel.push_event.signal(); + return Ok(()); + } + Err(PushError::Closed(val)) => return Err(val), + Err(PushError::Full(val)) => { + // Wait for a receiver to pop an item. + value = val; + self.channel.pop_event.wait(); + } + } + } + } + + /// Send a value forcefully. + fn force_send(&self, value: T) -> Result, T> { + match self.channel.queue.force_push(value) { + Ok(bumped) => { + self.channel.push_event.signal(); + Ok(bumped) + } + + Err(ForcePushError(val)) => Err(val), + } + } +} + +impl Receiver { + /// Channel capacity. + fn capacity(&self) -> Option { + self.channel.queue.capacity() + } + + /// Receive a value. + /// + /// Returns an error if the channel is closed. + fn recv(&self) -> Result { + loop { + match self.channel.queue.pop() { + Ok(value) => { + // Notify a single sender. + self.channel.pop_event.signal(); + return Ok(value); + } + Err(PopError::Closed) => return Err(()), + Err(PopError::Empty) => { + // Wait for a sender to push an item. + self.channel.push_event.wait(); + } + } + } + } +} + +/// An event that can be waited on and then signaled. +struct Event { + /// The condition variable used to wait on the event. + condvar: Condvar, + + /// The mutex used to protect the event. + /// + /// Inside is the event's state. The first bit is used to indicate if the + /// notify_one method was called. The second bit is used to indicate if the + /// notify_all method was called. + mutex: Mutex, +} + +impl Event { + /// Create a new event. + fn new() -> Self { + Self { + condvar: Condvar::new(), + mutex: Mutex::new(0), + } + } + + /// Wait for the event to be signaled. + fn wait(&self) { + let mut state = self.mutex.lock().unwrap(); + + loop { + if *state & 0b11 != 0 { + // The event was signaled. + *state &= !0b01; + return; + } + + // Wait for the event to be signaled. + state = self.condvar.wait(state).unwrap(); + } + } + + /// Signal the event. + fn signal(&self) { + let mut state = self.mutex.lock().unwrap(); + *state |= 1; + drop(state); + + self.condvar.notify_one(); + } + + /// Signal the event, but notify all waiters. + fn signal_all(&self) { + let mut state = self.mutex.lock().unwrap(); + *state |= 3; + drop(state); + + self.condvar.notify_all(); + } +} + +/// Wrapper to run tests on all three queues. +fn run_test, usize) + Send + Sync + Clone + 'static>(f: F) { + // The length of a loom test seems to increase exponentially the higher this number is. 
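+ // Loom systematically explores thread interleavings, so each closure has to perform only
+ // a handful of operations; `run_test` exercises it against a bounded(1), a
+ // bounded(LIMIT / 2) and an unbounded queue.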
+ const LIMIT: usize = 4; + + let fc = f.clone(); + loom::model(move || { + fc(ConcurrentQueue::bounded(1), LIMIT); + }); + + let fc = f.clone(); + loom::model(move || { + fc(ConcurrentQueue::bounded(LIMIT / 2), LIMIT); + }); + + loom::model(move || { + f(ConcurrentQueue::unbounded(), LIMIT); + }); +} + +#[test] +fn spsc() { + run_test(|q, limit| { + // Create a new pair of senders/receivers. + let (tx, rx) = pair(q); + + // Push each onto a thread and run them. + let handle = thread::spawn(move || { + for i in 0..limit { + if tx.send(i).is_err() { + break; + } + } + }); + + let mut recv_values = vec![]; + + loop { + match rx.recv() { + Ok(value) => recv_values.push(value), + Err(()) => break, + } + } + + // Values may not be in order. + recv_values.sort_unstable(); + assert_eq!(recv_values, (0..limit).collect::>()); + + // Join the handle before we exit. + handle.join().unwrap(); + }); +} + +#[test] +fn spsc_force() { + run_test(|q, limit| { + // Create a new pair of senders/receivers. + let (tx, rx) = pair(q); + + // Push each onto a thread and run them. + let handle = thread::spawn(move || { + for i in 0..limit { + if tx.force_send(i).is_err() { + break; + } + } + }); + + let mut recv_values = vec![]; + + loop { + match rx.recv() { + Ok(value) => recv_values.push(value), + Err(()) => break, + } + } + + // Values may not be in order. + recv_values.sort_unstable(); + let cap = rx.capacity().unwrap_or(usize::MAX); + for (left, right) in (0..limit) + .rev() + .take(cap) + .zip(recv_values.into_iter().rev()) + { + assert_eq!(left, right); + } + + // Join the handle before we exit. + handle.join().unwrap(); + }); +} diff --git a/external/vendor/concurrent-queue/tests/single.rs b/external/vendor/concurrent-queue/tests/single.rs new file mode 100644 index 0000000000..ec4b912c94 --- /dev/null +++ b/external/vendor/concurrent-queue/tests/single.rs @@ -0,0 +1,289 @@ +#![allow(clippy::bool_assert_comparison)] + +use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError}; + +#[cfg(not(target_family = "wasm"))] +use easy_parallel::Parallel; +#[cfg(not(target_family = "wasm"))] +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn smoke() { + let q = ConcurrentQueue::bounded(1); + + q.push(7).unwrap(); + assert_eq!(q.pop(), Ok(7)); + + q.push(8).unwrap(); + assert_eq!(q.pop(), Ok(8)); + assert!(q.pop().is_err()); +} + +#[test] +fn capacity() { + let q = ConcurrentQueue::::bounded(1); + assert_eq!(q.capacity(), Some(1)); +} + +#[test] +fn len_empty_full() { + let q = ConcurrentQueue::bounded(1); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); + assert_eq!(q.is_full(), false); + + q.push(()).unwrap(); + + assert_eq!(q.len(), 1); + assert_eq!(q.is_empty(), false); + assert_eq!(q.is_full(), true); + + q.pop().unwrap(); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); + assert_eq!(q.is_full(), false); +} + +#[test] +fn close() { + let q = ConcurrentQueue::::bounded(1); + assert_eq!(q.push(10), Ok(())); + + assert!(!q.is_closed()); + assert!(q.close()); + + assert!(q.is_closed()); + assert!(!q.close()); + + assert_eq!(q.push(20), Err(PushError::Closed(20))); + assert_eq!(q.pop(), Ok(10)); + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[test] +fn force_push() { + let q = ConcurrentQueue::::bounded(1); + assert_eq!(q.force_push(10), Ok(None)); + + assert!(!q.is_closed()); + assert_eq!(q.force_push(20), Ok(Some(10))); + assert_eq!(q.force_push(30), Ok(Some(20))); + + 
assert!(q.close()); + assert_eq!(q.force_push(40), Err(ForcePushError(40))); + assert_eq!(q.pop(), Ok(30)); + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 }; + + let q = ConcurrentQueue::bounded(1); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + loop { + if let Ok(x) = q.pop() { + assert_eq!(x, i); + break; + } + } + } + assert!(q.pop().is_err()); + }) + .add(|| { + for i in 0..COUNT { + while q.push(i).is_err() {} + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 1; + + let q = ConcurrentQueue::::bounded(THREADS); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + let n = loop { + if let Ok(x) = q.pop() { + break x; + } + }; + v[n].fetch_add(1, Ordering::SeqCst); + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + while q.push(i).is_err() {} + } + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn drops() { + const RUNS: usize = if cfg!(miri) { 20 } else { 100 }; + const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 }; + + static DROPS: AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + for _ in 0..RUNS { + let steps = fastrand::usize(..STEPS); + let additional = fastrand::usize(0..=1); + + DROPS.store(0, Ordering::SeqCst); + let q = ConcurrentQueue::bounded(1); + + Parallel::new() + .add(|| { + for _ in 0..steps { + while q.pop().is_err() {} + } + }) + .add(|| { + for _ in 0..steps { + while q.push(DropCounter).is_err() { + DROPS.fetch_sub(1, Ordering::SeqCst); + } + } + }) + .run(); + + for _ in 0..additional { + q.push(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(q); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn linearizable() { + const COUNT: usize = if cfg!(miri) { 500 } else { 25_000 }; + const THREADS: usize = 4; + + let q = ConcurrentQueue::bounded(1); + + Parallel::new() + .each(0..THREADS / 2, |_| { + for _ in 0..COUNT { + while q.push(0).is_err() {} + q.pop().unwrap(); + } + }) + .each(0..THREADS / 2, |_| { + for _ in 0..COUNT { + if q.force_push(0).unwrap().is_none() { + q.pop().unwrap(); + } + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc_ring_buffer() { + const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 }; + + let t = AtomicUsize::new(1); + let q = ConcurrentQueue::::bounded(1); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .add(|| loop { + match t.load(Ordering::SeqCst) { + 0 if q.is_empty() => break, + + _ => { + while let Ok(n) = q.pop() { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + } + }) + .add(|| { + for i in 0..COUNT { + if let Ok(Some(n)) = q.force_push(i) { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + + t.fetch_sub(1, Ordering::SeqCst); + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), 1); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc_ring_buffer() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 4; + + let t = 
AtomicUsize::new(THREADS); + let q = ConcurrentQueue::::bounded(1); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| loop { + match t.load(Ordering::SeqCst) { + 0 if q.is_empty() => break, + + _ => { + while let Ok(n) = q.pop() { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + if let Ok(Some(n)) = q.force_push(i) { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + + t.fetch_sub(1, Ordering::SeqCst); + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} diff --git a/external/vendor/concurrent-queue/tests/unbounded.rs b/external/vendor/concurrent-queue/tests/unbounded.rs new file mode 100644 index 0000000000..e95dc8c725 --- /dev/null +++ b/external/vendor/concurrent-queue/tests/unbounded.rs @@ -0,0 +1,181 @@ +#![allow(clippy::bool_assert_comparison)] + +use concurrent_queue::{ConcurrentQueue, PopError, PushError}; + +#[cfg(not(target_family = "wasm"))] +use easy_parallel::Parallel; +#[cfg(not(target_family = "wasm"))] +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn smoke() { + let q = ConcurrentQueue::unbounded(); + q.push(7).unwrap(); + assert_eq!(q.pop(), Ok(7)); + + q.push(8).unwrap(); + assert_eq!(q.pop(), Ok(8)); + assert!(q.pop().is_err()); +} + +#[test] +fn len_empty_full() { + let q = ConcurrentQueue::unbounded(); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); + + q.push(()).unwrap(); + + assert_eq!(q.len(), 1); + assert_eq!(q.is_empty(), false); + + q.pop().unwrap(); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); +} + +#[test] +fn len() { + let q = ConcurrentQueue::unbounded(); + + assert_eq!(q.len(), 0); + + for i in 0..50 { + q.push(i).unwrap(); + assert_eq!(q.len(), i + 1); + } + + for i in 0..50 { + q.pop().unwrap(); + assert_eq!(q.len(), 50 - i - 1); + } + + assert_eq!(q.len(), 0); +} + +#[test] +fn close() { + let q = ConcurrentQueue::unbounded(); + assert_eq!(q.push(10), Ok(())); + + assert!(!q.is_closed()); + assert!(q.close()); + + assert!(q.is_closed()); + assert!(!q.close()); + + assert_eq!(q.push(20), Err(PushError::Closed(20))); + assert_eq!(q.pop(), Ok(10)); + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 }; + + let q = ConcurrentQueue::unbounded(); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + loop { + if let Ok(x) = q.pop() { + assert_eq!(x, i); + break; + } + } + } + assert!(q.pop().is_err()); + }) + .add(|| { + for i in 0..COUNT { + q.push(i).unwrap(); + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 4; + + let q = ConcurrentQueue::::unbounded(); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + let n = loop { + if let Ok(x) = q.pop() { + break x; + } + }; + v[n].fetch_add(1, Ordering::SeqCst); + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + q.push(i).unwrap(); + } + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn drops() { + const RUNS: usize = if cfg!(miri) { 20 } else { 100 }; + const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 }; + + static DROPS: 
AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + for _ in 0..RUNS { + let steps = fastrand::usize(0..STEPS); + let additional = fastrand::usize(0..1000); + + DROPS.store(0, Ordering::SeqCst); + let q = ConcurrentQueue::unbounded(); + + Parallel::new() + .add(|| { + for _ in 0..steps { + while q.pop().is_err() {} + } + }) + .add(|| { + for _ in 0..steps { + q.push(DropCounter).unwrap(); + } + }) + .run(); + + for _ in 0..additional { + q.push(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(q); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} diff --git a/external/vendor/crossbeam-utils/.cargo-checksum.json b/external/vendor/crossbeam-utils/.cargo-checksum.json new file mode 100644 index 0000000000..32c3420a24 --- /dev/null +++ b/external/vendor/crossbeam-utils/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"d43f49dbbe655cb91dcd62c862552f2ca4520eaff2e4bee391e01b0df968e358","CHANGELOG.md":"366caba01b88f421c71b97f61b9806abbf05e1ba0d24e4bf034191c1f8aa03b8","Cargo.toml":"961aa297754d8fdbae9e23d15bbbcfdcd2b50b2db56becddb84e8cba4f730713","Cargo.toml.orig":"6697cafee3a273f8c04e25c8606621b2fff8a779e3e1d01f9c08d225a0f36fc5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"3c82bbb994f54ab76a9ed30a42dfd095c6e636258d379b9be3fbf66324310e71","benches/atomic_cell.rs":"c927eb3cd1e5ecc4b91adbc3bde98af15ffab4086190792ba64d5cde0e24df3d","build-common.rs":"502cb7494549bed6fa10ac7bea36e880eeb60290dc69b679ac5c92b376469562","build.rs":"7a7f9e56ea7fb4f78c4e532b84b9d27be719d600e85eaeb3a2f4b79a4f0b419c","no_atomic.rs":"fc1baa4489d9842988bacaaa545a7d7d0e2f8b93cfa0b7d1ae31f21256e4cb0a","src/atomic/atomic_cell.rs":"6d8b83b65c73644abc10ec88a1442c8db531ae140de79197901510fcaea45966","src/atomic/consume.rs":"381c2a8b13312ca0525d53ca1b7d0d4f525ddb154951fa3e216b061ad22012ff","src/atomic/mod.rs":"712e2337e710c07116e977154ea4247a1c065bf5599e6bf368138e715b403f6d","src/atomic/seq_lock.rs":"27182e6b87a9db73c5f6831759f8625f9fcdec3c2828204c444aef04f427735a","src/atomic/seq_lock_wide.rs":"9888dd03116bb89ca36d4ab8d5a0b5032107a2983a7eb8024454263b09080088","src/backoff.rs":"8715f0303ec91d1847c8ac3fc24bcc002a22a7284ade610e5eff4181f85827c7","src/cache_padded.rs":"b6ff04ecf6de9124c0069c014d35f37de543cff1c4bfc1f260586aa49a5af6d8","src/lib.rs":"060dabc6dc07de92a7afa57dcbc47222a95ef5819d543ad854858c3b329d6637","src/sync/mod.rs":"eca73c04f821859b8434d2b93db87d160dc6a3f65498ca201cd40d732ca4c134","src/sync/once_lock.rs":"aa8f957604d1119c4fc7038a18c14a6281230e81005f31201c099acff284ad4b","src/sync/parker.rs":"698996e7530da1f3815df11c89df7d916155229cbfd022cccbd555f1d1d31985","src/sync/sharded_lock.rs":"f96d536f5622fe2a0a0f7d8117be31e4b1ed607544c52c7e2ffcd1f51a6b93a1","src/sync/wait_group.rs":"3e339aab014f50e214fea535c841755113ea058153378ed54e50a4acb403c937","src/thread.rs":"04610787ba88f1f59549874a13fc037f2dcf4d8b5f1daaf08378f05c2b3c0039","tests/atomic_cell.rs":"716c864d4e103039dc5cd8bf6110da4cbabafc7e4e03819aa197828e8fb0a9c7","tests/cache_padded.rs":"1bfaff8354c8184e1ee1f902881ca9400b60effb273b0d3f752801a483d2b66d","tests/parker.rs":"6def4721287d9d70b1cfd63ebb34e1c83fbb3376edbad2bc8aac6ef69dd99d20","tests/sharded_lock.rs":"314adeb8a651a28935f7a49c9a261b8fa1fd82bf6a16c865a5aced6216d7e40b","tests/t
hread.rs":"9a7d7d3028c552fd834c68598b04a1cc252a816bc20ab62cec060d6cd09cab10","tests/wait_group.rs":"2a41533a5f7f113d19cd2bdafcc2abf86509109652274156efdd74abd00896b6"},"package":"d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"} \ No newline at end of file diff --git a/external/vendor/crossbeam-utils/.cargo_vcs_info.json b/external/vendor/crossbeam-utils/.cargo_vcs_info.json new file mode 100644 index 0000000000..87108da095 --- /dev/null +++ b/external/vendor/crossbeam-utils/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "ccd83ac4108a2a1b41e9c6e79c87267167d18dfa" + }, + "path_in_vcs": "crossbeam-utils" +} \ No newline at end of file diff --git a/external/vendor/crossbeam-utils/CHANGELOG.md b/external/vendor/crossbeam-utils/CHANGELOG.md new file mode 100644 index 0000000000..5aa1967e71 --- /dev/null +++ b/external/vendor/crossbeam-utils/CHANGELOG.md @@ -0,0 +1,243 @@ +# Version 0.8.21 + +- Improve implementation of `CachePadded`. (#1152) + +# Version 0.8.20 + +- Implement `Display` for `CachePadded`. (#1097) + +# Version 0.8.19 + +- Remove dependency on `cfg-if`. (#1072) + +# Version 0.8.18 + +- Relax the minimum supported Rust version to 1.60. (#1056) +- Improve scalability of `AtomicCell` fallback. (#1055) + +# Version 0.8.17 + +- Bump the minimum supported Rust version to 1.61. (#1037) +- Improve support for targets without atomic CAS or 64-bit atomic. (#1037) +- Always implement `UnwindSafe` and `RefUnwindSafe` for `AtomicCell`. (#1045) +- Improve compatibility with Miri, TSan, and loom. (#995, #1003) +- Improve compatibility with unstable `oom=panic`. (#1045) +- Improve implementation of `CachePadded`. (#1014, #1025) +- Update `loom` dependency to 0.7. + +# Version 0.8.16 + +- Improve implementation of `CachePadded`. (#967) + +# Version 0.8.15 + +- Add `#[clippy::has_significant_drop]` to `ShardedLock{Read,Write}Guard`. (#958) +- Improve handling of very large timeout. (#953) +- Soft-deprecate `thread::scope()` in favor of the more efficient `std::thread::scope` that stabilized in Rust 1.63. (#954) + +# Version 0.8.14 + +- Fix build script bug introduced in 0.8.13. (#932) + +# Version 0.8.13 + +**Note:** This release has been yanked due to regression fixed in 0.8.14. + +- Improve support for custom targets. (#922) + +# Version 0.8.12 + +- Removes the dependency on the `once_cell` crate to restore the MSRV. (#913) +- Work around [rust-lang#98302](https://github.com/rust-lang/rust/issues/98302), which causes compile error on windows-gnu when LTO is enabled. (#913) + +# Version 0.8.11 + +- Bump the minimum supported Rust version to 1.38. (#877) + +# Version 0.8.10 + +- Fix unsoundness of `AtomicCell` on types containing niches. (#834) + This fix contains breaking changes, but they are allowed because this is a soundness bug fix. See #834 for more. + +# Version 0.8.9 + +- Replace lazy_static with once_cell. (#817) + +# Version 0.8.8 + +- Fix a bug when unstable `loom` support is enabled. (#787) + +# Version 0.8.7 + +- Add `AtomicCell<{i*,u*}>::{fetch_max,fetch_min}`. (#785) +- Add `AtomicCell<{i*,u*,bool}>::fetch_nand`. (#785) +- Fix unsoundness of `AtomicCell<{i,u}64>` arithmetics on 32-bit targets that support `Atomic{I,U}64` (#781) + +# Version 0.8.6 + +**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. 
+ +- Re-add `AtomicCell<{i,u}64>::{fetch_add,fetch_sub,fetch_and,fetch_or,fetch_xor}` that were accidentally removed in 0.8.0 on targets that do not support `Atomic{I,U}64`. (#767) +- Re-add `AtomicCell<{i,u}128>::{fetch_add,fetch_sub,fetch_and,fetch_or,fetch_xor}` that were accidentally removed in 0.8.0. (#767) + +# Version 0.8.5 + +**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. + +- Add `AtomicCell::fetch_update`. (#704) +- Support targets that do not have atomic CAS on stable Rust. (#698) + +# Version 0.8.4 + +**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. + +- Bump `loom` dependency to version 0.5. (#686) + +# Version 0.8.3 + +**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. + +- Make `loom` dependency optional. (#666) + +# Version 0.8.2 + +**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. + +- Deprecate `AtomicCell::compare_and_swap`. Use `AtomicCell::compare_exchange` instead. (#619) +- Add `Parker::park_deadline`. (#563) +- Improve implementation of `CachePadded`. (#636) +- Add unstable support for `loom`. (#487) + +# Version 0.8.1 + +**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. + +- Make `AtomicCell::is_lock_free` always const fn. (#600) +- Fix a bug in `seq_lock_wide`. (#596) +- Remove `const_fn` dependency. (#600) +- `crossbeam-utils` no longer fails to compile if unable to determine rustc version. Instead, it now displays a warning. (#604) + +# Version 0.8.0 + +**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. + +- Bump the minimum supported Rust version to 1.36. +- Remove deprecated `AtomicCell::get_mut()` and `Backoff::is_complete()` methods. +- Remove `alloc` feature. +- Make `CachePadded::new()` const function. +- Make `AtomicCell::is_lock_free()` const function at 1.46+. +- Implement `From<T>` for `AtomicCell<T>`. + +# Version 0.7.2 + +- Fix bug in release (yanking 0.7.1) + +# Version 0.7.1 + +- Bump `autocfg` dependency to version 1.0. (#460) +- Make `AtomicCell` lockfree for u8, u16, u32, u64 sized values at 1.34+. (#454) + +# Version 0.7.0 + +- Bump the minimum required version to 1.28. +- Fix breakage with nightly feature due to rust-lang/rust#65214. +- Apply `#[repr(transparent)]` to `AtomicCell`. +- Make `AtomicCell::new()` const function at 1.31+. + +# Version 0.6.6 + +- Add `UnwindSafe` and `RefUnwindSafe` impls for `AtomicCell`. +- Add `AtomicCell::as_ptr()`. +- Add `AtomicCell::take()`. +- Fix a bug in `AtomicCell::compare_exchange()` and `AtomicCell::compare_and_swap()`. +- Various documentation improvements. + +# Version 0.6.5 + +- Rename `Backoff::is_complete()` to `Backoff::is_completed()`. + +# Version 0.6.4 + +- Add `WaitGroup`, `ShardedLock`, and `Backoff`. +- Add `fetch_*` methods for `AtomicCell<i64>` and `AtomicCell<u64>`. +- Expand documentation. + +# Version 0.6.3 + +- Add `AtomicCell`. +- Improve documentation. + +# Version 0.6.2 + +- Add `Parker`. +- Improve documentation.
+ +# Version 0.6.1 + +- Fix a soundness bug in `Scope::spawn()`. +- Remove the `T: 'scope` bound on `ScopedJoinHandle`. + +# Version 0.6.0 + +- Move `AtomicConsume` to `atomic` module. +- `scope()` returns a `Result` of thread joins. +- Remove `spawn_unchecked`. +- Fix a soundness bug due to incorrect lifetimes. +- Improve documentation. +- Support nested scoped spawns. +- Implement `Copy`, `Hash`, `PartialEq`, and `Eq` for `CachePadded`. +- Add `CachePadded::into_inner()`. + +# Version 0.5.0 + +- Reorganize sub-modules and rename functions. + +# Version 0.4.1 + +- Fix a documentation link. + +# Version 0.4.0 + +- `CachePadded` supports types bigger than 64 bytes. +- Fix a bug in scoped threads where uninitialized memory was being dropped. +- Minimum required Rust version is now 1.25. + +# Version 0.3.2 + +- Mark `load_consume` with `#[inline]`. + +# Version 0.3.1 + +- `load_consume` on ARM and AArch64. + +# Version 0.3.0 + +- Add `join` for scoped thread API. +- Add `load_consume` for atomic load-consume memory ordering. +- Remove `AtomicOption`. + +# Version 0.2.2 + +- Support Rust 1.12.1. +- Call `T::clone` when cloning a `CachePadded<T>`. + +# Version 0.2.1 + +- Add `use_std` feature. + +# Version 0.2.0 + +- Add `nightly` feature. +- Use `repr(align(64))` on `CachePadded` with the `nightly` feature. +- Implement `Drop` for `CachePadded`. +- Implement `Clone` for `CachePadded`. +- Implement `From<T>` for `CachePadded<T>`. +- Implement better `Debug` for `CachePadded`. +- Write more tests. +- Add this changelog. +- Change cache line length to 64 bytes. +- Remove `ZerosValid`. + +# Version 0.1.0 + +- Old implementation of `CachePadded` from `crossbeam` version 0.3.0 diff --git a/external/vendor/crossbeam-utils/Cargo.toml b/external/vendor/crossbeam-utils/Cargo.toml new file mode 100644 index 0000000000..c93dc2f275 --- /dev/null +++ b/external/vendor/crossbeam-utils/Cargo.toml @@ -0,0 +1,101 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents.
+ +[package] +edition = "2021" +rust-version = "1.60" +name = "crossbeam-utils" +version = "0.8.21" +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Utilities for concurrent programming" +homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils" +readme = "README.md" +keywords = [ + "scoped", + "thread", + "atomic", + "cache", +] +categories = [ + "algorithms", + "concurrency", + "data-structures", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam" + +[lib] +name = "crossbeam_utils" +path = "src/lib.rs" + +[[test]] +name = "atomic_cell" +path = "tests/atomic_cell.rs" + +[[test]] +name = "cache_padded" +path = "tests/cache_padded.rs" + +[[test]] +name = "parker" +path = "tests/parker.rs" + +[[test]] +name = "sharded_lock" +path = "tests/sharded_lock.rs" + +[[test]] +name = "thread" +path = "tests/thread.rs" + +[[test]] +name = "wait_group" +path = "tests/wait_group.rs" + +[[bench]] +name = "atomic_cell" +path = "benches/atomic_cell.rs" + +[dependencies] + +[dev-dependencies.rand] +version = "0.8" + +[features] +default = ["std"] +nightly = [] +std = [] + +[target."cfg(crossbeam_loom)".dependencies.loom] +version = "0.7.1" +optional = true + +[lints.clippy.declare_interior_mutable_const] +level = "allow" +priority = 1 + +[lints.clippy.lint_groups_priority] +level = "allow" +priority = 1 + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = [ + "cfg(crossbeam_loom)", + "cfg(crossbeam_sanitize)", +] diff --git a/external/vendor/crossbeam-utils/Cargo.toml.orig b/external/vendor/crossbeam-utils/Cargo.toml.orig new file mode 100644 index 0000000000..3a95baea25 --- /dev/null +++ b/external/vendor/crossbeam-utils/Cargo.toml.orig @@ -0,0 +1,46 @@ +[package] +name = "crossbeam-utils" +# When publishing a new version: +# - Update CHANGELOG.md +# - Update README.md (when increasing major or minor version) +# - Run './tools/publish.sh crossbeam-utils ' +version = "0.8.21" +edition = "2021" +rust-version = "1.60" +license = "MIT OR Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam" +homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils" +description = "Utilities for concurrent programming" +keywords = ["scoped", "thread", "atomic", "cache"] +categories = ["algorithms", "concurrency", "data-structures", "no-std"] + +[features] +default = ["std"] + +# Enable to use APIs that require `std`. +# This is enabled by default. +std = [] + +# These features are no longer used. +# TODO: remove in the next major version. +# Enable to use of unstable functionality. +# This is disabled by default and requires recent nightly compiler. +# +# NOTE: This feature is outside of the normal semver guarantees and minor or +# patch versions of crossbeam may make breaking changes to them at any time. +nightly = [] + +[dependencies] + +# Enable the use of loom for concurrency testing. +# +# NOTE: This feature is outside of the normal semver guarantees and minor or +# patch versions of crossbeam may make breaking changes to them at any time. 
+[target.'cfg(crossbeam_loom)'.dependencies] +loom = { version = "0.7.1", optional = true } + +[dev-dependencies] +rand = "0.8" + +[lints] +workspace = true diff --git a/external/vendor/crossbeam-utils/LICENSE-APACHE b/external/vendor/crossbeam-utils/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/external/vendor/crossbeam-utils/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/external/vendor/crossbeam-utils/LICENSE-MIT b/external/vendor/crossbeam-utils/LICENSE-MIT new file mode 100644 index 0000000000..068d491fd5 --- /dev/null +++ b/external/vendor/crossbeam-utils/LICENSE-MIT @@ -0,0 +1,27 @@ +The MIT License (MIT) + +Copyright (c) 2019 The Crossbeam Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/external/vendor/crossbeam-utils/README.md b/external/vendor/crossbeam-utils/README.md new file mode 100644 index 0000000000..7d6a679487 --- /dev/null +++ b/external/vendor/crossbeam-utils/README.md @@ -0,0 +1,73 @@ +# Crossbeam Utils + +[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( +https://github.com/crossbeam-rs/crossbeam/actions) +[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( +https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils#license) +[![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)]( +https://crates.io/crates/crossbeam-utils) +[![Documentation](https://docs.rs/crossbeam-utils/badge.svg)]( +https://docs.rs/crossbeam-utils) +[![Rust 1.60+](https://img.shields.io/badge/rust-1.60+-lightgray.svg)]( +https://www.rust-lang.org) +[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) + +This crate provides miscellaneous tools for concurrent programming: + +#### Atomics + +* [`AtomicCell`], a thread-safe mutable memory location.(no_std) +* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.(no_std) + +#### Thread synchronization + +* [`Parker`], a thread parking primitive. +* [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. 
+* [`WaitGroup`], for synchronizing the beginning or end of some computation. + +#### Utilities + +* [`Backoff`], for exponential backoff in spin loops. (no_std) +* [`CachePadded`], for padding and aligning a value to the length of a cache line. (no_std) +* [`scope`], for spawning threads that borrow local variables from the stack. + +*Features marked with (no_std) can be used in `no_std` environments.*
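+
+As a minimal sketch of how a few of these pieces compose (illustrative only; it assumes `crossbeam-utils` as a dependency, and the worker count and counter logic are invented for the example), the snippet below combines `AtomicCell` and `WaitGroup` to let several threads update a shared counter and then wait for all of them to finish:
+
+```rust
+use std::sync::Arc;
+use std::thread;
+
+use crossbeam_utils::atomic::AtomicCell;
+use crossbeam_utils::sync::WaitGroup;
+
+fn main() {
+    // Shared counter; AtomicCell<usize> is lock-free on targets with native atomics.
+    let counter = Arc::new(AtomicCell::new(0usize));
+    let wg = WaitGroup::new();
+
+    for _ in 0..4 {
+        let counter = Arc::clone(&counter);
+        let wg = wg.clone();
+        thread::spawn(move || {
+            counter.fetch_add(1);
+            // Dropping the cloned handle marks this worker as done.
+            drop(wg);
+        });
+    }
+
+    // Blocks until every cloned WaitGroup handle has been dropped.
+    wg.wait();
+    assert_eq!(counter.load(), 4);
+}
+```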
+ +[`AtomicCell`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/struct.AtomicCell.html +[`AtomicConsume`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/trait.AtomicConsume.html +[`Parker`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.Parker.html +[`ShardedLock`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.ShardedLock.html +[`WaitGroup`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.WaitGroup.html +[`Backoff`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/struct.Backoff.html +[`CachePadded`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/struct.CachePadded.html +[`scope`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/thread/fn.scope.html + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-utils = "0.8" +``` + +## Compatibility + +Crossbeam Utils supports stable Rust releases going back at least six months, +and every time the minimum supported Rust version is increased, a new minor +version is released. Currently, the minimum supported Rust version is 1.60. + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/external/vendor/crossbeam-utils/benches/atomic_cell.rs b/external/vendor/crossbeam-utils/benches/atomic_cell.rs new file mode 100644 index 0000000000..844f7c02b6 --- /dev/null +++ b/external/vendor/crossbeam-utils/benches/atomic_cell.rs @@ -0,0 +1,156 @@ +#![feature(test)] + +extern crate test; + +use std::sync::Barrier; + +use crossbeam_utils::atomic::AtomicCell; +use crossbeam_utils::thread; + +#[bench] +fn load_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + let mut sum = 0; + b.iter(|| sum += a.load()); + test::black_box(sum); +} + +#[bench] +fn store_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + b.iter(|| a.store(1)); +} + +#[bench] +fn fetch_add_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + b.iter(|| a.fetch_add(1)); +} + +#[bench] +fn compare_exchange_u8(b: &mut test::Bencher) { + let a = AtomicCell::new(0u8); + let mut i = 0; + b.iter(|| { + let _ = a.compare_exchange(i, i.wrapping_add(1)); + i = i.wrapping_add(1); + }); +} + +#[bench] +fn concurrent_load_u8(b: &mut test::Bencher) { + const THREADS: usize = 2; + const STEPS: usize = 1_000_000; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + let exit = AtomicCell::new(false); + + let a = AtomicCell::new(0u8); + + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| loop { + start.wait(); + + let mut sum = 0; + for _ in 0..STEPS { + sum += a.load(); + } + test::black_box(sum); + + end.wait(); + if exit.load() { + break; + } + }); + } + + start.wait(); + end.wait(); + + b.iter(|| { + start.wait(); + end.wait(); + }); + + start.wait(); + exit.store(true); + end.wait(); + }) + .unwrap(); +} + +#[bench] +fn load_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + let mut sum = 0; + b.iter(|| sum += a.load()); + test::black_box(sum); +} + +#[bench] +fn store_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + b.iter(|| 
a.store(1)); +} + +#[bench] +fn fetch_add_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + b.iter(|| a.fetch_add(1)); +} + +#[bench] +fn compare_exchange_usize(b: &mut test::Bencher) { + let a = AtomicCell::new(0usize); + let mut i = 0; + b.iter(|| { + let _ = a.compare_exchange(i, i.wrapping_add(1)); + i = i.wrapping_add(1); + }); +} + +#[bench] +fn concurrent_load_usize(b: &mut test::Bencher) { + const THREADS: usize = 2; + const STEPS: usize = 1_000_000; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + let exit = AtomicCell::new(false); + + let a = AtomicCell::new(0usize); + + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| loop { + start.wait(); + + let mut sum = 0; + for _ in 0..STEPS { + sum += a.load(); + } + test::black_box(sum); + + end.wait(); + if exit.load() { + break; + } + }); + } + + start.wait(); + end.wait(); + + b.iter(|| { + start.wait(); + end.wait(); + }); + + start.wait(); + exit.store(true); + end.wait(); + }) + .unwrap(); +} diff --git a/external/vendor/crossbeam-utils/build-common.rs b/external/vendor/crossbeam-utils/build-common.rs new file mode 100644 index 0000000000..e91bb4d471 --- /dev/null +++ b/external/vendor/crossbeam-utils/build-common.rs @@ -0,0 +1,13 @@ +// The target triplets have the form of 'arch-vendor-system'. +// +// When building for Linux (e.g. the 'system' part is +// 'linux-something'), replace the vendor with 'unknown' +// so that mapping to rust standard targets happens correctly. +fn convert_custom_linux_target(target: String) -> String { + let mut parts: Vec<&str> = target.split('-').collect(); + let system = parts.get(2); + if system == Some(&"linux") { + parts[1] = "unknown"; + }; + parts.join("-") +} diff --git a/external/vendor/crossbeam-utils/build.rs b/external/vendor/crossbeam-utils/build.rs new file mode 100644 index 0000000000..ff7e81f949 --- /dev/null +++ b/external/vendor/crossbeam-utils/build.rs @@ -0,0 +1,48 @@ +// The rustc-cfg listed below are considered public API, but it is *unstable* +// and outside of the normal semver guarantees: +// +// - `crossbeam_no_atomic` +// Assume the target does *not* support any atomic operations. +// This is usually detected automatically by the build script, but you may +// need to enable it manually when building for custom targets or using +// non-cargo build systems that don't run the build script. +// +// With the exceptions mentioned above, the rustc-cfg emitted by the build +// script are *not* public API. + +#![warn(rust_2018_idioms)] + +use std::env; + +include!("no_atomic.rs"); +include!("build-common.rs"); + +fn main() { + println!("cargo:rerun-if-changed=no_atomic.rs"); + println!("cargo:rustc-check-cfg=cfg(crossbeam_no_atomic,crossbeam_sanitize_thread)"); + + let target = match env::var("TARGET") { + Ok(target) => convert_custom_linux_target(target), + Err(e) => { + println!( + "cargo:warning={}: unable to get TARGET environment variable: {}", + env!("CARGO_PKG_NAME"), + e + ); + return; + } + }; + + // Note that this is `no_`*, not `has_*`. This allows treating as the latest + // stable rustc is used when the build script doesn't run. This is useful + // for non-cargo build systems that don't run the build script. + if NO_ATOMIC.contains(&&*target) { + println!("cargo:rustc-cfg=crossbeam_no_atomic"); + } + + // `cfg(sanitize = "..")` is not stabilized. 
+ let sanitize = env::var("CARGO_CFG_SANITIZE").unwrap_or_default(); + if sanitize.contains("thread") { + println!("cargo:rustc-cfg=crossbeam_sanitize_thread"); + } +} diff --git a/external/vendor/crossbeam-utils/no_atomic.rs b/external/vendor/crossbeam-utils/no_atomic.rs new file mode 100644 index 0000000000..f7e6d2fa42 --- /dev/null +++ b/external/vendor/crossbeam-utils/no_atomic.rs @@ -0,0 +1,9 @@ +// This file is @generated by no_atomic.sh. +// It is not intended for manual editing. + +const NO_ATOMIC: &[&str] = &[ + "bpfeb-unknown-none", + "bpfel-unknown-none", + "mipsel-sony-psx", + "msp430-none-elf", +]; diff --git a/external/vendor/crossbeam-utils/src/atomic/atomic_cell.rs b/external/vendor/crossbeam-utils/src/atomic/atomic_cell.rs new file mode 100644 index 0000000000..47472534c8 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/atomic/atomic_cell.rs @@ -0,0 +1,1182 @@ +// Necessary for implementing atomic methods for `AtomicUnit` +#![allow(clippy::unit_arg)] + +use crate::primitive::sync::atomic::{self, Ordering}; +use crate::CachePadded; +use core::cell::UnsafeCell; +use core::cmp; +use core::fmt; +use core::mem::{self, ManuallyDrop, MaybeUninit}; +use core::panic::{RefUnwindSafe, UnwindSafe}; +use core::ptr; + +use super::seq_lock::SeqLock; + +/// A thread-safe mutable memory location. +/// +/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads. +/// +/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using +/// global locks otherwise. You can call [`AtomicCell::::is_lock_free()`] to check whether +/// atomic instructions or locks will be used. +/// +/// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering. +/// +/// [`Cell`]: std::cell::Cell +/// [`AtomicCell::::is_lock_free()`]: AtomicCell::is_lock_free +/// [`Acquire`]: std::sync::atomic::Ordering::Acquire +/// [`Release`]: std::sync::atomic::Ordering::Release +#[repr(transparent)] +pub struct AtomicCell { + /// The inner value. + /// + /// If this value can be transmuted into a primitive atomic type, it will be treated as such. + /// Otherwise, all potentially concurrent operations on this data will be protected by a global + /// lock. + /// + /// Using MaybeUninit to prevent code outside the cell from observing partially initialized state: + /// + /// (This rustc bug has been fixed in Rust 1.64.) + /// + /// Note: + /// - we'll never store uninitialized `T` due to our API only using initialized `T`. + /// - this `MaybeUninit` does *not* fix . + value: UnsafeCell>, +} + +unsafe impl Send for AtomicCell {} +unsafe impl Sync for AtomicCell {} + +impl UnwindSafe for AtomicCell {} +impl RefUnwindSafe for AtomicCell {} + +impl AtomicCell { + /// Creates a new atomic cell initialized with `val`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(7); + /// ``` + pub const fn new(val: T) -> AtomicCell { + AtomicCell { + value: UnsafeCell::new(MaybeUninit::new(val)), + } + } + + /// Consumes the atomic and returns the contained value. + /// + /// This is safe because passing `self` by value guarantees that no other threads are + /// concurrently accessing the atomic data. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(7); + /// let v = a.into_inner(); + /// + /// assert_eq!(v, 7); + /// ``` + pub fn into_inner(self) -> T { + let this = ManuallyDrop::new(self); + // SAFETY: + // - passing `self` by value guarantees that no other threads are concurrently + // accessing the atomic data + // - the raw pointer passed in is valid because we got it from an owned value. + // - `ManuallyDrop` prevents double dropping `T` + unsafe { this.as_ptr().read() } + } + + /// Returns `true` if operations on values of this type are lock-free. + /// + /// If the compiler or the platform doesn't support the necessary atomic instructions, + /// `AtomicCell` will use global locks for every potentially concurrent atomic operation. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// // This type is internally represented as `AtomicUsize` so we can just use atomic + /// // operations provided by it. + /// assert_eq!(AtomicCell::::is_lock_free(), true); + /// + /// // A wrapper struct around `isize`. + /// struct Foo { + /// bar: isize, + /// } + /// // `AtomicCell` will be internally represented as `AtomicIsize`. + /// assert_eq!(AtomicCell::::is_lock_free(), true); + /// + /// // Operations on zero-sized types are always lock-free. + /// assert_eq!(AtomicCell::<()>::is_lock_free(), true); + /// + /// // Very large types cannot be represented as any of the standard atomic types, so atomic + /// // operations on them will have to use global locks for synchronization. + /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false); + /// ``` + pub const fn is_lock_free() -> bool { + atomic_is_lock_free::() + } + + /// Stores `val` into the atomic cell. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(7); + /// + /// assert_eq!(a.load(), 7); + /// a.store(8); + /// assert_eq!(a.load(), 8); + /// ``` + pub fn store(&self, val: T) { + if mem::needs_drop::() { + drop(self.swap(val)); + } else { + unsafe { + atomic_store(self.as_ptr(), val); + } + } + } + + /// Stores `val` into the atomic cell and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(7); + /// + /// assert_eq!(a.load(), 7); + /// assert_eq!(a.swap(8), 7); + /// assert_eq!(a.load(), 8); + /// ``` + pub fn swap(&self, val: T) -> T { + unsafe { atomic_swap(self.as_ptr(), val) } + } + + /// Returns a raw pointer to the underlying data in this atomic cell. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(5); + /// + /// let ptr = a.as_ptr(); + /// ``` + #[inline] + pub fn as_ptr(&self) -> *mut T { + self.value.get().cast::() + } +} + +impl AtomicCell { + /// Takes the value of the atomic cell, leaving `Default::default()` in its place. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(5); + /// let five = a.take(); + /// + /// assert_eq!(five, 5); + /// assert_eq!(a.into_inner(), 0); + /// ``` + pub fn take(&self) -> T { + self.swap(Default::default()) + } +} + +impl AtomicCell { + /// Loads a value from the atomic cell. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(7); + /// + /// assert_eq!(a.load(), 7); + /// ``` + pub fn load(&self) -> T { + unsafe { atomic_load(self.as_ptr()) } + } +} + +impl AtomicCell { + /// If the current value equals `current`, stores `new` into the atomic cell. + /// + /// The return value is always the previous value. If it is equal to `current`, then the value + /// was updated. + /// + /// # Examples + /// + /// ``` + /// # #![allow(deprecated)] + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(1); + /// + /// assert_eq!(a.compare_and_swap(2, 3), 1); + /// assert_eq!(a.load(), 1); + /// + /// assert_eq!(a.compare_and_swap(1, 2), 1); + /// assert_eq!(a.load(), 2); + /// ``` + // TODO: remove in the next major version. + #[deprecated(note = "Use `compare_exchange` instead")] + pub fn compare_and_swap(&self, current: T, new: T) -> T { + match self.compare_exchange(current, new) { + Ok(v) => v, + Err(v) => v, + } + } + + /// If the current value equals `current`, stores `new` into the atomic cell. + /// + /// The return value is a result indicating whether the new value was written and containing + /// the previous value. On success this value is guaranteed to be equal to `current`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(1); + /// + /// assert_eq!(a.compare_exchange(2, 3), Err(1)); + /// assert_eq!(a.load(), 1); + /// + /// assert_eq!(a.compare_exchange(1, 2), Ok(1)); + /// assert_eq!(a.load(), 2); + /// ``` + pub fn compare_exchange(&self, current: T, new: T) -> Result { + unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) } + } + + /// Fetches the value, and applies a function to it that returns an optional + /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else + /// `Err(previous_value)`. + /// + /// Note: This may call the function multiple times if the value has been changed from other threads in + /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied + /// only once to the stored value. + /// + /// # Examples + /// + /// ```rust + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(7); + /// assert_eq!(a.fetch_update(|_| None), Err(7)); + /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7)); + /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8)); + /// assert_eq!(a.load(), 9); + /// ``` + #[inline] + pub fn fetch_update(&self, mut f: F) -> Result + where + F: FnMut(T) -> Option, + { + let mut prev = self.load(); + while let Some(next) = f(prev) { + match self.compare_exchange(prev, next) { + x @ Ok(_) => return x, + Err(next_prev) => prev = next_prev, + } + } + Err(prev) + } +} + +// `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop` +// for `AtomicCell` to avoid leaks of non-`Copy` types. +impl Drop for AtomicCell { + fn drop(&mut self) { + if mem::needs_drop::() { + // SAFETY: + // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data + // - the raw pointer passed in is valid because we got it from a reference + // - `MaybeUninit` prevents double dropping `T` + unsafe { + self.as_ptr().drop_in_place(); + } + } + } +} + +macro_rules! 
atomic { + // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`, + // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop. + (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => { + if can_transmute::<$t, $atomic>() { + let $a: &$atomic; + break $atomic_op; + } + }; + + // If values of type `$t` can be transmuted into values of a primitive atomic type, declares + // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes + // `$fallback_op`. + ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => { + loop { + atomic!(@check, $t, AtomicUnit, $a, $atomic_op); + + atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op); + atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op); + atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op); + #[cfg(target_has_atomic = "64")] + atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op); + // TODO: AtomicU128 is unstable + // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op); + + break $fallback_op; + } + }; +} + +macro_rules! impl_arithmetic { + ($t:ty, fallback, $example:tt) => { + impl AtomicCell<$t> { + /// Increments the current value by `val` and returns the previous value. + /// + /// The addition wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_add(3), 7); + /// assert_eq!(a.load(), 10); + /// ``` + #[inline] + pub fn fetch_add(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = value.wrapping_add(val); + old + } + + /// Decrements the current value by `val` and returns the previous value. + /// + /// The subtraction wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_sub(3), 7); + /// assert_eq!(a.load(), 4); + /// ``` + #[inline] + pub fn fetch_sub(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = value.wrapping_sub(val); + old + } + + /// Applies bitwise "and" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_and(3), 7); + /// assert_eq!(a.load(), 3); + /// ``` + #[inline] + pub fn fetch_and(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value &= val; + old + } + + /// Applies bitwise "nand" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_nand(3), 7); + /// assert_eq!(a.load(), !(7 & 3)); + /// ``` + #[inline] + pub fn fetch_nand(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = !(old & val); + old + } + + /// Applies bitwise "or" to the current value and returns the previous value. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_or(16), 7); + /// assert_eq!(a.load(), 23); + /// ``` + #[inline] + pub fn fetch_or(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value |= val; + old + } + + /// Applies bitwise "xor" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_xor(2), 7); + /// assert_eq!(a.load(), 5); + /// ``` + #[inline] + pub fn fetch_xor(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value ^= val; + old + } + + /// Compares and sets the maximum of the current value and `val`, + /// and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_max(2), 7); + /// assert_eq!(a.load(), 7); + /// ``` + #[inline] + pub fn fetch_max(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = cmp::max(old, val); + old + } + + /// Compares and sets the minimum of the current value and `val`, + /// and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_min(2), 7); + /// assert_eq!(a.load(), 2); + /// ``` + #[inline] + pub fn fetch_min(&self, val: $t) -> $t { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = cmp::min(old, val); + old + } + } + }; + ($t:ty, $atomic:ident, $example:tt) => { + impl AtomicCell<$t> { + /// Increments the current value by `val` and returns the previous value. + /// + /// The addition wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_add(3), 7); + /// assert_eq!(a.load(), 10); + /// ``` + #[inline] + pub fn fetch_add(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_add(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = value.wrapping_add(val); + old + } + } + } + + /// Decrements the current value by `val` and returns the previous value. + /// + /// The subtraction wraps on overflow. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_sub(3), 7); + /// assert_eq!(a.load(), 4); + /// ``` + #[inline] + pub fn fetch_sub(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_sub(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = value.wrapping_sub(val); + old + } + } + } + + /// Applies bitwise "and" to the current value and returns the previous value. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_and(3), 7); + /// assert_eq!(a.load(), 3); + /// ``` + #[inline] + pub fn fetch_and(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_and(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value &= val; + old + } + } + } + + /// Applies bitwise "nand" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_nand(3), 7); + /// assert_eq!(a.load(), !(7 & 3)); + /// ``` + #[inline] + pub fn fetch_nand(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_nand(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = !(old & val); + old + } + } + } + + /// Applies bitwise "or" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_or(16), 7); + /// assert_eq!(a.load(), 23); + /// ``` + #[inline] + pub fn fetch_or(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_or(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value |= val; + old + } + } + } + + /// Applies bitwise "xor" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_xor(2), 7); + /// assert_eq!(a.load(), 5); + /// ``` + #[inline] + pub fn fetch_xor(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_xor(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value ^= val; + old + } + } + } + + /// Compares and sets the maximum of the current value and `val`, + /// and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_max(9), 7); + /// assert_eq!(a.load(), 9); + /// ``` + #[inline] + pub fn fetch_max(&self, val: $t) -> $t { + atomic! { + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_max(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = cmp::max(old, val); + old + } + } + } + + /// Compares and sets the minimum of the current value and `val`, + /// and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + #[doc = $example] + /// + /// assert_eq!(a.fetch_min(2), 7); + /// assert_eq!(a.load(), 2); + /// ``` + #[inline] + pub fn fetch_min(&self, val: $t) -> $t { + atomic! 
{ + $t, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; + a.fetch_min(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = cmp::min(old, val); + old + } + } + } + } + }; +} + +impl_arithmetic!(u8, AtomicU8, "let a = AtomicCell::new(7u8);"); +impl_arithmetic!(i8, AtomicI8, "let a = AtomicCell::new(7i8);"); +impl_arithmetic!(u16, AtomicU16, "let a = AtomicCell::new(7u16);"); +impl_arithmetic!(i16, AtomicI16, "let a = AtomicCell::new(7i16);"); + +impl_arithmetic!(u32, AtomicU32, "let a = AtomicCell::new(7u32);"); +impl_arithmetic!(i32, AtomicI32, "let a = AtomicCell::new(7i32);"); + +#[cfg(target_has_atomic = "64")] +impl_arithmetic!(u64, AtomicU64, "let a = AtomicCell::new(7u64);"); +#[cfg(target_has_atomic = "64")] +impl_arithmetic!(i64, AtomicI64, "let a = AtomicCell::new(7i64);"); +#[cfg(not(target_has_atomic = "64"))] +impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);"); +#[cfg(not(target_has_atomic = "64"))] +impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);"); + +// TODO: AtomicU128 is unstable +// impl_arithmetic!(u128, AtomicU128, "let a = AtomicCell::new(7u128);"); +// impl_arithmetic!(i128, AtomicI128, "let a = AtomicCell::new(7i128);"); +impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);"); +impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);"); + +impl_arithmetic!(usize, AtomicUsize, "let a = AtomicCell::new(7usize);"); +impl_arithmetic!(isize, AtomicIsize, "let a = AtomicCell::new(7isize);"); + +impl AtomicCell { + /// Applies logical "and" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(true); + /// + /// assert_eq!(a.fetch_and(true), true); + /// assert_eq!(a.load(), true); + /// + /// assert_eq!(a.fetch_and(false), true); + /// assert_eq!(a.load(), false); + /// ``` + #[inline] + pub fn fetch_and(&self, val: bool) -> bool { + atomic! { + bool, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; + a.fetch_and(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value &= val; + old + } + } + } + + /// Applies logical "nand" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(true); + /// + /// assert_eq!(a.fetch_nand(false), true); + /// assert_eq!(a.load(), true); + /// + /// assert_eq!(a.fetch_nand(true), true); + /// assert_eq!(a.load(), false); + /// + /// assert_eq!(a.fetch_nand(false), false); + /// assert_eq!(a.load(), true); + /// ``` + #[inline] + pub fn fetch_nand(&self, val: bool) -> bool { + atomic! { + bool, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; + a.fetch_nand(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value = !(old & val); + old + } + } + } + + /// Applies logical "or" to the current value and returns the previous value. 
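+// For illustration, the public `AtomicCell::is_lock_free()` reports which of
+// the impls above end up on the primitive-atomic path. On a typical 64-bit
+// target:
+//
+//     use crossbeam_utils::atomic::AtomicCell;
+//
+//     assert!(AtomicCell::<u64>::is_lock_free());      // maps onto AtomicU64
+//     assert!(!AtomicCell::<u128>::is_lock_free());    // AtomicU128 is unstable: seqlock fallback
+//     assert!(!AtomicCell::<[u8; 3]>::is_lock_free()); // size matches no primitive atomic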
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(false); + /// + /// assert_eq!(a.fetch_or(false), false); + /// assert_eq!(a.load(), false); + /// + /// assert_eq!(a.fetch_or(true), false); + /// assert_eq!(a.load(), true); + /// ``` + #[inline] + pub fn fetch_or(&self, val: bool) -> bool { + atomic! { + bool, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; + a.fetch_or(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value |= val; + old + } + } + } + + /// Applies logical "xor" to the current value and returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::atomic::AtomicCell; + /// + /// let a = AtomicCell::new(true); + /// + /// assert_eq!(a.fetch_xor(false), true); + /// assert_eq!(a.load(), true); + /// + /// assert_eq!(a.fetch_xor(true), true); + /// assert_eq!(a.load(), false); + /// ``` + #[inline] + pub fn fetch_xor(&self, val: bool) -> bool { + atomic! { + bool, _a, + { + let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; + a.fetch_xor(val, Ordering::AcqRel) + }, + { + let _guard = lock(self.as_ptr() as usize).write(); + let value = unsafe { &mut *(self.as_ptr()) }; + let old = *value; + *value ^= val; + old + } + } + } +} + +impl Default for AtomicCell { + fn default() -> AtomicCell { + AtomicCell::new(T::default()) + } +} + +impl From for AtomicCell { + #[inline] + fn from(val: T) -> AtomicCell { + AtomicCell::new(val) + } +} + +impl fmt::Debug for AtomicCell { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AtomicCell") + .field("value", &self.load()) + .finish() + } +} + +/// Returns `true` if values of type `A` can be transmuted into values of type `B`. +const fn can_transmute() -> bool { + // Sizes must be equal, but alignment of `A` must be greater or equal than that of `B`. + (mem::size_of::() == mem::size_of::()) & (mem::align_of::() >= mem::align_of::()) +} + +/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`. +/// +/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic +/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock. +/// +/// However, there is not only one global lock but an array of many locks, and one of them is +/// picked based on the given address. Having many locks reduces contention and improves +/// scalability. +#[inline] +#[must_use] +fn lock(addr: usize) -> &'static SeqLock { + // The number of locks is a prime number because we want to make sure `addr % LEN` gets + // dispersed across all locks. + // + // Note that addresses are always aligned to some power of 2, depending on type `T` in + // `AtomicCell`. If `LEN` was an even number, then `addr % LEN` would be an even number, + // too, which means only half of the locks would get utilized! + // + // It is also possible for addresses to accidentally get aligned to a number that is not a + // power of 2. Consider this example: + // + // ``` + // #[repr(C)] + // struct Foo { + // a: AtomicCell, + // b: u8, + // c: u8, + // } + // ``` + // + // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets + // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3. 
+ // In order to protect from such cases, we simply choose a large prime number for `LEN`. + const LEN: usize = 67; + const L: CachePadded = CachePadded::new(SeqLock::new()); + static LOCKS: [CachePadded; LEN] = [L; LEN]; + + // If the modulus is a constant number, the compiler will use crazy math to transform this into + // a sequence of cheap arithmetic operations rather than using the slow modulo instruction. + &LOCKS[addr % LEN] +} + +/// An atomic `()`. +/// +/// All operations are noops. +struct AtomicUnit; + +impl AtomicUnit { + #[inline] + fn load(&self, _order: Ordering) {} + + #[inline] + fn store(&self, _val: (), _order: Ordering) {} + + #[inline] + fn swap(&self, _val: (), _order: Ordering) {} + + #[inline] + fn compare_exchange_weak( + &self, + _current: (), + _new: (), + _success: Ordering, + _failure: Ordering, + ) -> Result<(), ()> { + Ok(()) + } +} + +/// Returns `true` if operations on `AtomicCell` are lock-free. +const fn atomic_is_lock_free() -> bool { + atomic! { T, _a, true, false } +} + +/// Atomically reads data from `src`. +/// +/// This operation uses the `Acquire` ordering. If possible, an atomic instructions is used, and a +/// global lock otherwise. +unsafe fn atomic_load(src: *mut T) -> T +where + T: Copy, +{ + atomic! { + T, a, + { + a = &*(src as *const _ as *const _); + mem::transmute_copy(&a.load(Ordering::Acquire)) + }, + { + let lock = lock(src as usize); + + // Try doing an optimistic read first. + if let Some(stamp) = lock.optimistic_read() { + // We need a volatile read here because other threads might concurrently modify the + // value. In theory, data races are *always* UB, even if we use volatile reads and + // discard the data when a data race is detected. The proper solution would be to + // do atomic reads and atomic writes, but we can't atomically read and write all + // kinds of data since `AtomicU8` is not available on stable Rust yet. + // Load as `MaybeUninit` because we may load a value that is not valid as `T`. + let val = ptr::read_volatile(src.cast::>()); + + if lock.validate_read(stamp) { + return val.assume_init(); + } + } + + // Grab a regular write lock so that writers don't starve this load. + let guard = lock.write(); + let val = ptr::read(src); + // The value hasn't been changed. Drop the guard without incrementing the stamp. + guard.abort(); + val + } + } +} + +/// Atomically writes `val` to `dst`. +/// +/// This operation uses the `Release` ordering. If possible, an atomic instructions is used, and a +/// global lock otherwise. +unsafe fn atomic_store(dst: *mut T, val: T) { + atomic! { + T, a, + { + a = &*(dst as *const _ as *const _); + a.store(mem::transmute_copy(&val), Ordering::Release); + mem::forget(val); + }, + { + let _guard = lock(dst as usize).write(); + ptr::write(dst, val); + } + } +} + +/// Atomically swaps data at `dst` with `val`. +/// +/// This operation uses the `AcqRel` ordering. If possible, an atomic instructions is used, and a +/// global lock otherwise. +unsafe fn atomic_swap(dst: *mut T, val: T) -> T { + atomic! { + T, a, + { + a = &*(dst as *const _ as *const _); + let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel)); + mem::forget(val); + res + }, + { + let _guard = lock(dst as usize).write(); + ptr::replace(dst, val) + } + } +} + +/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at +/// `dst` with `new`. +/// +/// Returns the old value on success, or the current value at `dst` on failure. 
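+// The public `AtomicCell::compare_exchange` that sits on top of this helper
+// follows the usual `Result` convention; a small usage sketch:
+//
+//     use crossbeam_utils::atomic::AtomicCell;
+//
+//     let a = AtomicCell::new(1u8);
+//     assert_eq!(a.compare_exchange(1, 2), Ok(1));  // stored 2, returns the old value
+//     assert_eq!(a.compare_exchange(1, 3), Err(2)); // fails, returns the current value
+//     assert_eq!(a.load(), 2);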
+/// +/// This operation uses the `AcqRel` ordering. If possible, an atomic instructions is used, and a +/// global lock otherwise. +#[allow(clippy::let_unit_value)] +unsafe fn atomic_compare_exchange_weak(dst: *mut T, mut current: T, new: T) -> Result +where + T: Copy + Eq, +{ + atomic! { + T, a, + { + a = &*(dst as *const _ as *const _); + let mut current_raw = mem::transmute_copy(¤t); + let new_raw = mem::transmute_copy(&new); + + loop { + match a.compare_exchange_weak( + current_raw, + new_raw, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => break Ok(current), + Err(previous_raw) => { + let previous = mem::transmute_copy(&previous_raw); + + if !T::eq(&previous, ¤t) { + break Err(previous); + } + + // The compare-exchange operation has failed and didn't store `new`. The + // failure is either spurious, or `previous` was semantically equal to + // `current` but not byte-equal. Let's retry with `previous` as the new + // `current`. + current = previous; + current_raw = previous_raw; + } + } + } + }, + { + let guard = lock(dst as usize).write(); + + if T::eq(&*dst, ¤t) { + Ok(ptr::replace(dst, new)) + } else { + let val = ptr::read(dst); + // The value hasn't been changed. Drop the guard without incrementing the stamp. + guard.abort(); + Err(val) + } + } + } +} diff --git a/external/vendor/crossbeam-utils/src/atomic/consume.rs b/external/vendor/crossbeam-utils/src/atomic/consume.rs new file mode 100644 index 0000000000..ff8e316b2c --- /dev/null +++ b/external/vendor/crossbeam-utils/src/atomic/consume.rs @@ -0,0 +1,111 @@ +#[cfg(not(crossbeam_no_atomic))] +use core::sync::atomic::Ordering; + +/// Trait which allows reading from primitive atomic types with "consume" ordering. +pub trait AtomicConsume { + /// Type returned by `load_consume`. + type Val; + + /// Loads a value from the atomic using a "consume" memory ordering. + /// + /// This is similar to the "acquire" ordering, except that an ordering is + /// only guaranteed with operations that "depend on" the result of the load. + /// However consume loads are usually much faster than acquire loads on + /// architectures with a weak memory model since they don't require memory + /// fence instructions. + /// + /// The exact definition of "depend on" is a bit vague, but it works as you + /// would expect in practice since a lot of software, especially the Linux + /// kernel, rely on this behavior. + /// + /// This is currently only implemented on ARM and AArch64, where a fence + /// can be avoided. On other architectures this will fall back to a simple + /// `load(Ordering::Acquire)`. + fn load_consume(&self) -> Self::Val; +} + +#[cfg(not(crossbeam_no_atomic))] +// Miri and Loom don't support "consume" ordering and ThreadSanitizer doesn't treat +// load(Relaxed) + compiler_fence(Acquire) as "consume" load. +// LLVM generates machine code equivalent to fence(Acquire) in compiler_fence(Acquire) +// on PowerPC, MIPS, etc. (https://godbolt.org/z/hffvjvW7h), so for now the fence +// can be actually avoided here only on ARM and AArch64. See also +// https://github.com/rust-lang/rust/issues/62256. +#[cfg(all( + any(target_arch = "arm", target_arch = "aarch64"), + not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)), +))] +macro_rules! 
impl_consume { + () => { + #[inline] + fn load_consume(&self) -> Self::Val { + use crate::primitive::sync::atomic::compiler_fence; + let result = self.load(Ordering::Relaxed); + compiler_fence(Ordering::Acquire); + result + } + }; +} + +#[cfg(not(crossbeam_no_atomic))] +#[cfg(not(all( + any(target_arch = "arm", target_arch = "aarch64"), + not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)), +)))] +macro_rules! impl_consume { + () => { + #[inline] + fn load_consume(&self) -> Self::Val { + self.load(Ordering::Acquire) + } + }; +} + +macro_rules! impl_atomic { + ($atomic:ident, $val:ty) => { + #[cfg(not(crossbeam_no_atomic))] + impl AtomicConsume for core::sync::atomic::$atomic { + type Val = $val; + impl_consume!(); + } + #[cfg(crossbeam_loom)] + impl AtomicConsume for loom::sync::atomic::$atomic { + type Val = $val; + impl_consume!(); + } + }; +} + +impl_atomic!(AtomicBool, bool); +impl_atomic!(AtomicUsize, usize); +impl_atomic!(AtomicIsize, isize); +impl_atomic!(AtomicU8, u8); +impl_atomic!(AtomicI8, i8); +impl_atomic!(AtomicU16, u16); +impl_atomic!(AtomicI16, i16); +#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))] +impl_atomic!(AtomicU32, u32); +#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))] +impl_atomic!(AtomicI32, i32); +#[cfg(any( + target_has_atomic = "64", + not(any(target_pointer_width = "16", target_pointer_width = "32")), +))] +impl_atomic!(AtomicU64, u64); +#[cfg(any( + target_has_atomic = "64", + not(any(target_pointer_width = "16", target_pointer_width = "32")), +))] +impl_atomic!(AtomicI64, i64); + +#[cfg(not(crossbeam_no_atomic))] +impl AtomicConsume for core::sync::atomic::AtomicPtr { + type Val = *mut T; + impl_consume!(); +} + +#[cfg(crossbeam_loom)] +impl AtomicConsume for loom::sync::atomic::AtomicPtr { + type Val = *mut T; + impl_consume!(); +} diff --git a/external/vendor/crossbeam-utils/src/atomic/mod.rs b/external/vendor/crossbeam-utils/src/atomic/mod.rs new file mode 100644 index 0000000000..8662ded564 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/atomic/mod.rs @@ -0,0 +1,32 @@ +//! Atomic types. +//! +//! * [`AtomicCell`], a thread-safe mutable memory location. +//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering. + +#[cfg(target_has_atomic = "ptr")] +#[cfg(not(crossbeam_loom))] +// Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap +// around. +// +// In narrow architectures (pointer width <= 16), the counter is still <= 32-bit and may be +// vulnerable to wrap around. But it's mostly okay, since in such a primitive hardware, the +// counter will not be increased that fast. +// Note that Rust (and C99) pointers must be at least 16-bit (i.e., 8-bit targets are impossible): https://github.com/rust-lang/rust/pull/49305 +#[cfg_attr( + any(target_pointer_width = "16", target_pointer_width = "32"), + path = "seq_lock_wide.rs" +)] +mod seq_lock; + +#[cfg(target_has_atomic = "ptr")] +// We cannot provide AtomicCell under cfg(crossbeam_loom) because loom's atomic +// types have a different in-memory representation than the underlying type. +// TODO: The latest loom supports fences, so fallback using seqlock may be available. 
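+// Usage sketch for the consume loads listed in the module docs above (only the
+// trait import is needed on top of the core atomic types):
+//
+//     use core::sync::atomic::AtomicUsize;
+//     use crossbeam_utils::atomic::AtomicConsume;
+//
+//     let a = AtomicUsize::new(7);
+//     // On ARM/AArch64 this compiles to a relaxed load plus a compiler fence;
+//     // on other targets it falls back to an acquire load.
+//     assert_eq!(a.load_consume(), 7);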
+#[cfg(not(crossbeam_loom))] +mod atomic_cell; +#[cfg(target_has_atomic = "ptr")] +#[cfg(not(crossbeam_loom))] +pub use atomic_cell::AtomicCell; + +mod consume; +pub use consume::AtomicConsume; diff --git a/external/vendor/crossbeam-utils/src/atomic/seq_lock.rs b/external/vendor/crossbeam-utils/src/atomic/seq_lock.rs new file mode 100644 index 0000000000..ff8defd26d --- /dev/null +++ b/external/vendor/crossbeam-utils/src/atomic/seq_lock.rs @@ -0,0 +1,112 @@ +use core::mem; +use core::sync::atomic::{self, AtomicUsize, Ordering}; + +use crate::Backoff; + +/// A simple stamped lock. +pub(crate) struct SeqLock { + /// The current state of the lock. + /// + /// All bits except the least significant one hold the current stamp. When locked, the state + /// equals 1 and doesn't contain a valid stamp. + state: AtomicUsize, +} + +impl SeqLock { + pub(crate) const fn new() -> Self { + Self { + state: AtomicUsize::new(0), + } + } + + /// If not locked, returns the current stamp. + /// + /// This method should be called before optimistic reads. + #[inline] + pub(crate) fn optimistic_read(&self) -> Option { + let state = self.state.load(Ordering::Acquire); + if state == 1 { + None + } else { + Some(state) + } + } + + /// Returns `true` if the current stamp is equal to `stamp`. + /// + /// This method should be called after optimistic reads to check whether they are valid. The + /// argument `stamp` should correspond to the one returned by method `optimistic_read`. + #[inline] + pub(crate) fn validate_read(&self, stamp: usize) -> bool { + atomic::fence(Ordering::Acquire); + self.state.load(Ordering::Relaxed) == stamp + } + + /// Grabs the lock for writing. + #[inline] + pub(crate) fn write(&'static self) -> SeqLockWriteGuard { + let backoff = Backoff::new(); + loop { + let previous = self.state.swap(1, Ordering::Acquire); + + if previous != 1 { + atomic::fence(Ordering::Release); + + return SeqLockWriteGuard { + lock: self, + state: previous, + }; + } + + backoff.snooze(); + } + } +} + +/// An RAII guard that releases the lock and increments the stamp when dropped. +pub(crate) struct SeqLockWriteGuard { + /// The parent lock. + lock: &'static SeqLock, + + /// The stamp before locking. + state: usize, +} + +impl SeqLockWriteGuard { + /// Releases the lock without incrementing the stamp. + #[inline] + pub(crate) fn abort(self) { + self.lock.state.store(self.state, Ordering::Release); + + // We specifically don't want to call drop(), since that's + // what increments the stamp. + mem::forget(self); + } +} + +impl Drop for SeqLockWriteGuard { + #[inline] + fn drop(&mut self) { + // Release the lock and increment the stamp. + self.lock + .state + .store(self.state.wrapping_add(2), Ordering::Release); + } +} + +#[cfg(test)] +mod tests { + use super::SeqLock; + + #[test] + fn test_abort() { + static LK: SeqLock = SeqLock::new(); + let before = LK.optimistic_read().unwrap(); + { + let guard = LK.write(); + guard.abort(); + } + let after = LK.optimistic_read().unwrap(); + assert_eq!(before, after, "aborted write does not update the stamp"); + } +} diff --git a/external/vendor/crossbeam-utils/src/atomic/seq_lock_wide.rs b/external/vendor/crossbeam-utils/src/atomic/seq_lock_wide.rs new file mode 100644 index 0000000000..ef5d94a454 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/atomic/seq_lock_wide.rs @@ -0,0 +1,155 @@ +use core::mem; +use core::sync::atomic::{self, AtomicUsize, Ordering}; + +use crate::Backoff; + +/// A simple stamped lock. 
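+// How callers in atomic_cell.rs drive the stamped lock above (simplified
+// sketch of the reader protocol, not literal code from this crate):
+//
+//     loop {
+//         if let Some(stamp) = lock.optimistic_read() {
+//             let val = unsafe { ptr::read_volatile(src) }; // may race; discarded if invalid
+//             if lock.validate_read(stamp) {
+//                 break val; // stamp unchanged, so no writer intervened
+//             }
+//         }
+//         // stamp changed or lock held: fall back to lock.write() and read under the guard
+//     }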
+/// +/// The state is represented as two `AtomicUsize`: `state_hi` for high bits and `state_lo` for low +/// bits. +pub(crate) struct SeqLock { + /// The high bits of the current state of the lock. + state_hi: AtomicUsize, + + /// The low bits of the current state of the lock. + /// + /// All bits except the least significant one hold the current stamp. When locked, the state_lo + /// equals 1 and doesn't contain a valid stamp. + state_lo: AtomicUsize, +} + +impl SeqLock { + pub(crate) const fn new() -> Self { + Self { + state_hi: AtomicUsize::new(0), + state_lo: AtomicUsize::new(0), + } + } + + /// If not locked, returns the current stamp. + /// + /// This method should be called before optimistic reads. + #[inline] + pub(crate) fn optimistic_read(&self) -> Option<(usize, usize)> { + // The acquire loads from `state_hi` and `state_lo` synchronize with the release stores in + // `SeqLockWriteGuard::drop`. + // + // As a consequence, we can make sure that (1) all writes within the era of `state_hi - 1` + // happens before now; and therefore, (2) if `state_lo` is even, all writes within the + // critical section of (`state_hi`, `state_lo`) happens before now. + let state_hi = self.state_hi.load(Ordering::Acquire); + let state_lo = self.state_lo.load(Ordering::Acquire); + if state_lo == 1 { + None + } else { + Some((state_hi, state_lo)) + } + } + + /// Returns `true` if the current stamp is equal to `stamp`. + /// + /// This method should be called after optimistic reads to check whether they are valid. The + /// argument `stamp` should correspond to the one returned by method `optimistic_read`. + #[inline] + pub(crate) fn validate_read(&self, stamp: (usize, usize)) -> bool { + // Thanks to the fence, if we're noticing any modification to the data at the critical + // section of `(a, b)`, then the critical section's write of 1 to state_lo should be + // visible. + atomic::fence(Ordering::Acquire); + + // So if `state_lo` coincides with `stamp.1`, then either (1) we're noticing no modification + // to the data after the critical section of `(stamp.0, stamp.1)`, or (2) `state_lo` wrapped + // around. + // + // If (2) is the case, the acquire ordering ensures we see the new value of `state_hi`. + let state_lo = self.state_lo.load(Ordering::Acquire); + + // If (2) is the case and `state_hi` coincides with `stamp.0`, then `state_hi` also wrapped + // around, which we give up to correctly validate the read. + let state_hi = self.state_hi.load(Ordering::Relaxed); + + // Except for the case that both `state_hi` and `state_lo` wrapped around, the following + // condition implies that we're noticing no modification to the data after the critical + // section of `(stamp.0, stamp.1)`. + (state_hi, state_lo) == stamp + } + + /// Grabs the lock for writing. + #[inline] + pub(crate) fn write(&'static self) -> SeqLockWriteGuard { + let backoff = Backoff::new(); + loop { + let previous = self.state_lo.swap(1, Ordering::Acquire); + + if previous != 1 { + // To synchronize with the acquire fence in `validate_read` via any modification to + // the data at the critical section of `(state_hi, previous)`. + atomic::fence(Ordering::Release); + + return SeqLockWriteGuard { + lock: self, + state_lo: previous, + }; + } + + backoff.snooze(); + } + } +} + +/// An RAII guard that releases the lock and increments the stamp when dropped. +pub(crate) struct SeqLockWriteGuard { + /// The parent lock. + lock: &'static SeqLock, + + /// The stamp before locking. 
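+// Worked example of the two-word stamp on a 32-bit target: each completed
+// write adds 2 to `state_lo` (the low bit is reserved for the "locked" flag),
+// so after 2^31 writes `state_lo` wraps from 0xFFFF_FFFE to 0 and `state_hi`
+// is bumped from 0 to 1. A reader that validates against the pair
+// (state_hi, state_lo) can therefore only mis-validate if both words wrap
+// between its two loads, which the comments below treat as practically
+// impossible.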
+ state_lo: usize, +} + +impl SeqLockWriteGuard { + /// Releases the lock without incrementing the stamp. + #[inline] + pub(crate) fn abort(self) { + self.lock.state_lo.store(self.state_lo, Ordering::Release); + mem::forget(self); + } +} + +impl Drop for SeqLockWriteGuard { + #[inline] + fn drop(&mut self) { + let state_lo = self.state_lo.wrapping_add(2); + + // Increase the high bits if the low bits wrap around. + // + // Release ordering for synchronizing with `optimistic_read`. + if state_lo == 0 { + let state_hi = self.lock.state_hi.load(Ordering::Relaxed); + self.lock + .state_hi + .store(state_hi.wrapping_add(1), Ordering::Release); + } + + // Release the lock and increment the stamp. + // + // Release ordering for synchronizing with `optimistic_read`. + self.lock.state_lo.store(state_lo, Ordering::Release); + } +} + +#[cfg(test)] +mod tests { + use super::SeqLock; + + #[test] + fn test_abort() { + static LK: SeqLock = SeqLock::new(); + let before = LK.optimistic_read().unwrap(); + { + let guard = LK.write(); + guard.abort(); + } + let after = LK.optimistic_read().unwrap(); + assert_eq!(before, after, "aborted write does not update the stamp"); + } +} diff --git a/external/vendor/crossbeam-utils/src/backoff.rs b/external/vendor/crossbeam-utils/src/backoff.rs new file mode 100644 index 0000000000..7a505ed614 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/backoff.rs @@ -0,0 +1,287 @@ +use crate::primitive::hint; +use core::cell::Cell; +use core::fmt; + +const SPIN_LIMIT: u32 = 6; +const YIELD_LIMIT: u32 = 10; + +/// Performs exponential backoff in spin loops. +/// +/// Backing off in spin loops reduces contention and improves overall performance. +/// +/// This primitive can execute *YIELD* and *PAUSE* instructions, yield the current thread to the OS +/// scheduler, and tell when is a good time to block the thread using a different synchronization +/// mechanism. Each step of the back off procedure takes roughly twice as long as the previous +/// step. +/// +/// # Examples +/// +/// Backing off in a lock-free loop: +/// +/// ``` +/// use crossbeam_utils::Backoff; +/// use std::sync::atomic::AtomicUsize; +/// use std::sync::atomic::Ordering::SeqCst; +/// +/// fn fetch_mul(a: &AtomicUsize, b: usize) -> usize { +/// let backoff = Backoff::new(); +/// loop { +/// let val = a.load(SeqCst); +/// if a.compare_exchange(val, val.wrapping_mul(b), SeqCst, SeqCst).is_ok() { +/// return val; +/// } +/// backoff.spin(); +/// } +/// } +/// ``` +/// +/// Waiting for an [`AtomicBool`] to become `true`: +/// +/// ``` +/// use crossbeam_utils::Backoff; +/// use std::sync::atomic::AtomicBool; +/// use std::sync::atomic::Ordering::SeqCst; +/// +/// fn spin_wait(ready: &AtomicBool) { +/// let backoff = Backoff::new(); +/// while !ready.load(SeqCst) { +/// backoff.snooze(); +/// } +/// } +/// ``` +/// +/// Waiting for an [`AtomicBool`] to become `true` and parking the thread after a long wait. 
+/// Note that whoever sets the atomic variable to `true` must notify the parked thread by calling +/// [`unpark()`]: +/// +/// ``` +/// use crossbeam_utils::Backoff; +/// use std::sync::atomic::AtomicBool; +/// use std::sync::atomic::Ordering::SeqCst; +/// use std::thread; +/// +/// fn blocking_wait(ready: &AtomicBool) { +/// let backoff = Backoff::new(); +/// while !ready.load(SeqCst) { +/// if backoff.is_completed() { +/// thread::park(); +/// } else { +/// backoff.snooze(); +/// } +/// } +/// } +/// ``` +/// +/// [`is_completed`]: Backoff::is_completed +/// [`std::thread::park()`]: std::thread::park +/// [`Condvar`]: std::sync::Condvar +/// [`AtomicBool`]: std::sync::atomic::AtomicBool +/// [`unpark()`]: std::thread::Thread::unpark +pub struct Backoff { + step: Cell, +} + +impl Backoff { + /// Creates a new `Backoff`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// + /// let backoff = Backoff::new(); + /// ``` + #[inline] + pub fn new() -> Self { + Backoff { step: Cell::new(0) } + } + + /// Resets the `Backoff`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// + /// let backoff = Backoff::new(); + /// backoff.reset(); + /// ``` + #[inline] + pub fn reset(&self) { + self.step.set(0); + } + + /// Backs off in a lock-free loop. + /// + /// This method should be used when we need to retry an operation because another thread made + /// progress. + /// + /// The processor may yield using the *YIELD* or *PAUSE* instruction. + /// + /// # Examples + /// + /// Backing off in a lock-free loop: + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// use std::sync::atomic::AtomicUsize; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// fn fetch_mul(a: &AtomicUsize, b: usize) -> usize { + /// let backoff = Backoff::new(); + /// loop { + /// let val = a.load(SeqCst); + /// if a.compare_exchange(val, val.wrapping_mul(b), SeqCst, SeqCst).is_ok() { + /// return val; + /// } + /// backoff.spin(); + /// } + /// } + /// + /// let a = AtomicUsize::new(7); + /// assert_eq!(fetch_mul(&a, 8), 7); + /// assert_eq!(a.load(SeqCst), 56); + /// ``` + #[inline] + pub fn spin(&self) { + for _ in 0..1 << self.step.get().min(SPIN_LIMIT) { + hint::spin_loop(); + } + + if self.step.get() <= SPIN_LIMIT { + self.step.set(self.step.get() + 1); + } + } + + /// Backs off in a blocking loop. + /// + /// This method should be used when we need to wait for another thread to make progress. + /// + /// The processor may yield using the *YIELD* or *PAUSE* instruction and the current thread + /// may yield by giving up a timeslice to the OS scheduler. + /// + /// In `#[no_std]` environments, this method is equivalent to [`spin`]. + /// + /// If possible, use [`is_completed`] to check when it is advised to stop using backoff and + /// block the current thread using a different synchronization mechanism instead. 
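+// Putting the constants above together (SPIN_LIMIT = 6, YIELD_LIMIT = 10), the
+// schedule per call is roughly:
+//
+//     step        0   1   2   3    4    5    6    7..=10         11+
+//     spin()      1   2   4   8   16   32   64   64 (capped)     64
+//     snooze()    1   2   4   8   16   32   64   yield_now()     yield_now(); is_completed() == true
+//
+// (numbers are spin-loop hint iterations; without the `std` feature, snooze()
+// keeps spinning with `1 << step` iterations instead of yielding.)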
+ /// + /// [`spin`]: Backoff::spin + /// [`is_completed`]: Backoff::is_completed + /// + /// # Examples + /// + /// Waiting for an [`AtomicBool`] to become `true`: + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// use std::sync::Arc; + /// use std::sync::atomic::AtomicBool; + /// use std::sync::atomic::Ordering::SeqCst; + /// use std::thread; + /// use std::time::Duration; + /// + /// fn spin_wait(ready: &AtomicBool) { + /// let backoff = Backoff::new(); + /// while !ready.load(SeqCst) { + /// backoff.snooze(); + /// } + /// } + /// + /// let ready = Arc::new(AtomicBool::new(false)); + /// let ready2 = ready.clone(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_millis(100)); + /// ready2.store(true, SeqCst); + /// }); + /// + /// assert_eq!(ready.load(SeqCst), false); + /// spin_wait(&ready); + /// assert_eq!(ready.load(SeqCst), true); + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + /// + /// [`AtomicBool`]: std::sync::atomic::AtomicBool + #[inline] + pub fn snooze(&self) { + if self.step.get() <= SPIN_LIMIT { + for _ in 0..1 << self.step.get() { + hint::spin_loop(); + } + } else { + #[cfg(not(feature = "std"))] + for _ in 0..1 << self.step.get() { + hint::spin_loop(); + } + + #[cfg(feature = "std")] + ::std::thread::yield_now(); + } + + if self.step.get() <= YIELD_LIMIT { + self.step.set(self.step.get() + 1); + } + } + + /// Returns `true` if exponential backoff has completed and blocking the thread is advised. + /// + /// # Examples + /// + /// Waiting for an [`AtomicBool`] to become `true` and parking the thread after a long wait: + /// + /// ``` + /// use crossbeam_utils::Backoff; + /// use std::sync::Arc; + /// use std::sync::atomic::AtomicBool; + /// use std::sync::atomic::Ordering::SeqCst; + /// use std::thread; + /// use std::time::Duration; + /// + /// fn blocking_wait(ready: &AtomicBool) { + /// let backoff = Backoff::new(); + /// while !ready.load(SeqCst) { + /// if backoff.is_completed() { + /// thread::park(); + /// } else { + /// backoff.snooze(); + /// } + /// } + /// } + /// + /// let ready = Arc::new(AtomicBool::new(false)); + /// let ready2 = ready.clone(); + /// let waiter = thread::current(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_millis(100)); + /// ready2.store(true, SeqCst); + /// waiter.unpark(); + /// }); + /// + /// assert_eq!(ready.load(SeqCst), false); + /// blocking_wait(&ready); + /// assert_eq!(ready.load(SeqCst), true); + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + /// + /// [`AtomicBool`]: std::sync::atomic::AtomicBool + #[inline] + pub fn is_completed(&self) -> bool { + self.step.get() > YIELD_LIMIT + } +} + +impl fmt::Debug for Backoff { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Backoff") + .field("step", &self.step) + .field("is_completed", &self.is_completed()) + .finish() + } +} + +impl Default for Backoff { + fn default() -> Backoff { + Backoff::new() + } +} diff --git a/external/vendor/crossbeam-utils/src/cache_padded.rs b/external/vendor/crossbeam-utils/src/cache_padded.rs new file mode 100644 index 0000000000..6c930c6f3f --- /dev/null +++ b/external/vendor/crossbeam-utils/src/cache_padded.rs @@ -0,0 +1,217 @@ +use core::fmt; +use core::ops::{Deref, DerefMut}; + +/// Pads and aligns a value to the length of a cache 
line. +/// +/// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of +/// data are not placed into the same cache line. Updating an atomic value invalidates the whole +/// cache line it belongs to, which makes the next access to the same cache line slower for other +/// CPU cores. Use `CachePadded` to ensure updating one piece of data doesn't invalidate other +/// cached data. +/// +/// # Size and alignment +/// +/// Cache lines are assumed to be N bytes long, depending on the architecture: +/// +/// * On x86-64, aarch64, and powerpc64, N = 128. +/// * On arm, mips, mips64, sparc, and hexagon, N = 32. +/// * On m68k, N = 16. +/// * On s390x, N = 256. +/// * On all others, N = 64. +/// +/// Note that N is just a reasonable guess and is not guaranteed to match the actual cache line +/// length of the machine the program is running on. On modern Intel architectures, spatial +/// prefetcher is pulling pairs of 64-byte cache lines at a time, so we pessimistically assume that +/// cache lines are 128 bytes long. +/// +/// The size of `CachePadded` is the smallest multiple of N bytes large enough to accommodate +/// a value of type `T`. +/// +/// The alignment of `CachePadded` is the maximum of N bytes and the alignment of `T`. +/// +/// # Examples +/// +/// Alignment and padding: +/// +/// ``` +/// use crossbeam_utils::CachePadded; +/// +/// let array = [CachePadded::new(1i8), CachePadded::new(2i8)]; +/// let addr1 = &*array[0] as *const i8 as usize; +/// let addr2 = &*array[1] as *const i8 as usize; +/// +/// assert!(addr2 - addr1 >= 32); +/// assert_eq!(addr1 % 32, 0); +/// assert_eq!(addr2 % 32, 0); +/// ``` +/// +/// When building a concurrent queue with a head and a tail index, it is wise to place them in +/// different cache lines so that concurrent threads pushing and popping elements don't invalidate +/// each other's cache lines: +/// +/// ``` +/// use crossbeam_utils::CachePadded; +/// use std::sync::atomic::AtomicUsize; +/// +/// struct Queue { +/// head: CachePadded, +/// tail: CachePadded, +/// buffer: *mut T, +/// } +/// ``` +#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] +// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache +// lines at a time, so we have to align to 128 bytes rather than 64. +// +// Sources: +// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf +// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 +// +// aarch64/arm64ec's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size. +// +// Sources: +// - https://www.mono-project.com/news/2016/09/12/arm64-icache/ +// +// powerpc64 has 128-byte cache line size. +// +// Sources: +// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9 +// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/powerpc/include/asm/cache.h#L26 +#[cfg_attr( + any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "arm64ec", + target_arch = "powerpc64", + ), + repr(align(128)) +)] +// arm, mips, mips64, sparc, and hexagon have 32-byte cache line size. 
+// +// Sources: +// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7 +// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7 +// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7 +// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9 +// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17 +// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12 +#[cfg_attr( + any( + target_arch = "arm", + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "mips64", + target_arch = "mips64r6", + target_arch = "sparc", + target_arch = "hexagon", + ), + repr(align(32)) +)] +// m68k has 16-byte cache line size. +// +// Sources: +// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9 +#[cfg_attr(target_arch = "m68k", repr(align(16)))] +// s390x has 256-byte cache line size. +// +// Sources: +// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7 +// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13 +#[cfg_attr(target_arch = "s390x", repr(align(256)))] +// x86, wasm, riscv, and sparc64 have 64-byte cache line size. +// +// Sources: +// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9 +// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7 +// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10 +// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19 +// +// All others are assumed to have 64-byte cache line size. +#[cfg_attr( + not(any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "arm64ec", + target_arch = "powerpc64", + target_arch = "arm", + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "mips64", + target_arch = "mips64r6", + target_arch = "sparc", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "s390x", + )), + repr(align(64)) +)] +pub struct CachePadded { + value: T, +} + +unsafe impl Send for CachePadded {} +unsafe impl Sync for CachePadded {} + +impl CachePadded { + /// Pads and aligns a value to the length of a cache line. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::CachePadded; + /// + /// let padded_value = CachePadded::new(1); + /// ``` + pub const fn new(t: T) -> CachePadded { + CachePadded:: { value: t } + } + + /// Returns the inner value. 
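+// Size/alignment sketch under the 128-byte guess used for x86_64 above (the
+// exact numbers differ per target, as documented on the struct):
+//
+//     use core::mem::{align_of, size_of};
+//     use crossbeam_utils::CachePadded;
+//
+//     assert_eq!(align_of::<CachePadded<u8>>(), 128);
+//     assert_eq!(size_of::<CachePadded<u8>>(), 128);        // padded up from 1 byte
+//     assert_eq!(size_of::<CachePadded<[u8; 130]>>(), 256); // next multiple of 128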
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::CachePadded; + /// + /// let padded_value = CachePadded::new(7); + /// let value = padded_value.into_inner(); + /// assert_eq!(value, 7); + /// ``` + pub fn into_inner(self) -> T { + self.value + } +} + +impl Deref for CachePadded { + type Target = T; + + fn deref(&self) -> &T { + &self.value + } +} + +impl DerefMut for CachePadded { + fn deref_mut(&mut self) -> &mut T { + &mut self.value + } +} + +impl fmt::Debug for CachePadded { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("CachePadded") + .field("value", &self.value) + .finish() + } +} + +impl From for CachePadded { + fn from(t: T) -> Self { + CachePadded::new(t) + } +} + +impl fmt::Display for CachePadded { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.value, f) + } +} diff --git a/external/vendor/crossbeam-utils/src/lib.rs b/external/vendor/crossbeam-utils/src/lib.rs new file mode 100644 index 0000000000..6f124f9700 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/lib.rs @@ -0,0 +1,110 @@ +//! Miscellaneous tools for concurrent programming. +//! +//! ## Atomics +//! +//! * [`AtomicCell`], a thread-safe mutable memory location. +//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering. +//! +//! ## Thread synchronization +//! +//! * [`Parker`], a thread parking primitive. +//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. +//! * [`WaitGroup`], for synchronizing the beginning or end of some computation. +//! +//! ## Utilities +//! +//! * [`Backoff`], for exponential backoff in spin loops. +//! * [`CachePadded`], for padding and aligning a value to the length of a cache line. +//! * [`scope`], for spawning threads that borrow local variables from the stack. +//! +//! [`AtomicCell`]: atomic::AtomicCell +//! [`AtomicConsume`]: atomic::AtomicConsume +//! [`Parker`]: sync::Parker +//! [`ShardedLock`]: sync::ShardedLock +//! [`WaitGroup`]: sync::WaitGroup +//! [`scope`]: thread::scope + +#![no_std] +#![doc(test( + no_crate_inject, + attr( + deny(warnings, rust_2018_idioms), + allow(dead_code, unused_assignments, unused_variables) + ) +))] +#![warn( + missing_docs, + missing_debug_implementations, + rust_2018_idioms, + unreachable_pub +)] + +#[cfg(feature = "std")] +extern crate std; + +#[cfg(crossbeam_loom)] +#[allow(unused_imports)] +mod primitive { + pub(crate) mod hint { + pub(crate) use loom::hint::spin_loop; + } + pub(crate) mod sync { + pub(crate) mod atomic { + pub(crate) use loom::sync::atomic::{ + AtomicBool, AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, + AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering, + }; + + // FIXME: loom does not support compiler_fence at the moment. + // https://github.com/tokio-rs/loom/issues/117 + // we use fence as a stand-in for compiler_fence for the time being. + // this may miss some races since fence is stronger than compiler_fence, + // but it's the best we can do for the time being. 
+ pub(crate) use loom::sync::atomic::fence as compiler_fence; + } + pub(crate) use loom::sync::{Arc, Condvar, Mutex}; + } +} +#[cfg(not(crossbeam_loom))] +#[allow(unused_imports)] +mod primitive { + pub(crate) mod hint { + pub(crate) use core::hint::spin_loop; + } + pub(crate) mod sync { + pub(crate) mod atomic { + pub(crate) use core::sync::atomic::{compiler_fence, Ordering}; + #[cfg(not(crossbeam_no_atomic))] + pub(crate) use core::sync::atomic::{ + AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicU16, AtomicU8, AtomicUsize, + }; + #[cfg(not(crossbeam_no_atomic))] + #[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))] + pub(crate) use core::sync::atomic::{AtomicI32, AtomicU32}; + #[cfg(not(crossbeam_no_atomic))] + #[cfg(any( + target_has_atomic = "64", + not(any(target_pointer_width = "16", target_pointer_width = "32")), + ))] + pub(crate) use core::sync::atomic::{AtomicI64, AtomicU64}; + } + + #[cfg(feature = "std")] + pub(crate) use std::sync::{Arc, Condvar, Mutex}; + } +} + +pub mod atomic; + +mod cache_padded; +pub use crate::cache_padded::CachePadded; + +mod backoff; +pub use crate::backoff::Backoff; + +#[cfg(feature = "std")] +pub mod sync; + +#[cfg(feature = "std")] +#[cfg(not(crossbeam_loom))] +pub mod thread; diff --git a/external/vendor/crossbeam-utils/src/sync/mod.rs b/external/vendor/crossbeam-utils/src/sync/mod.rs new file mode 100644 index 0000000000..f9eec71fb3 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/mod.rs @@ -0,0 +1,17 @@ +//! Thread synchronization primitives. +//! +//! * [`Parker`], a thread parking primitive. +//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. +//! * [`WaitGroup`], for synchronizing the beginning or end of some computation. + +#[cfg(not(crossbeam_loom))] +mod once_lock; +mod parker; +#[cfg(not(crossbeam_loom))] +mod sharded_lock; +mod wait_group; + +pub use self::parker::{Parker, Unparker}; +#[cfg(not(crossbeam_loom))] +pub use self::sharded_lock::{ShardedLock, ShardedLockReadGuard, ShardedLockWriteGuard}; +pub use self::wait_group::WaitGroup; diff --git a/external/vendor/crossbeam-utils/src/sync/once_lock.rs b/external/vendor/crossbeam-utils/src/sync/once_lock.rs new file mode 100644 index 0000000000..e057aca7d5 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/once_lock.rs @@ -0,0 +1,88 @@ +// Based on unstable std::sync::OnceLock. +// +// Source: https://github.com/rust-lang/rust/blob/8e9c93df464b7ada3fc7a1c8ccddd9dcb24ee0a0/library/std/src/sync/once_lock.rs + +use core::cell::UnsafeCell; +use core::mem::MaybeUninit; +use std::sync::Once; + +pub(crate) struct OnceLock { + once: Once, + value: UnsafeCell>, + // Unlike std::sync::OnceLock, we don't need PhantomData here because + // we don't use #[may_dangle]. +} + +unsafe impl Sync for OnceLock {} +unsafe impl Send for OnceLock {} + +impl OnceLock { + /// Creates a new empty cell. + #[must_use] + pub(crate) const fn new() -> Self { + Self { + once: Once::new(), + value: UnsafeCell::new(MaybeUninit::uninit()), + } + } + + /// Gets the contents of the cell, initializing it with `f` if the cell + /// was empty. + /// + /// Many threads may call `get_or_init` concurrently with different + /// initializing functions, but it is guaranteed that only one function + /// will be executed. + /// + /// # Panics + /// + /// If `f` panics, the panic is propagated to the caller, and the cell + /// remains uninitialized. + /// + /// It is an error to reentrantly initialize the cell from `f`. The + /// exact outcome is unspecified. 
Current implementation deadlocks, but + /// this may be changed to a panic in the future. + pub(crate) fn get_or_init(&self, f: F) -> &T + where + F: FnOnce() -> T, + { + // Fast path check + if self.once.is_completed() { + // SAFETY: The inner value has been initialized + return unsafe { self.get_unchecked() }; + } + self.initialize(f); + + // SAFETY: The inner value has been initialized + unsafe { self.get_unchecked() } + } + + #[cold] + fn initialize(&self, f: F) + where + F: FnOnce() -> T, + { + let slot = self.value.get(); + + self.once.call_once(|| { + let value = f(); + unsafe { slot.write(MaybeUninit::new(value)) } + }); + } + + /// # Safety + /// + /// The value must be initialized + unsafe fn get_unchecked(&self) -> &T { + debug_assert!(self.once.is_completed()); + &*self.value.get().cast::() + } +} + +impl Drop for OnceLock { + fn drop(&mut self) { + if self.once.is_completed() { + // SAFETY: The inner value has been initialized + unsafe { (*self.value.get()).assume_init_drop() }; + } + } +} diff --git a/external/vendor/crossbeam-utils/src/sync/parker.rs b/external/vendor/crossbeam-utils/src/sync/parker.rs new file mode 100644 index 0000000000..971981d2b7 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/parker.rs @@ -0,0 +1,415 @@ +use crate::primitive::sync::atomic::{AtomicUsize, Ordering::SeqCst}; +use crate::primitive::sync::{Arc, Condvar, Mutex}; +use std::fmt; +use std::marker::PhantomData; +use std::time::{Duration, Instant}; + +/// A thread parking primitive. +/// +/// Conceptually, each `Parker` has an associated token which is initially not present: +/// +/// * The [`park`] method blocks the current thread unless or until the token is available, at +/// which point it automatically consumes the token. +/// +/// * The [`park_timeout`] and [`park_deadline`] methods work the same as [`park`], but block for +/// a specified maximum time. +/// +/// * The [`unpark`] method atomically makes the token available if it wasn't already. Because the +/// token is initially absent, [`unpark`] followed by [`park`] will result in the second call +/// returning immediately. +/// +/// In other words, each `Parker` acts a bit like a spinlock that can be locked and unlocked using +/// [`park`] and [`unpark`]. +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use std::time::Duration; +/// use crossbeam_utils::sync::Parker; +/// +/// let p = Parker::new(); +/// let u = p.unparker().clone(); +/// +/// // Make the token available. +/// u.unpark(); +/// // Wakes up immediately and consumes the token. +/// p.park(); +/// +/// thread::spawn(move || { +/// thread::sleep(Duration::from_millis(500)); +/// u.unpark(); +/// }); +/// +/// // Wakes up when `u.unpark()` provides the token. +/// p.park(); +/// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +/// ``` +/// +/// [`park`]: Parker::park +/// [`park_timeout`]: Parker::park_timeout +/// [`park_deadline`]: Parker::park_deadline +/// [`unpark`]: Unparker::unpark +pub struct Parker { + unparker: Unparker, + _marker: PhantomData<*const ()>, +} + +unsafe impl Send for Parker {} + +impl Default for Parker { + fn default() -> Self { + Self { + unparker: Unparker { + inner: Arc::new(Inner { + state: AtomicUsize::new(EMPTY), + lock: Mutex::new(()), + cvar: Condvar::new(), + }), + }, + _marker: PhantomData, + } + } +} + +impl Parker { + /// Creates a new `Parker`. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// ``` + /// + pub fn new() -> Parker { + Self::default() + } + + /// Blocks the current thread until the token is made available. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// + /// // Make the token available. + /// u.unpark(); + /// + /// // Wakes up immediately and consumes the token. + /// p.park(); + /// ``` + pub fn park(&self) { + self.unparker.inner.park(None); + } + + /// Blocks the current thread until the token is made available, but only for a limited time. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// + /// // Waits for the token to become available, but will not wait longer than 500 ms. + /// p.park_timeout(Duration::from_millis(500)); + /// ``` + pub fn park_timeout(&self, timeout: Duration) { + match Instant::now().checked_add(timeout) { + Some(deadline) => self.park_deadline(deadline), + None => self.park(), + } + } + + /// Blocks the current thread until the token is made available, or until a certain deadline. + /// + /// # Examples + /// + /// ``` + /// use std::time::{Duration, Instant}; + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let deadline = Instant::now() + Duration::from_millis(500); + /// + /// // Waits for the token to become available, but will not wait longer than 500 ms. + /// p.park_deadline(deadline); + /// ``` + pub fn park_deadline(&self, deadline: Instant) { + self.unparker.inner.park(Some(deadline)) + } + + /// Returns a reference to an associated [`Unparker`]. + /// + /// The returned [`Unparker`] doesn't have to be used by reference - it can also be cloned. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// + /// // Make the token available. + /// u.unpark(); + /// // Wakes up immediately and consumes the token. + /// p.park(); + /// ``` + /// + /// [`park`]: Parker::park + /// [`park_timeout`]: Parker::park_timeout + pub fn unparker(&self) -> &Unparker { + &self.unparker + } + + /// Converts a `Parker` into a raw pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let raw = Parker::into_raw(p); + /// # let _ = unsafe { Parker::from_raw(raw) }; + /// ``` + pub fn into_raw(this: Parker) -> *const () { + Unparker::into_raw(this.unparker) + } + + /// Converts a raw pointer into a `Parker`. + /// + /// # Safety + /// + /// This method is safe to use only with pointers returned by [`Parker::into_raw`]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let raw = Parker::into_raw(p); + /// let p = unsafe { Parker::from_raw(raw) }; + /// ``` + pub unsafe fn from_raw(ptr: *const ()) -> Parker { + Parker { + unparker: Unparker::from_raw(ptr), + _marker: PhantomData, + } + } +} + +impl fmt::Debug for Parker { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Parker { .. }") + } +} + +/// Unparks a thread parked by the associated [`Parker`]. 
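+// Note on `park_timeout` above: the timeout is turned into a deadline with
+// `Instant::now().checked_add(timeout)`, so a duration too large to represent
+// (for example `Duration::MAX`) simply degrades to an untimed `park()`:
+//
+//     use std::time::Duration;
+//     use crossbeam_utils::sync::Parker;
+//
+//     let p = Parker::new();
+//     p.unparker().unpark();         // make the token available first
+//     p.park_timeout(Duration::MAX); // consumes the token and returns immediately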
+pub struct Unparker { + inner: Arc, +} + +unsafe impl Send for Unparker {} +unsafe impl Sync for Unparker {} + +impl Unparker { + /// Atomically makes the token available if it is not already. + /// + /// This method will wake up the thread blocked on [`park`] or [`park_timeout`], if there is + /// any. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_utils::sync::Parker; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_millis(500)); + /// u.unpark(); + /// }); + /// + /// // Wakes up when `u.unpark()` provides the token. + /// p.park(); + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + /// + /// [`park`]: Parker::park + /// [`park_timeout`]: Parker::park_timeout + pub fn unpark(&self) { + self.inner.unpark() + } + + /// Converts an `Unparker` into a raw pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::{Parker, Unparker}; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// let raw = Unparker::into_raw(u); + /// # let _ = unsafe { Unparker::from_raw(raw) }; + /// ``` + pub fn into_raw(this: Unparker) -> *const () { + Arc::into_raw(this.inner).cast::<()>() + } + + /// Converts a raw pointer into an `Unparker`. + /// + /// # Safety + /// + /// This method is safe to use only with pointers returned by [`Unparker::into_raw`]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::{Parker, Unparker}; + /// + /// let p = Parker::new(); + /// let u = p.unparker().clone(); + /// + /// let raw = Unparker::into_raw(u); + /// let u = unsafe { Unparker::from_raw(raw) }; + /// ``` + pub unsafe fn from_raw(ptr: *const ()) -> Unparker { + Unparker { + inner: Arc::from_raw(ptr.cast::()), + } + } +} + +impl fmt::Debug for Unparker { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Unparker { .. }") + } +} + +impl Clone for Unparker { + fn clone(&self) -> Unparker { + Unparker { + inner: self.inner.clone(), + } + } +} + +const EMPTY: usize = 0; +const PARKED: usize = 1; +const NOTIFIED: usize = 2; + +struct Inner { + state: AtomicUsize, + lock: Mutex<()>, + cvar: Condvar, +} + +impl Inner { + fn park(&self, deadline: Option) { + // If we were previously notified then we consume this notification and return quickly. + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + return; + } + + // If the timeout is zero, then there is no need to actually block. + if let Some(deadline) = deadline { + if deadline <= Instant::now() { + return; + } + } + + // Otherwise we need to coordinate going to sleep. + let mut m = self.lock.lock().unwrap(); + + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + // Consume this notification to avoid spurious wakeups in the next park. + Err(NOTIFIED) => { + // We must read `state` here, even though we know it will be `NOTIFIED`. This is + // because `unpark` may have been called again since we read `NOTIFIED` in the + // `compare_exchange` above. We must perform an acquire operation that synchronizes + // with that `unpark` to observe any writes it made before the call to `unpark`. To + // do that we must read from the write it made to `state`. 
+ let old = self.state.swap(EMPTY, SeqCst); + assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + return; + } + Err(n) => panic!("inconsistent park_timeout state: {}", n), + } + + loop { + // Block the current thread on the conditional variable. + m = match deadline { + None => self.cvar.wait(m).unwrap(), + Some(deadline) => { + let now = Instant::now(); + if now < deadline { + // We could check for a timeout here, in the return value of wait_timeout, + // but in the case that a timeout and an unpark arrive simultaneously, we + // prefer to report the former. + self.cvar.wait_timeout(m, deadline - now).unwrap().0 + } else { + // We've timed out; swap out the state back to empty on our way out + match self.state.swap(EMPTY, SeqCst) { + NOTIFIED | PARKED => return, + n => panic!("inconsistent park_timeout state: {}", n), + }; + } + } + }; + + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + // got a notification + return; + } + + // Spurious wakeup, go back to sleep. Alternatively, if we timed out, it will be caught + // in the branch above, when we discover the deadline is in the past + } + } + + pub(crate) fn unpark(&self) { + // To ensure the unparked thread will observe any writes we made before this call, we must + // perform a release operation that `park` can synchronize with. To do that we must write + // `NOTIFIED` even if `state` is already `NOTIFIED`. That is why this must be a swap rather + // than a compare-and-swap that returns if it reads `NOTIFIED` on failure. + match self.state.swap(NOTIFIED, SeqCst) { + EMPTY => return, // no one was waiting + NOTIFIED => return, // already unparked + PARKED => {} // gotta go wake someone up + _ => panic!("inconsistent state in unpark"), + } + + // There is a period between when the parked thread sets `state` to `PARKED` (or last + // checked `state` in the case of a spurious wakeup) and when it actually waits on `cvar`. + // If we were to notify during this period it would be ignored and then when the parked + // thread went to sleep it would never wake up. Fortunately, it has `lock` locked at this + // stage so we can acquire `lock` to wait until it is ready to receive the notification. + // + // Releasing `lock` before the call to `notify_one` means that when the parked thread wakes + // it doesn't get woken only to have to wait for us to release `lock`. + drop(self.lock.lock().unwrap()); + self.cvar.notify_one(); + } +} diff --git a/external/vendor/crossbeam-utils/src/sync/sharded_lock.rs b/external/vendor/crossbeam-utils/src/sync/sharded_lock.rs new file mode 100644 index 0000000000..629b97598e --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/sharded_lock.rs @@ -0,0 +1,638 @@ +use std::boxed::Box; +use std::cell::UnsafeCell; +use std::collections::HashMap; +use std::fmt; +use std::marker::PhantomData; +use std::mem; +use std::ops::{Deref, DerefMut}; +use std::panic::{RefUnwindSafe, UnwindSafe}; +use std::sync::{LockResult, PoisonError, TryLockError, TryLockResult}; +use std::sync::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use std::thread::{self, ThreadId}; +use std::vec::Vec; + +use crate::sync::once_lock::OnceLock; +use crate::CachePadded; + +/// The number of shards per sharded lock. Must be a power of two. +const NUM_SHARDS: usize = 8; + +/// A shard containing a single reader-writer lock. +struct Shard { + /// The inner reader-writer lock. + lock: RwLock<()>, + + /// The write-guard keeping this shard locked. 
+ /// + /// Write operations will lock each shard and store the guard here. These guards get dropped at + /// the same time the big guard is dropped. + write_guard: UnsafeCell<Option<RwLockWriteGuard<'static, ()>>>, +} + +/// A sharded reader-writer lock. +/// +/// This lock is equivalent to [`RwLock`], except read operations are faster and write operations +/// are slower. +/// +/// A `ShardedLock` is internally made of a list of *shards*, each being a [`RwLock`] occupying a +/// single cache line. Read operations will pick one of the shards depending on the current thread +/// and lock it. Write operations need to lock all shards in succession. +/// +/// By splitting the lock into shards, concurrent read operations will in most cases choose +/// different shards and thus update different cache lines, which is good for scalability. However, +/// write operations need to do more work and are therefore slower than usual. +/// +/// The priority policy of the lock is dependent on the underlying operating system's +/// implementation, and this type does not guarantee that any particular policy will be used. +/// +/// # Poisoning +/// +/// A `ShardedLock`, like [`RwLock`], will become poisoned on a panic. Note that it may only be +/// poisoned if a panic occurs while a write operation is in progress. If a panic occurs in any +/// read operation, the lock will not be poisoned. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_utils::sync::ShardedLock; +/// +/// let lock = ShardedLock::new(5); +/// +/// // Any number of read locks can be held at once. +/// { +/// let r1 = lock.read().unwrap(); +/// let r2 = lock.read().unwrap(); +/// assert_eq!(*r1, 5); +/// assert_eq!(*r2, 5); +/// } // Read locks are dropped at this point. +/// +/// // However, only one write lock may be held. +/// { +/// let mut w = lock.write().unwrap(); +/// *w += 1; +/// assert_eq!(*w, 6); +/// } // Write lock is dropped here. +/// ``` +/// +/// [`RwLock`]: std::sync::RwLock +pub struct ShardedLock<T: ?Sized> { + /// A list of locks protecting the internal data. + shards: Box<[CachePadded<Shard>]>, + + /// The internal data. + value: UnsafeCell<T>, +} + +unsafe impl<T: ?Sized + Send> Send for ShardedLock<T> {} +unsafe impl<T: ?Sized + Send + Sync> Sync for ShardedLock<T> {} + +impl<T: ?Sized> UnwindSafe for ShardedLock<T> {} +impl<T: ?Sized> RefUnwindSafe for ShardedLock<T> {} + +impl<T> ShardedLock<T> { + /// Creates a new sharded reader-writer lock. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let lock = ShardedLock::new(5); + /// ``` + pub fn new(value: T) -> ShardedLock<T> { + ShardedLock { + shards: (0..NUM_SHARDS) + .map(|_| { + CachePadded::new(Shard { + lock: RwLock::new(()), + write_guard: UnsafeCell::new(None), + }) + }) + .collect::<Box<[_]>>(), + value: UnsafeCell::new(value), + } + } + + /// Consumes this lock, returning the underlying data. + /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let lock = ShardedLock::new(String::new()); + /// { + /// let mut s = lock.write().unwrap(); + /// *s = "modified".to_owned(); + /// } + /// assert_eq!(lock.into_inner().unwrap(), "modified"); + /// ``` + pub fn into_inner(self) -> LockResult<T> { + let is_poisoned = self.is_poisoned(); + let inner = self.value.into_inner(); + + if is_poisoned { + Err(PoisonError::new(inner)) + } else { + Ok(inner) + } + } +} + +impl<T: ?Sized> ShardedLock<T> { + /// Returns `true` if the lock is poisoned.
+ /// + /// If another thread can still access the lock, it may become poisoned at any time. A `false` + /// result should not be trusted without additional synchronization. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// use std::sync::Arc; + /// use std::thread; + /// + /// let lock = Arc::new(ShardedLock::new(0)); + /// let c_lock = lock.clone(); + /// + /// let _ = thread::spawn(move || { + /// let _lock = c_lock.write().unwrap(); + /// panic!(); // the lock gets poisoned + /// }).join(); + /// assert_eq!(lock.is_poisoned(), true); + /// ``` + pub fn is_poisoned(&self) -> bool { + self.shards[0].lock.is_poisoned() + } + + /// Returns a mutable reference to the underlying data. + /// + /// Since this call borrows the lock mutably, no actual locking needs to take place. + /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let mut lock = ShardedLock::new(0); + /// *lock.get_mut().unwrap() = 10; + /// assert_eq!(*lock.read().unwrap(), 10); + /// ``` + pub fn get_mut(&mut self) -> LockResult<&mut T> { + let is_poisoned = self.is_poisoned(); + let inner = unsafe { &mut *self.value.get() }; + + if is_poisoned { + Err(PoisonError::new(inner)) + } else { + Ok(inner) + } + } + + /// Attempts to acquire this lock with shared read access. + /// + /// If the access could not be granted at this time, an error is returned. Otherwise, a guard + /// is returned which will release the shared access when it is dropped. This method does not + /// provide any guarantees with respect to the ordering of whether contentious readers or + /// writers will acquire the lock first. + /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let lock = ShardedLock::new(1); + /// + /// match lock.try_read() { + /// Ok(n) => assert_eq!(*n, 1), + /// Err(_) => unreachable!(), + /// }; + /// ``` + pub fn try_read(&self) -> TryLockResult> { + // Take the current thread index and map it to a shard index. Thread indices will tend to + // distribute shards among threads equally, thus reducing contention due to read-locking. + let current_index = current_index().unwrap_or(0); + let shard_index = current_index & (self.shards.len() - 1); + + match self.shards[shard_index].lock.try_read() { + Ok(guard) => Ok(ShardedLockReadGuard { + lock: self, + _guard: guard, + _marker: PhantomData, + }), + Err(TryLockError::Poisoned(err)) => { + let guard = ShardedLockReadGuard { + lock: self, + _guard: err.into_inner(), + _marker: PhantomData, + }; + Err(TryLockError::Poisoned(PoisonError::new(guard))) + } + Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock), + } + } + + /// Locks with shared read access, blocking the current thread until it can be acquired. + /// + /// The calling thread will be blocked until there are no more writers which hold the lock. + /// There may be other readers currently inside the lock when this method returns. This method + /// does not provide any guarantees with respect to the ordering of whether contentious readers + /// or writers will acquire the lock first. + /// + /// Returns a guard which will release the shared access when dropped. 
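As the documentation above explains, a read locks only one shard (picked from the current thread's index) while a write must lock every shard in succession, so reads scale across threads and writes pay extra. A minimal sketch of that trade-off, not part of the vendored file (it uses `std::thread::scope` purely for brevity):

```rust
use std::thread;

use crossbeam_utils::sync::ShardedLock;

fn main() {
    let lock = ShardedLock::new(vec![1, 2, 3]);

    // Many readers: each thread read-locks only the shard mapped to it.
    thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|| {
                let guard = lock.read().unwrap();
                assert_eq!(guard.len(), 3);
            });
        }
    });

    // One writer: acquires every shard before handing out the guard.
    {
        let mut guard = lock.write().unwrap();
        guard.push(4);
    }

    assert_eq!(lock.read().unwrap().len(), 4);
}
```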
+ /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Panics + /// + /// This method might panic when called if the lock is already held by the current thread. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// use std::sync::Arc; + /// use std::thread; + /// + /// let lock = Arc::new(ShardedLock::new(1)); + /// let c_lock = lock.clone(); + /// + /// let n = lock.read().unwrap(); + /// assert_eq!(*n, 1); + /// + /// thread::spawn(move || { + /// let r = c_lock.read(); + /// assert!(r.is_ok()); + /// }).join().unwrap(); + /// ``` + pub fn read(&self) -> LockResult> { + // Take the current thread index and map it to a shard index. Thread indices will tend to + // distribute shards among threads equally, thus reducing contention due to read-locking. + let current_index = current_index().unwrap_or(0); + let shard_index = current_index & (self.shards.len() - 1); + + match self.shards[shard_index].lock.read() { + Ok(guard) => Ok(ShardedLockReadGuard { + lock: self, + _guard: guard, + _marker: PhantomData, + }), + Err(err) => Err(PoisonError::new(ShardedLockReadGuard { + lock: self, + _guard: err.into_inner(), + _marker: PhantomData, + })), + } + } + + /// Attempts to acquire this lock with exclusive write access. + /// + /// If the access could not be granted at this time, an error is returned. Otherwise, a guard + /// is returned which will release the exclusive access when it is dropped. This method does + /// not provide any guarantees with respect to the ordering of whether contentious readers or + /// writers will acquire the lock first. + /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let lock = ShardedLock::new(1); + /// + /// let n = lock.read().unwrap(); + /// assert_eq!(*n, 1); + /// + /// assert!(lock.try_write().is_err()); + /// ``` + pub fn try_write(&self) -> TryLockResult> { + let mut poisoned = false; + let mut blocked = None; + + // Write-lock each shard in succession. + for (i, shard) in self.shards.iter().enumerate() { + let guard = match shard.lock.try_write() { + Ok(guard) => guard, + Err(TryLockError::Poisoned(err)) => { + poisoned = true; + err.into_inner() + } + Err(TryLockError::WouldBlock) => { + blocked = Some(i); + break; + } + }; + + // Store the guard into the shard. + unsafe { + let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard); + let dest: *mut _ = shard.write_guard.get(); + *dest = Some(guard); + } + } + + if let Some(i) = blocked { + // Unlock the shards in reverse order of locking. + for shard in self.shards[0..i].iter().rev() { + unsafe { + let dest: *mut _ = shard.write_guard.get(); + let guard = (*dest).take(); + drop(guard); + } + } + Err(TryLockError::WouldBlock) + } else if poisoned { + let guard = ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + }; + Err(TryLockError::Poisoned(PoisonError::new(guard))) + } else { + Ok(ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + }) + } + } + + /// Locks with exclusive write access, blocking the current thread until it can be acquired. + /// + /// The calling thread will be blocked until there are no more writers which hold the lock. + /// There may be other readers currently inside the lock when this method returns. 
This method + /// does not provide any guarantees with respect to the ordering of whether contentious readers + /// or writers will acquire the lock first. + /// + /// Returns a guard which will release the exclusive access when dropped. + /// + /// # Errors + /// + /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write + /// operation panics. + /// + /// # Panics + /// + /// This method might panic when called if the lock is already held by the current thread. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::ShardedLock; + /// + /// let lock = ShardedLock::new(1); + /// + /// let mut n = lock.write().unwrap(); + /// *n = 2; + /// + /// assert!(lock.try_read().is_err()); + /// ``` + pub fn write(&self) -> LockResult> { + let mut poisoned = false; + + // Write-lock each shard in succession. + for shard in self.shards.iter() { + let guard = match shard.lock.write() { + Ok(guard) => guard, + Err(err) => { + poisoned = true; + err.into_inner() + } + }; + + // Store the guard into the shard. + unsafe { + let guard: RwLockWriteGuard<'_, ()> = guard; + let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard); + let dest: *mut _ = shard.write_guard.get(); + *dest = Some(guard); + } + } + + if poisoned { + Err(PoisonError::new(ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + })) + } else { + Ok(ShardedLockWriteGuard { + lock: self, + _marker: PhantomData, + }) + } + } +} + +impl fmt::Debug for ShardedLock { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.try_read() { + Ok(guard) => f + .debug_struct("ShardedLock") + .field("data", &&*guard) + .finish(), + Err(TryLockError::Poisoned(err)) => f + .debug_struct("ShardedLock") + .field("data", &&**err.get_ref()) + .finish(), + Err(TryLockError::WouldBlock) => { + struct LockedPlaceholder; + impl fmt::Debug for LockedPlaceholder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("") + } + } + f.debug_struct("ShardedLock") + .field("data", &LockedPlaceholder) + .finish() + } + } + } +} + +impl Default for ShardedLock { + fn default() -> ShardedLock { + ShardedLock::new(Default::default()) + } +} + +impl From for ShardedLock { + fn from(t: T) -> Self { + ShardedLock::new(t) + } +} + +/// A guard used to release the shared read access of a [`ShardedLock`] when dropped. +#[clippy::has_significant_drop] +pub struct ShardedLockReadGuard<'a, T: ?Sized> { + lock: &'a ShardedLock, + _guard: RwLockReadGuard<'a, ()>, + _marker: PhantomData>, +} + +unsafe impl Sync for ShardedLockReadGuard<'_, T> {} + +impl Deref for ShardedLockReadGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.lock.value.get() } + } +} + +impl fmt::Debug for ShardedLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ShardedLockReadGuard") + .field("lock", &self.lock) + .finish() + } +} + +impl fmt::Display for ShardedLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +/// A guard used to release the exclusive write access of a [`ShardedLock`] when dropped. +#[clippy::has_significant_drop] +pub struct ShardedLockWriteGuard<'a, T: ?Sized> { + lock: &'a ShardedLock, + _marker: PhantomData>, +} + +unsafe impl Sync for ShardedLockWriteGuard<'_, T> {} + +impl Drop for ShardedLockWriteGuard<'_, T> { + fn drop(&mut self) { + // Unlock the shards in reverse order of locking. 
+ for shard in self.lock.shards.iter().rev() { + unsafe { + let dest: *mut _ = shard.write_guard.get(); + let guard = (*dest).take(); + drop(guard); + } + } + } +} + +impl fmt::Debug for ShardedLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ShardedLockWriteGuard") + .field("lock", &self.lock) + .finish() + } +} + +impl fmt::Display for ShardedLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +impl Deref for ShardedLockWriteGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.lock.value.get() } + } +} + +impl DerefMut for ShardedLockWriteGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.lock.value.get() } + } +} + +/// Returns a `usize` that identifies the current thread. +/// +/// Each thread is associated with an 'index'. While there are no particular guarantees, indices +/// usually tend to be consecutive numbers between 0 and the number of running threads. +/// +/// Since this function accesses TLS, `None` might be returned if the current thread's TLS is +/// tearing down. +#[inline] +fn current_index() -> Option { + REGISTRATION.try_with(|reg| reg.index).ok() +} + +/// The global registry keeping track of registered threads and indices. +struct ThreadIndices { + /// Mapping from `ThreadId` to thread index. + mapping: HashMap, + + /// A list of free indices. + free_list: Vec, + + /// The next index to allocate if the free list is empty. + next_index: usize, +} + +fn thread_indices() -> &'static Mutex { + static THREAD_INDICES: OnceLock> = OnceLock::new(); + fn init() -> Mutex { + Mutex::new(ThreadIndices { + mapping: HashMap::new(), + free_list: Vec::new(), + next_index: 0, + }) + } + THREAD_INDICES.get_or_init(init) +} + +/// A registration of a thread with an index. +/// +/// When dropped, unregisters the thread and frees the reserved index. +struct Registration { + index: usize, + thread_id: ThreadId, +} + +impl Drop for Registration { + fn drop(&mut self) { + let mut indices = thread_indices().lock().unwrap(); + indices.mapping.remove(&self.thread_id); + indices.free_list.push(self.index); + } +} + +std::thread_local! { + static REGISTRATION: Registration = { + let thread_id = thread::current().id(); + let mut indices = thread_indices().lock().unwrap(); + + let index = match indices.free_list.pop() { + Some(i) => i, + None => { + let i = indices.next_index; + indices.next_index += 1; + i + } + }; + indices.mapping.insert(thread_id, index); + + Registration { + index, + thread_id, + } + }; +} diff --git a/external/vendor/crossbeam-utils/src/sync/wait_group.rs b/external/vendor/crossbeam-utils/src/sync/wait_group.rs new file mode 100644 index 0000000000..19d6074157 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/sync/wait_group.rs @@ -0,0 +1,145 @@ +use crate::primitive::sync::{Arc, Condvar, Mutex}; +use std::fmt; + +/// Enables threads to synchronize the beginning or end of some computation. +/// +/// # Wait groups vs barriers +/// +/// `WaitGroup` is very similar to [`Barrier`], but there are a few differences: +/// +/// * [`Barrier`] needs to know the number of threads at construction, while `WaitGroup` is cloned to +/// register more threads. +/// +/// * A [`Barrier`] can be reused even after all threads have synchronized, while a `WaitGroup` +/// synchronizes threads only once. +/// +/// * All threads wait for others to reach the [`Barrier`]. 
With `WaitGroup`, each thread can choose +/// to either wait for other threads or to continue without blocking. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_utils::sync::WaitGroup; +/// use std::thread; +/// +/// // Create a new wait group. +/// let wg = WaitGroup::new(); +/// +/// for _ in 0..4 { +/// // Create another reference to the wait group. +/// let wg = wg.clone(); +/// +/// thread::spawn(move || { +/// // Do some work. +/// +/// // Drop the reference to the wait group. +/// drop(wg); +/// }); +/// } +/// +/// // Block until all threads have finished their work. +/// wg.wait(); +/// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +/// ``` +/// +/// [`Barrier`]: std::sync::Barrier +pub struct WaitGroup { + inner: Arc, +} + +/// Inner state of a `WaitGroup`. +struct Inner { + cvar: Condvar, + count: Mutex, +} + +impl Default for WaitGroup { + fn default() -> Self { + Self { + inner: Arc::new(Inner { + cvar: Condvar::new(), + count: Mutex::new(1), + }), + } + } +} + +impl WaitGroup { + /// Creates a new wait group and returns the single reference to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::WaitGroup; + /// + /// let wg = WaitGroup::new(); + /// ``` + pub fn new() -> Self { + Self::default() + } + + /// Drops this reference and waits until all other references are dropped. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::sync::WaitGroup; + /// use std::thread; + /// + /// let wg = WaitGroup::new(); + /// + /// thread::spawn({ + /// let wg = wg.clone(); + /// move || { + /// // Block until both threads have reached `wait()`. + /// wg.wait(); + /// } + /// }); + /// + /// // Block until both threads have reached `wait()`. + /// wg.wait(); + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + pub fn wait(self) { + if *self.inner.count.lock().unwrap() == 1 { + return; + } + + let inner = self.inner.clone(); + drop(self); + + let mut count = inner.count.lock().unwrap(); + while *count > 0 { + count = inner.cvar.wait(count).unwrap(); + } + } +} + +impl Drop for WaitGroup { + fn drop(&mut self) { + let mut count = self.inner.count.lock().unwrap(); + *count -= 1; + + if *count == 0 { + self.inner.cvar.notify_all(); + } + } +} + +impl Clone for WaitGroup { + fn clone(&self) -> WaitGroup { + let mut count = self.inner.count.lock().unwrap(); + *count += 1; + + WaitGroup { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for WaitGroup { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let count: &usize = &*self.inner.count.lock().unwrap(); + f.debug_struct("WaitGroup").field("count", count).finish() + } +} diff --git a/external/vendor/crossbeam-utils/src/thread.rs b/external/vendor/crossbeam-utils/src/thread.rs new file mode 100644 index 0000000000..847f4cf112 --- /dev/null +++ b/external/vendor/crossbeam-utils/src/thread.rs @@ -0,0 +1,611 @@ +//! Threads that can borrow variables from the stack. +//! +//! Create a scope when spawned threads need to access variables on the stack: +//! +//! ``` +//! use crossbeam_utils::thread; +//! +//! let people = vec![ +//! "Alice".to_string(), +//! "Bob".to_string(), +//! "Carol".to_string(), +//! ]; +//! +//! thread::scope(|s| { +//! for person in &people { +//! s.spawn(move |_| { +//! println!("Hello, {}!", person); +//! }); +//! } +//! }).unwrap(); +//! ``` +//! +//! 
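For reviewers skimming the `WaitGroup` vendored just above: every clone bumps an internal count, dropping a clone decrements it, and `wait()` consumes the caller's reference and blocks until the count hits zero. A small sketch under those semantics, not part of the vendored file; the atomic counter is only there to show that all work finished before `wait()` returned:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

use crossbeam_utils::sync::WaitGroup;

fn main() {
    let done = AtomicUsize::new(0);
    let done = &done;

    let wg = WaitGroup::new();

    thread::scope(|s| {
        for _ in 0..4 {
            // Each clone bumps the internal count; dropping it decrements it again.
            let wg = wg.clone();
            s.spawn(move || {
                done.fetch_add(1, Ordering::Relaxed);
                drop(wg); // signal completion
            });
        }

        // Consumes our own reference and blocks until every clone has been dropped.
        wg.wait();
        assert_eq!(done.load(Ordering::Relaxed), 4);
    });
}
```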
# Why scoped threads? +//! +//! Suppose we wanted to re-write the previous example using plain threads: +//! +//! ```compile_fail,E0597 +//! use std::thread; +//! +//! let people = vec![ +//! "Alice".to_string(), +//! "Bob".to_string(), +//! "Carol".to_string(), +//! ]; +//! +//! let mut threads = Vec::new(); +//! +//! for person in &people { +//! threads.push(thread::spawn(move || { +//! println!("Hello, {}!", person); +//! })); +//! } +//! +//! for thread in threads { +//! thread.join().unwrap(); +//! } +//! ``` +//! +//! This doesn't work because the borrow checker complains about `people` not living long enough: +//! +//! ```text +//! error[E0597]: `people` does not live long enough +//! --> src/main.rs:12:20 +//! | +//! 12 | for person in &people { +//! | ^^^^^^ borrowed value does not live long enough +//! ... +//! 21 | } +//! | - borrowed value only lives until here +//! | +//! = note: borrowed value must be valid for the static lifetime... +//! ``` +//! +//! The problem here is that spawned threads are not allowed to borrow variables on stack because +//! the compiler cannot prove they will be joined before `people` is destroyed. +//! +//! Scoped threads are a mechanism to guarantee to the compiler that spawned threads will be joined +//! before the scope ends. +//! +//! # How scoped threads work +//! +//! If a variable is borrowed by a thread, the thread must complete before the variable is +//! destroyed. Threads spawned using [`std::thread::spawn`] can only borrow variables with the +//! `'static` lifetime because the borrow checker cannot be sure when the thread will complete. +//! +//! A scope creates a clear boundary between variables outside the scope and threads inside the +//! scope. Whenever a scope spawns a thread, it promises to join the thread before the scope ends. +//! This way we guarantee to the borrow checker that scoped threads only live within the scope and +//! can safely access variables outside it. +//! +//! # Nesting scoped threads +//! +//! Sometimes scoped threads need to spawn more threads within the same scope. This is a little +//! tricky because argument `s` lives *inside* the invocation of `thread::scope()` and as such +//! cannot be borrowed by scoped threads: +//! +//! ```compile_fail,E0521 +//! use crossbeam_utils::thread; +//! +//! thread::scope(|s| { +//! s.spawn(|_| { +//! // Not going to compile because we're trying to borrow `s`, +//! // which lives *inside* the scope! :( +//! s.spawn(|_| println!("nested thread")); +//! }); +//! }); +//! ``` +//! +//! Fortunately, there is a solution. Every scoped thread is passed a reference to its scope as an +//! argument, which can be used for spawning nested threads: +//! +//! ``` +//! use crossbeam_utils::thread; +//! +//! thread::scope(|s| { +//! // Note the `|s|` here. +//! s.spawn(|s| { +//! // Yay, this works because we're using a fresh argument `s`! :) +//! s.spawn(|_| println!("nested thread")); +//! }); +//! }).unwrap(); +//! ``` + +use std::boxed::Box; +use std::fmt; +use std::io; +use std::marker::PhantomData; +use std::mem; +use std::panic; +use std::string::String; +use std::sync::{Arc, Mutex}; +use std::thread; +use std::vec::Vec; + +use crate::sync::WaitGroup; + +type SharedVec = Arc>>; +type SharedOption = Arc>>; + +/// Creates a new scope for spawning threads. +/// +/// All child threads that haven't been manually joined will be automatically joined just before +/// this function invocation ends. 
If all joined threads have successfully completed, `Ok` is +/// returned with the return value of `f`. If any of the joined threads has panicked, an `Err` is +/// returned containing errors from panicked threads. Note that if panics are implemented by +/// aborting the process, no error is returned; see the notes of [std::panic::catch_unwind]. +/// +/// **Note:** Since Rust 1.63, this function is soft-deprecated in favor of the more efficient [`std::thread::scope`]. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_utils::thread; +/// +/// let var = vec![1, 2, 3]; +/// +/// thread::scope(|s| { +/// s.spawn(|_| { +/// println!("A child thread borrowing `var`: {:?}", var); +/// }); +/// }).unwrap(); +/// ``` +pub fn scope<'env, F, R>(f: F) -> thread::Result +where + F: FnOnce(&Scope<'env>) -> R, +{ + struct AbortOnPanic; + impl Drop for AbortOnPanic { + fn drop(&mut self) { + if thread::panicking() { + std::process::abort(); + } + } + } + + let wg = WaitGroup::new(); + let scope = Scope::<'env> { + handles: SharedVec::default(), + wait_group: wg.clone(), + _marker: PhantomData, + }; + + // Execute the scoped function, but catch any panics. + let result = panic::catch_unwind(panic::AssertUnwindSafe(|| f(&scope))); + + // If an unwinding panic occurs before all threads are joined + // promote it to an aborting panic to prevent any threads from escaping the scope. + let guard = AbortOnPanic; + + // Wait until all nested scopes are dropped. + drop(scope.wait_group); + wg.wait(); + + // Join all remaining spawned threads. + let panics: Vec<_> = scope + .handles + .lock() + .unwrap() + // Filter handles that haven't been joined, join them, and collect errors. + .drain(..) + .filter_map(|handle| handle.lock().unwrap().take()) + .filter_map(|handle| handle.join().err()) + .collect(); + + mem::forget(guard); + + // If `f` has panicked, resume unwinding. + // If any of the child threads have panicked, return the panic errors. + // Otherwise, everything is OK and return the result of `f`. + match result { + Err(err) => panic::resume_unwind(err), + Ok(res) => { + if panics.is_empty() { + Ok(res) + } else { + Err(Box::new(panics)) + } + } + } +} + +/// A scope for spawning threads. +pub struct Scope<'env> { + /// The list of the thread join handles. + handles: SharedVec>>, + + /// Used to wait until all subscopes all dropped. + wait_group: WaitGroup, + + /// Borrows data with invariant lifetime `'env`. + _marker: PhantomData<&'env mut &'env ()>, +} + +unsafe impl Sync for Scope<'_> {} + +impl<'env> Scope<'env> { + /// Spawns a scoped thread. + /// + /// This method is similar to the [`spawn`] function in Rust's standard library. The difference + /// is that this thread is scoped, meaning it's guaranteed to terminate before the scope exits, + /// allowing it to reference variables outside the scope. + /// + /// The scoped thread is passed a reference to this scope as an argument, which can be used for + /// spawning nested threads. + /// + /// The returned [handle](ScopedJoinHandle) can be used to manually + /// [join](ScopedJoinHandle::join) the thread before the scope exits. + /// + /// This will create a thread using default parameters of [`ScopedThreadBuilder`], if you want to specify the + /// stack size or the name of the thread, use this API instead. + /// + /// [`spawn`]: std::thread::spawn + /// + /// # Panics + /// + /// Panics if the OS fails to create a thread; use [`ScopedThreadBuilder::spawn`] + /// to recover from such errors. 
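As described above, `scope` joins every remaining child on the way out and, if any child panicked, packs the panic payloads into the `Err` variant. A short sketch of inspecting that error, not part of the vendored file, with an arbitrary panic message:

```rust
use std::any::Any;

use crossbeam_utils::thread;

fn main() {
    let result = thread::scope(|s| {
        s.spawn(|_| println!("this child succeeds"));
        s.spawn(|_| panic!("this child fails"));
    });

    // The scope returns Err because one child panicked.
    let err = result.unwrap_err();

    // The error downcasts to the collected panic payloads of the failed children.
    let panics = err
        .downcast_ref::<Vec<Box<dyn Any + Send + 'static>>>()
        .unwrap();
    assert_eq!(panics.len(), 1);
    assert_eq!(*panics[0].downcast_ref::<&str>().unwrap(), "this child fails");
}
```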
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// + /// thread::scope(|s| { + /// let handle = s.spawn(|_| { + /// println!("A child thread is running"); + /// 42 + /// }); + /// + /// // Join the thread and retrieve its result. + /// let res = handle.join().unwrap(); + /// assert_eq!(res, 42); + /// }).unwrap(); + /// ``` + pub fn spawn<'scope, F, T>(&'scope self, f: F) -> ScopedJoinHandle<'scope, T> + where + F: FnOnce(&Scope<'env>) -> T, + F: Send + 'env, + T: Send + 'env, + { + self.builder() + .spawn(f) + .expect("failed to spawn scoped thread") + } + + /// Creates a builder that can configure a thread before spawning. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// + /// thread::scope(|s| { + /// s.builder() + /// .spawn(|_| println!("A child thread is running")) + /// .unwrap(); + /// }).unwrap(); + /// ``` + pub fn builder<'scope>(&'scope self) -> ScopedThreadBuilder<'scope, 'env> { + ScopedThreadBuilder { + scope: self, + builder: thread::Builder::new(), + } + } +} + +impl fmt::Debug for Scope<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Scope { .. }") + } +} + +/// Configures the properties of a new thread. +/// +/// The two configurable properties are: +/// +/// - [`name`]: Specifies an [associated name for the thread][naming-threads]. +/// - [`stack_size`]: Specifies the [desired stack size for the thread][stack-size]. +/// +/// The [`spawn`] method will take ownership of the builder and return an [`io::Result`] of the +/// thread handle with the given configuration. +/// +/// The [`Scope::spawn`] method uses a builder with default configuration and unwraps its return +/// value. You may want to use this builder when you want to recover from a failure to launch a +/// thread. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_utils::thread; +/// +/// thread::scope(|s| { +/// s.builder() +/// .spawn(|_| println!("Running a child thread")) +/// .unwrap(); +/// }).unwrap(); +/// ``` +/// +/// [`name`]: ScopedThreadBuilder::name +/// [`stack_size`]: ScopedThreadBuilder::stack_size +/// [`spawn`]: ScopedThreadBuilder::spawn +/// [`io::Result`]: std::io::Result +/// [naming-threads]: std::thread#naming-threads +/// [stack-size]: std::thread#stack-size +#[derive(Debug)] +pub struct ScopedThreadBuilder<'scope, 'env> { + scope: &'scope Scope<'env>, + builder: thread::Builder, +} + +impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> { + /// Sets the name for the new thread. + /// + /// The name must not contain null bytes (`\0`). + /// + /// For more information about named threads, see [here][naming-threads]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// use std::thread::current; + /// + /// thread::scope(|s| { + /// s.builder() + /// .name("my thread".to_string()) + /// .spawn(|_| assert_eq!(current().name(), Some("my thread"))) + /// .unwrap(); + /// }).unwrap(); + /// ``` + /// + /// [naming-threads]: std::thread#naming-threads + pub fn name(mut self, name: String) -> ScopedThreadBuilder<'scope, 'env> { + self.builder = self.builder.name(name); + self + } + + /// Sets the size of the stack for the new thread. + /// + /// The stack size is measured in bytes. + /// + /// For more information about the stack size for threads, see [here][stack-size]. 
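The builder described above mirrors `std::thread::Builder`: `name` and `stack_size` configure the thread, and its `spawn` returns an `io::Result` so an OS-level failure can be handled instead of unwinding. A small sketch, not part of the vendored file; the thread name and stack size are arbitrary:

```rust
use crossbeam_utils::thread;

fn main() {
    thread::scope(|s| {
        let handle = s
            .builder()
            .name("indexer".to_string())
            .stack_size(4 * 1024 * 1024) // 4 MiB stack, chosen only for illustration
            .spawn(|_| {
                assert_eq!(std::thread::current().name(), Some("indexer"));
                21 * 2
            })
            .expect("the OS refused to create the thread");

        // Join before the scope ends and retrieve the result.
        assert_eq!(handle.join().unwrap(), 42);
    })
    .unwrap();
}
```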
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// + /// thread::scope(|s| { + /// s.builder() + /// .stack_size(32 * 1024) + /// .spawn(|_| println!("Running a child thread")) + /// .unwrap(); + /// }).unwrap(); + /// ``` + /// + /// [stack-size]: std::thread#stack-size + pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'scope, 'env> { + self.builder = self.builder.stack_size(size); + self + } + + /// Spawns a scoped thread with this configuration. + /// + /// The scoped thread is passed a reference to this scope as an argument, which can be used for + /// spawning nested threads. + /// + /// The returned handle can be used to manually join the thread before the scope exits. + /// + /// # Errors + /// + /// Unlike the [`Scope::spawn`] method, this method yields an + /// [`io::Result`] to capture any failure to create the thread at + /// the OS level. + /// + /// [`io::Result`]: std::io::Result + /// + /// # Panics + /// + /// Panics if a thread name was set and it contained null bytes. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// + /// thread::scope(|s| { + /// let handle = s.builder() + /// .spawn(|_| { + /// println!("A child thread is running"); + /// 42 + /// }) + /// .unwrap(); + /// + /// // Join the thread and retrieve its result. + /// let res = handle.join().unwrap(); + /// assert_eq!(res, 42); + /// }).unwrap(); + /// ``` + pub fn spawn(self, f: F) -> io::Result> + where + F: FnOnce(&Scope<'env>) -> T, + F: Send + 'env, + T: Send + 'env, + { + // The result of `f` will be stored here. + let result = SharedOption::default(); + + // Spawn the thread and grab its join handle and thread handle. + let (handle, thread) = { + let result = Arc::clone(&result); + + // A clone of the scope that will be moved into the new thread. + let scope = Scope::<'env> { + handles: Arc::clone(&self.scope.handles), + wait_group: self.scope.wait_group.clone(), + _marker: PhantomData, + }; + + // Spawn the thread. + let handle = { + let closure = move || { + // Make sure the scope is inside the closure with the proper `'env` lifetime. + let scope: Scope<'env> = scope; + + // Run the closure. + let res = f(&scope); + + // Store the result if the closure didn't panic. + *result.lock().unwrap() = Some(res); + }; + + // Allocate `closure` on the heap and erase the `'env` bound. + let closure: Box = Box::new(closure); + let closure: Box = + unsafe { mem::transmute(closure) }; + + // Finally, spawn the closure. + self.builder.spawn(closure)? + }; + + let thread = handle.thread().clone(); + let handle = Arc::new(Mutex::new(Some(handle))); + (handle, thread) + }; + + // Add the handle to the shared list of join handles. + self.scope.handles.lock().unwrap().push(Arc::clone(&handle)); + + Ok(ScopedJoinHandle { + handle, + result, + thread, + _marker: PhantomData, + }) + } +} + +unsafe impl Send for ScopedJoinHandle<'_, T> {} +unsafe impl Sync for ScopedJoinHandle<'_, T> {} + +/// A handle that can be used to join its scoped thread. +/// +/// This struct is created by the [`Scope::spawn`] method and the +/// [`ScopedThreadBuilder::spawn`] method. +pub struct ScopedJoinHandle<'scope, T> { + /// A join handle to the spawned thread. + handle: SharedOption>, + + /// Holds the result of the inner closure. + result: SharedOption, + + /// A handle to the spawned thread. + thread: thread::Thread, + + /// Borrows the parent scope with lifetime `'scope`. 
+ _marker: PhantomData<&'scope ()>, +} + +impl ScopedJoinHandle<'_, T> { + /// Waits for the thread to finish and returns its result. + /// + /// If the child thread panics, an error is returned. Note that if panics are implemented by + /// aborting the process, no error is returned; see the notes of [std::panic::catch_unwind]. + /// + /// # Panics + /// + /// This function may panic on some platforms if a thread attempts to join itself or otherwise + /// may create a deadlock with joining threads. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// + /// thread::scope(|s| { + /// let handle1 = s.spawn(|_| println!("I'm a happy thread :)")); + /// let handle2 = s.spawn(|_| panic!("I'm a sad thread :(")); + /// + /// // Join the first thread and verify that it succeeded. + /// let res = handle1.join(); + /// assert!(res.is_ok()); + /// + /// // Join the second thread and verify that it panicked. + /// let res = handle2.join(); + /// assert!(res.is_err()); + /// }).unwrap(); + /// ``` + pub fn join(self) -> thread::Result { + // Take out the handle. The handle will surely be available because the root scope waits + // for nested scopes before joining remaining threads. + let handle = self.handle.lock().unwrap().take().unwrap(); + + // Join the thread and then take the result out of its inner closure. + handle + .join() + .map(|()| self.result.lock().unwrap().take().unwrap()) + } + + /// Returns a handle to the underlying thread. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_utils::thread; + /// + /// thread::scope(|s| { + /// let handle = s.spawn(|_| println!("A child thread is running")); + /// println!("The child thread ID: {:?}", handle.thread().id()); + /// }).unwrap(); + /// ``` + pub fn thread(&self) -> &thread::Thread { + &self.thread + } +} + +/// Unix-specific extensions. +#[cfg(unix)] +mod unix { + use super::ScopedJoinHandle; + use std::os::unix::thread::{JoinHandleExt, RawPthread}; + + impl JoinHandleExt for ScopedJoinHandle<'_, T> { + fn as_pthread_t(&self) -> RawPthread { + // Borrow the handle. The handle will surely be available because the root scope waits + // for nested scopes before joining remaining threads. + let handle = self.handle.lock().unwrap(); + handle.as_ref().unwrap().as_pthread_t() + } + fn into_pthread_t(self) -> RawPthread { + self.as_pthread_t() + } + } +} +/// Windows-specific extensions. +#[cfg(windows)] +mod windows { + use super::ScopedJoinHandle; + use std::os::windows::io::{AsRawHandle, IntoRawHandle, RawHandle}; + + impl AsRawHandle for ScopedJoinHandle<'_, T> { + fn as_raw_handle(&self) -> RawHandle { + // Borrow the handle. The handle will surely be available because the root scope waits + // for nested scopes before joining remaining threads. + let handle = self.handle.lock().unwrap(); + handle.as_ref().unwrap().as_raw_handle() + } + } + + impl IntoRawHandle for ScopedJoinHandle<'_, T> { + fn into_raw_handle(self) -> RawHandle { + self.as_raw_handle() + } + } +} + +impl fmt::Debug for ScopedJoinHandle<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("ScopedJoinHandle { .. 
}") + } +} diff --git a/external/vendor/crossbeam-utils/tests/atomic_cell.rs b/external/vendor/crossbeam-utils/tests/atomic_cell.rs new file mode 100644 index 0000000000..9fe69328df --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/atomic_cell.rs @@ -0,0 +1,374 @@ +use std::mem; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +use crossbeam_utils::atomic::AtomicCell; + +#[test] +fn is_lock_free() { + struct UsizeWrap(#[allow(dead_code)] usize); + struct U8Wrap(#[allow(dead_code)] bool); + struct I16Wrap(#[allow(dead_code)] i16); + #[repr(align(8))] + struct U64Align8(#[allow(dead_code)] u64); + + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + + assert!(AtomicCell::<()>::is_lock_free()); + + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + + assert!(AtomicCell::::is_lock_free()); + assert!(AtomicCell::::is_lock_free()); + + // Sizes of both types must be equal, and the alignment of `u64` must be greater or equal than + // that of `AtomicU64`. In i686-unknown-linux-gnu, the alignment of `u64` is `4` and alignment + // of `AtomicU64` is `8`, so `AtomicCell` is not lock-free. + assert_eq!( + AtomicCell::::is_lock_free(), + cfg!(target_has_atomic = "64") && std::mem::align_of::() == 8 + ); + assert_eq!(mem::size_of::(), 8); + assert_eq!(mem::align_of::(), 8); + assert_eq!( + AtomicCell::::is_lock_free(), + cfg!(target_has_atomic = "64") + ); + + // AtomicU128 is unstable + assert!(!AtomicCell::::is_lock_free()); +} + +#[test] +fn const_is_lock_free() { + const _U: bool = AtomicCell::::is_lock_free(); + const _I: bool = AtomicCell::::is_lock_free(); +} + +#[test] +fn drops_unit() { + static CNT: AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(); + + impl Foo { + fn new() -> Foo { + CNT.fetch_add(1, SeqCst); + Foo() + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new() + } + } + + let a = AtomicCell::new(Foo::new()); + + assert_eq!(a.swap(Foo::new()), Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new()); + assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn drops_u8() { + static CNT: AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(u8); + + impl Foo { + fn new(val: u8) -> Foo { + CNT.fetch_add(1, SeqCst); + Foo(val) + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new(0) + } + } + + let a = AtomicCell::new(Foo::new(5)); + + assert_eq!(a.swap(Foo::new(6)), Foo::new(5)); + assert_eq!(a.swap(Foo::new(1)), Foo::new(6)); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(0)); + assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn drops_usize() { + static CNT: 
AtomicUsize = AtomicUsize::new(0); + CNT.store(0, SeqCst); + + #[derive(Debug, PartialEq, Eq)] + struct Foo(usize); + + impl Foo { + fn new(val: usize) -> Foo { + CNT.fetch_add(1, SeqCst); + Foo(val) + } + } + + impl Drop for Foo { + fn drop(&mut self) { + CNT.fetch_sub(1, SeqCst); + } + } + + impl Default for Foo { + fn default() -> Foo { + Foo::new(0) + } + } + + let a = AtomicCell::new(Foo::new(5)); + + assert_eq!(a.swap(Foo::new(6)), Foo::new(5)); + assert_eq!(a.swap(Foo::new(1)), Foo::new(6)); + assert_eq!(CNT.load(SeqCst), 1); + + a.store(Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(2)); + assert_eq!(CNT.load(SeqCst), 1); + + assert_eq!(a.swap(Foo::default()), Foo::new(0)); + assert_eq!(CNT.load(SeqCst), 1); + + drop(a); + assert_eq!(CNT.load(SeqCst), 0); +} + +#[test] +fn modular_u8() { + #[derive(Clone, Copy, Eq, Debug, Default)] + struct Foo(u8); + + impl PartialEq for Foo { + fn eq(&self, other: &Foo) -> bool { + self.0 % 5 == other.0 % 5 + } + } + + let a = AtomicCell::new(Foo(1)); + + assert_eq!(a.load(), Foo(1)); + assert_eq!(a.swap(Foo(2)), Foo(11)); + assert_eq!(a.load(), Foo(52)); + + a.store(Foo(0)); + assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100))); + assert_eq!(a.load().0, 5); + assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); + assert_eq!(a.load().0, 15); +} + +#[test] +fn modular_usize() { + #[derive(Clone, Copy, Eq, Debug, Default)] + struct Foo(usize); + + impl PartialEq for Foo { + fn eq(&self, other: &Foo) -> bool { + self.0 % 5 == other.0 % 5 + } + } + + let a = AtomicCell::new(Foo(1)); + + assert_eq!(a.load(), Foo(1)); + assert_eq!(a.swap(Foo(2)), Foo(11)); + assert_eq!(a.load(), Foo(52)); + + a.store(Foo(0)); + assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100))); + assert_eq!(a.load().0, 5); + assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); + assert_eq!(a.load().0, 15); +} + +#[test] +fn garbage_padding() { + #[derive(Copy, Clone, Eq, PartialEq)] + struct Object { + a: i64, + b: i32, + } + + let cell = AtomicCell::new(Object { a: 0, b: 0 }); + let _garbage = [0xfe, 0xfe, 0xfe, 0xfe, 0xfe]; // Needed + let next = Object { a: 0, b: 0 }; + + let prev = cell.load(); + assert!(cell.compare_exchange(prev, next).is_ok()); + println!(); +} + +#[test] +fn const_atomic_cell_new() { + static CELL: AtomicCell = AtomicCell::new(0); + + CELL.store(1); + assert_eq!(CELL.load(), 1); +} + +// https://github.com/crossbeam-rs/crossbeam/pull/767 +macro_rules! 
test_arithmetic { + ($test_name:ident, $ty:ident) => { + #[test] + fn $test_name() { + let a: AtomicCell<$ty> = AtomicCell::new(7); + + assert_eq!(a.fetch_add(3), 7); + assert_eq!(a.load(), 10); + + assert_eq!(a.fetch_sub(3), 10); + assert_eq!(a.load(), 7); + + assert_eq!(a.fetch_and(3), 7); + assert_eq!(a.load(), 3); + + assert_eq!(a.fetch_or(16), 3); + assert_eq!(a.load(), 19); + + assert_eq!(a.fetch_xor(2), 19); + assert_eq!(a.load(), 17); + + assert_eq!(a.fetch_max(18), 17); + assert_eq!(a.load(), 18); + + assert_eq!(a.fetch_min(17), 18); + assert_eq!(a.load(), 17); + + assert_eq!(a.fetch_nand(7), 17); + assert_eq!(a.load(), !(17 & 7)); + } + }; +} +test_arithmetic!(arithmetic_u8, u8); +test_arithmetic!(arithmetic_i8, i8); +test_arithmetic!(arithmetic_u16, u16); +test_arithmetic!(arithmetic_i16, i16); +test_arithmetic!(arithmetic_u32, u32); +test_arithmetic!(arithmetic_i32, i32); +test_arithmetic!(arithmetic_u64, u64); +test_arithmetic!(arithmetic_i64, i64); +test_arithmetic!(arithmetic_u128, u128); +test_arithmetic!(arithmetic_i128, i128); + +// https://github.com/crossbeam-rs/crossbeam/issues/748 +#[cfg_attr(miri, ignore)] // TODO +#[test] +fn issue_748() { + #[allow(dead_code)] + #[repr(align(8))] + #[derive(Debug, Clone, Copy, PartialEq, Eq)] + enum Test { + Field(u32), + FieldLess, + } + + assert_eq!(mem::size_of::(), 8); + assert_eq!( + AtomicCell::::is_lock_free(), + cfg!(target_has_atomic = "64") + ); + let x = AtomicCell::new(Test::FieldLess); + assert_eq!(x.load(), Test::FieldLess); +} + +// https://github.com/crossbeam-rs/crossbeam/issues/833 +#[test] +fn issue_833() { + use std::num::NonZeroU128; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::thread; + + #[cfg(miri)] + const N: usize = 10_000; + #[cfg(not(miri))] + const N: usize = 1_000_000; + + #[allow(dead_code)] + enum Enum { + NeverConstructed, + Cell(AtomicCell), + } + + static STATIC: Enum = Enum::Cell(AtomicCell::new(match NonZeroU128::new(1) { + Some(nonzero) => nonzero, + None => unreachable!(), + })); + static FINISHED: AtomicBool = AtomicBool::new(false); + + let handle = thread::spawn(|| { + let cell = match &STATIC { + Enum::NeverConstructed => unreachable!(), + Enum::Cell(cell) => cell, + }; + let x = NonZeroU128::new(0xFFFF_FFFF_FFFF_FFFF_0000_0000_0000_0000).unwrap(); + let y = NonZeroU128::new(0x0000_0000_0000_0000_FFFF_FFFF_FFFF_FFFF).unwrap(); + while !FINISHED.load(Ordering::Relaxed) { + cell.store(x); + cell.store(y); + } + }); + + for _ in 0..N { + if let Enum::NeverConstructed = STATIC { + unreachable!(":("); + } + } + + FINISHED.store(true, Ordering::Relaxed); + handle.join().unwrap(); +} diff --git a/external/vendor/crossbeam-utils/tests/cache_padded.rs b/external/vendor/crossbeam-utils/tests/cache_padded.rs new file mode 100644 index 0000000000..86e9a7709c --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/cache_padded.rs @@ -0,0 +1,113 @@ +use std::cell::Cell; +use std::mem; + +use crossbeam_utils::CachePadded; + +#[test] +fn default() { + let x: CachePadded = Default::default(); + assert_eq!(*x, 0); +} + +#[test] +fn store_u64() { + let x: CachePadded = CachePadded::new(17); + assert_eq!(*x, 17); +} + +#[test] +fn store_pair() { + let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37)); + assert_eq!(x.0, 17); + assert_eq!(x.1, 37); +} + +#[test] +fn distance() { + let arr = [CachePadded::new(17u8), CachePadded::new(37u8)]; + let a = &*arr[0] as *const u8; + let b = &*arr[1] as *const u8; + let align = mem::align_of::>(); + assert!(align >= 32); + assert_eq!(unsafe { 
a.add(align) }, b); +} + +#[test] +fn different_sizes() { + CachePadded::new(17u8); + CachePadded::new(17u16); + CachePadded::new(17u32); + CachePadded::new([17u64; 0]); + CachePadded::new([17u64; 1]); + CachePadded::new([17u64; 2]); + CachePadded::new([17u64; 3]); + CachePadded::new([17u64; 4]); + CachePadded::new([17u64; 5]); + CachePadded::new([17u64; 6]); + CachePadded::new([17u64; 7]); + CachePadded::new([17u64; 8]); +} + +#[test] +fn large() { + let a = [17u64; 9]; + let b = CachePadded::new(a); + assert!(mem::size_of_val(&a) <= mem::size_of_val(&b)); +} + +#[test] +fn debug() { + assert_eq!( + format!("{:?}", CachePadded::new(17u64)), + "CachePadded { value: 17 }" + ); +} + +#[test] +fn drops() { + let count = Cell::new(0); + + struct Foo<'a>(&'a Cell); + + impl<'a> Drop for Foo<'a> { + fn drop(&mut self) { + self.0.set(self.0.get() + 1); + } + } + + let a = CachePadded::new(Foo(&count)); + let b = CachePadded::new(Foo(&count)); + + assert_eq!(count.get(), 0); + drop(a); + assert_eq!(count.get(), 1); + drop(b); + assert_eq!(count.get(), 2); +} + +#[allow(clippy::clone_on_copy)] // This is intentional. +#[test] +fn clone() { + let a = CachePadded::new(17); + let b = a.clone(); + assert_eq!(*a, *b); +} + +#[test] +fn runs_custom_clone() { + let count = Cell::new(0); + + struct Foo<'a>(&'a Cell); + + impl<'a> Clone for Foo<'a> { + fn clone(&self) -> Foo<'a> { + self.0.set(self.0.get() + 1); + Foo::<'a>(self.0) + } + } + + let a = CachePadded::new(Foo(&count)); + let _ = a.clone(); + + assert_eq!(count.get(), 1); +} diff --git a/external/vendor/crossbeam-utils/tests/parker.rs b/external/vendor/crossbeam-utils/tests/parker.rs new file mode 100644 index 0000000000..2bf9c37d49 --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/parker.rs @@ -0,0 +1,41 @@ +use std::thread::sleep; +use std::time::Duration; +use std::u32; + +use crossbeam_utils::sync::Parker; +use crossbeam_utils::thread; + +#[test] +fn park_timeout_unpark_before() { + let p = Parker::new(); + for _ in 0..10 { + p.unparker().unpark(); + p.park_timeout(Duration::from_millis(u32::MAX as u64)); + } +} + +#[test] +fn park_timeout_unpark_not_called() { + let p = Parker::new(); + for _ in 0..10 { + p.park_timeout(Duration::from_millis(10)) + } +} + +#[test] +fn park_timeout_unpark_called_other_thread() { + for _ in 0..10 { + let p = Parker::new(); + let u = p.unparker().clone(); + + thread::scope(|scope| { + scope.spawn(move |_| { + sleep(Duration::from_millis(50)); + u.unpark(); + }); + + p.park_timeout(Duration::from_millis(u32::MAX as u64)) + }) + .unwrap(); + } +} diff --git a/external/vendor/crossbeam-utils/tests/sharded_lock.rs b/external/vendor/crossbeam-utils/tests/sharded_lock.rs new file mode 100644 index 0000000000..002f7f5e19 --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/sharded_lock.rs @@ -0,0 +1,252 @@ +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::mpsc::channel; +use std::sync::{Arc, TryLockError}; +use std::thread; + +use crossbeam_utils::sync::ShardedLock; +use rand::Rng; + +#[derive(Eq, PartialEq, Debug)] +struct NonCopy(i32); + +#[test] +fn smoke() { + let l = ShardedLock::new(()); + drop(l.read().unwrap()); + drop(l.write().unwrap()); + drop((l.read().unwrap(), l.read().unwrap())); + drop(l.write().unwrap()); +} + +#[test] +fn frob() { + const N: u32 = 10; + #[cfg(miri)] + const M: usize = 50; + #[cfg(not(miri))] + const M: usize = 1000; + + let r = Arc::new(ShardedLock::new(())); + + let (tx, rx) = channel::<()>(); + for _ in 0..N { + let tx = tx.clone(); + let r = r.clone(); 
+ thread::spawn(move || { + let mut rng = rand::thread_rng(); + for _ in 0..M { + if rng.gen_bool(1.0 / (N as f64)) { + drop(r.write().unwrap()); + } else { + drop(r.read().unwrap()); + } + } + drop(tx); + }); + } + drop(tx); + let _ = rx.recv(); +} + +#[test] +fn arc_poison_wr() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.write().unwrap(); + panic!(); + }) + .join(); + assert!(arc.read().is_err()); +} + +#[test] +fn arc_poison_ww() { + let arc = Arc::new(ShardedLock::new(1)); + assert!(!arc.is_poisoned()); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.write().unwrap(); + panic!(); + }) + .join(); + assert!(arc.write().is_err()); + assert!(arc.is_poisoned()); +} + +#[test] +fn arc_no_poison_rr() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.read().unwrap(); + panic!(); + }) + .join(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 1); +} +#[test] +fn arc_no_poison_sl() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.read().unwrap(); + panic!() + }) + .join(); + let lock = arc.write().unwrap(); + assert_eq!(*lock, 1); +} + +#[test] +fn arc() { + let arc = Arc::new(ShardedLock::new(0)); + let arc2 = arc.clone(); + let (tx, rx) = channel(); + + thread::spawn(move || { + let mut lock = arc2.write().unwrap(); + for _ in 0..10 { + let tmp = *lock; + *lock = -1; + thread::yield_now(); + *lock = tmp + 1; + } + tx.send(()).unwrap(); + }); + + // Readers try to catch the writer in the act + let mut children = Vec::new(); + for _ in 0..5 { + let arc3 = arc.clone(); + children.push(thread::spawn(move || { + let lock = arc3.read().unwrap(); + assert!(*lock >= 0); + })); + } + + // Wait for children to pass their asserts + for r in children { + assert!(r.join().is_ok()); + } + + // Wait for writer to finish + rx.recv().unwrap(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 10); +} + +#[test] +fn arc_access_in_unwind() { + let arc = Arc::new(ShardedLock::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + let mut lock = self.i.write().unwrap(); + *lock += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }) + .join(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 2); +} + +#[test] +fn unsized_type() { + let sl: &ShardedLock<[i32]> = &ShardedLock::new([1, 2, 3]); + { + let b = &mut *sl.write().unwrap(); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*sl.read().unwrap(), comp); +} + +#[test] +fn try_write() { + let lock = ShardedLock::new(0isize); + let read_guard = lock.read().unwrap(); + + let write_result = lock.try_write(); + match write_result { + Err(TryLockError::WouldBlock) => (), + Ok(_) => panic!("try_write should not succeed while read_guard is in scope"), + Err(_) => panic!("unexpected error"), + } + + drop(read_guard); +} + +#[test] +fn test_into_inner() { + let m = ShardedLock::new(NonCopy(10)); + assert_eq!(m.into_inner().unwrap(), NonCopy(10)); +} + +#[test] +fn test_into_inner_drop() { + struct Foo(Arc); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = ShardedLock::new(Foo(num_drops.clone())); + 
assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = m.into_inner().unwrap(); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + } + assert_eq!(num_drops.load(Ordering::SeqCst), 1); +} + +#[test] +fn test_into_inner_poison() { + let m = Arc::new(ShardedLock::new(NonCopy(10))); + let m2 = m.clone(); + let _ = thread::spawn(move || { + let _lock = m2.write().unwrap(); + panic!("test panic in inner thread to poison ShardedLock"); + }) + .join(); + + assert!(m.is_poisoned()); + match Arc::try_unwrap(m).unwrap().into_inner() { + Err(e) => assert_eq!(e.into_inner(), NonCopy(10)), + Ok(x) => panic!("into_inner of poisoned ShardedLock is Ok: {:?}", x), + } +} + +#[test] +fn test_get_mut() { + let mut m = ShardedLock::new(NonCopy(10)); + *m.get_mut().unwrap() = NonCopy(20); + assert_eq!(m.into_inner().unwrap(), NonCopy(20)); +} + +#[test] +fn test_get_mut_poison() { + let m = Arc::new(ShardedLock::new(NonCopy(10))); + let m2 = m.clone(); + let _ = thread::spawn(move || { + let _lock = m2.write().unwrap(); + panic!("test panic in inner thread to poison ShardedLock"); + }) + .join(); + + assert!(m.is_poisoned()); + match Arc::try_unwrap(m).unwrap().get_mut() { + Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)), + Ok(x) => panic!("get_mut of poisoned ShardedLock is Ok: {:?}", x), + } +} diff --git a/external/vendor/crossbeam-utils/tests/thread.rs b/external/vendor/crossbeam-utils/tests/thread.rs new file mode 100644 index 0000000000..0dfad90bd6 --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/thread.rs @@ -0,0 +1,215 @@ +use std::any::Any; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; + +use crossbeam_utils::thread; + +const THREADS: usize = 10; +const SMALL_STACK_SIZE: usize = 20; + +#[test] +fn join() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + let handle = scope.spawn(|_| { + counter.store(1, Ordering::Relaxed); + }); + assert!(handle.join().is_ok()); + + let panic_handle = scope.spawn(|_| { + panic!("\"My honey is running out!\", said Pooh."); + }); + assert!(panic_handle.join().is_err()); + }) + .unwrap(); + + // There should be sufficient synchronization. 
+ assert_eq!(1, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }); + } + }) + .unwrap(); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter_builder() { + let counter = AtomicUsize::new(0); + thread::scope(|scope| { + for i in 0..THREADS { + scope + .builder() + .name(format!("child-{}", i)) + .stack_size(SMALL_STACK_SIZE) + .spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }) + .unwrap(); + } + }) + .unwrap(); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); +} + +#[test] +fn counter_panic() { + let counter = AtomicUsize::new(0); + let result = thread::scope(|scope| { + scope.spawn(|_| { + panic!("\"My honey is running out!\", said Pooh."); + }); + sleep(Duration::from_millis(100)); + + for _ in 0..THREADS { + scope.spawn(|_| { + counter.fetch_add(1, Ordering::Relaxed); + }); + } + }); + + assert_eq!(THREADS, counter.load(Ordering::Relaxed)); + assert!(result.is_err()); +} + +#[test] +fn panic_twice() { + let result = thread::scope(|scope| { + scope.spawn(|_| { + sleep(Duration::from_millis(500)); + panic!("thread #1"); + }); + scope.spawn(|_| { + panic!("thread #2"); + }); + }); + + let err = result.unwrap_err(); + let vec = err + .downcast_ref::>>() + .unwrap(); + assert_eq!(2, vec.len()); + + let first = vec[0].downcast_ref::<&str>().unwrap(); + let second = vec[1].downcast_ref::<&str>().unwrap(); + assert_eq!("thread #1", *first); + assert_eq!("thread #2", *second) +} + +#[test] +fn panic_many() { + let result = thread::scope(|scope| { + scope.spawn(|_| panic!("deliberate panic #1")); + scope.spawn(|_| panic!("deliberate panic #2")); + scope.spawn(|_| panic!("deliberate panic #3")); + }); + + let err = result.unwrap_err(); + let vec = err + .downcast_ref::>>() + .unwrap(); + assert_eq!(3, vec.len()); + + for panic in vec.iter() { + let panic = panic.downcast_ref::<&str>().unwrap(); + assert!( + *panic == "deliberate panic #1" + || *panic == "deliberate panic #2" + || *panic == "deliberate panic #3" + ); + } +} + +#[test] +fn nesting() { + let var = "foo".to_string(); + + struct Wrapper<'a> { + var: &'a String, + } + + impl<'a> Wrapper<'a> { + fn recurse(&'a self, scope: &thread::Scope<'a>, depth: usize) { + assert_eq!(self.var, "foo"); + + if depth > 0 { + scope.spawn(move |scope| { + self.recurse(scope, depth - 1); + }); + } + } + } + + let wrapper = Wrapper { var: &var }; + + thread::scope(|scope| { + scope.spawn(|scope| { + scope.spawn(|scope| { + wrapper.recurse(scope, 5); + }); + }); + }) + .unwrap(); +} + +#[test] +fn join_nested() { + thread::scope(|scope| { + scope.spawn(|scope| { + let handle = scope.spawn(|_| 7); + + sleep(Duration::from_millis(200)); + handle.join().unwrap(); + }); + + sleep(Duration::from_millis(100)); + }) + .unwrap(); +} + +#[test] +fn scope_returns_ok() { + let result = thread::scope(|scope| scope.spawn(|_| 1234).join().unwrap()).unwrap(); + assert_eq!(result, 1234); +} + +#[cfg(unix)] +#[test] +fn as_pthread_t() { + use std::os::unix::thread::JoinHandleExt; + thread::scope(|scope| { + let handle = scope.spawn(|_scope| { + sleep(Duration::from_millis(100)); + 42 + }); + let _pthread_t = handle.as_pthread_t(); + handle.join().unwrap(); + }) + .unwrap(); +} + +#[cfg(windows)] +#[test] +fn as_raw_handle() { + use std::os::windows::io::AsRawHandle; + thread::scope(|scope| { + let handle = scope.spawn(|_scope| { + 
sleep(Duration::from_millis(100)); + 42 + }); + let _raw_handle = handle.as_raw_handle(); + handle.join().unwrap(); + }) + .unwrap(); +} diff --git a/external/vendor/crossbeam-utils/tests/wait_group.rs b/external/vendor/crossbeam-utils/tests/wait_group.rs new file mode 100644 index 0000000000..5b549b849c --- /dev/null +++ b/external/vendor/crossbeam-utils/tests/wait_group.rs @@ -0,0 +1,67 @@ +use std::sync::mpsc; +use std::thread; +use std::time::Duration; + +use crossbeam_utils::sync::WaitGroup; + +const THREADS: usize = 10; + +#[test] +fn wait() { + let wg = WaitGroup::new(); + let (tx, rx) = mpsc::channel(); + + for _ in 0..THREADS { + let wg = wg.clone(); + let tx = tx.clone(); + + thread::spawn(move || { + wg.wait(); + tx.send(()).unwrap(); + }); + } + + thread::sleep(Duration::from_millis(100)); + + // At this point, all spawned threads should be blocked, so we shouldn't get anything from the + // channel. + assert!(rx.try_recv().is_err()); + + wg.wait(); + + // Now, the wait group is cleared and we should receive messages. + for _ in 0..THREADS { + rx.recv().unwrap(); + } +} + +#[test] +fn wait_and_drop() { + let wg = WaitGroup::new(); + let wg2 = WaitGroup::new(); + let (tx, rx) = mpsc::channel(); + + for _ in 0..THREADS { + let wg = wg.clone(); + let wg2 = wg2.clone(); + let tx = tx.clone(); + + thread::spawn(move || { + wg2.wait(); + tx.send(()).unwrap(); + drop(wg); + }); + } + + // At this point, no thread has gotten past `wg2.wait()`, so we shouldn't get anything from the + // channel. + assert!(rx.try_recv().is_err()); + drop(wg2); + + wg.wait(); + + // Now, the wait group is cleared and we should receive messages. + for _ in 0..THREADS { + rx.try_recv().unwrap(); + } +} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 134c20a85f..c1723d92a4 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -15,7 +15,6 @@ # limitations under the License. set(DBB-FIRMWARE-SOURCES - ${CMAKE_SOURCE_DIR}/src/firmware_main_loop.c ${CMAKE_SOURCE_DIR}/src/delay.c ${CMAKE_SOURCE_DIR}/src/keystore.c ${CMAKE_SOURCE_DIR}/src/random.c diff --git a/src/bootloader/startup.c b/src/bootloader/startup.c index 830da3d972..b476b33bf1 100644 --- a/src/bootloader/startup.c +++ b/src/bootloader/startup.c @@ -112,9 +112,9 @@ int main(void) // Set device name, the MCU and BLE chip will probably not have the same name after a reset of // only the MCU. 
- char buf[MEMORY_DEVICE_NAME_MAX_LEN] = {0}; + char buf[MEMORY_DEVICE_NAME_MAX_LEN + 1] = {0}; memory_random_name(buf); - da14531_set_name(buf, strlen(buf), &uart_write_queue); + da14531_set_name(buf, &uart_write_queue); // Ask for the current conection state da14531_get_connection_state(&uart_write_queue); diff --git a/src/da14531/da14531.c b/src/da14531/da14531.c index b74157edb1..f7557bc0b5 100644 --- a/src/da14531/da14531.c +++ b/src/da14531/da14531.c @@ -72,8 +72,9 @@ void da14531_set_product( } } -void da14531_set_name(const char* name, size_t name_len, struct ringbuffer* uart_out) +void da14531_set_name(const char* name, struct ringbuffer* uart_out) { + size_t name_len = strlen(name); uint8_t payload[64] = {0}; payload[0] = CTRL_CMD_DEVICE_NAME; memcpy(&payload[1], name, MIN(name_len, sizeof(payload) - 1)); diff --git a/src/da14531/da14531.h b/src/da14531/da14531.h index 5829041687..c17afd8e2c 100644 --- a/src/da14531/da14531.h +++ b/src/da14531/da14531.h @@ -52,7 +52,7 @@ void da14531_set_product( volatile uint16_t product_len, struct ringbuffer* uart_out); -void da14531_set_name(const char* name, size_t name_len, struct ringbuffer* uart_out); +void da14531_set_name(const char* name, struct ringbuffer* uart_out); void da14531_get_connection_state(struct ringbuffer* uart_out); diff --git a/src/da14531/da14531_handler.c b/src/da14531/da14531_handler.c index 90d240372e..57869623f7 100644 --- a/src/da14531/da14531_handler.c +++ b/src/da14531/da14531_handler.c @@ -82,7 +82,7 @@ bool da14531_handler_bond_db_set(void) } #endif -static void _ctrl_handler(struct da14531_ctrl_frame* frame, struct ringbuffer* queue) +static void _ctrl_handler(const struct da14531_ctrl_frame* frame, struct ringbuffer* queue) { switch (frame->cmd) { case CTRL_CMD_DEVICE_NAME: { @@ -158,7 +158,7 @@ static void _ctrl_handler(struct da14531_ctrl_frame* frame, struct ringbuffer* q &frame->cmd_data[0], sizeof(_ble_pairing_callback_data.key)); _ble_pairing_callback_data.queue = queue; - uint32_t pairing_code_int = (*(uint32_t*)&frame->cmd_data[0]) % 1000000; + uint32_t pairing_code_int = (*(const uint32_t*)&frame->cmd_data[0]) % 1000000; char pairing_code[7] = {0}; snprintf(pairing_code, sizeof(pairing_code), "%06lu", (long unsigned int)pairing_code_int); // util_log("da14531: show/confirm pairing code: %s", pairing_code); @@ -275,21 +275,21 @@ static void _ctrl_handler(struct da14531_ctrl_frame* frame, struct ringbuffer* q } } -static void _hww_handler(struct da14531_protocol_frame* frame, struct ringbuffer* queue) +static void _hww_handler(const struct da14531_protocol_frame* frame, struct ringbuffer* queue) { // util_log(" in: %s", util_dbg_hex(frame->payload, 64)); (void)queue; ASSERT(frame->payload_length == 64); - usb_packet_process((USB_FRAME*)&frame->payload[0]); + usb_packet_process((const USB_FRAME*)&frame->payload[0]); } // Handler must not use the frame pointer after it has returned -void da14531_handler(struct da14531_protocol_frame* frame, struct ringbuffer* queue) +void da14531_handler(const struct da14531_protocol_frame* frame, struct ringbuffer* queue) { // util_log("handler called"); switch (frame->type) { case DA14531_PROTOCOL_PACKET_TYPE_CTRL_DATA: - _ctrl_handler((struct da14531_ctrl_frame*)frame, queue); + _ctrl_handler((const struct da14531_ctrl_frame*)frame, queue); break; case DA14531_PROTOCOL_PACKET_TYPE_BLE_DATA: _hww_handler(frame, queue); diff --git a/src/da14531/da14531_handler.h b/src/da14531/da14531_handler.h index c39535e5d8..a36480e43b 100644 --- a/src/da14531/da14531_handler.h +++ 
b/src/da14531/da14531_handler.h @@ -26,6 +26,6 @@ extern uint16_t da14531_handler_current_product_len; bool da14531_handler_bond_db_set(void); #endif -void da14531_handler(struct da14531_protocol_frame* frame, struct ringbuffer* queue); +void da14531_handler(const struct da14531_protocol_frame* frame, struct ringbuffer* queue); #endif diff --git a/src/delay.c b/src/delay.c index 0b7edeb6a3..6ec4b737bc 100644 --- a/src/delay.c +++ b/src/delay.c @@ -25,6 +25,8 @@ struct task { struct timer_task timer; volatile bool done; + delay_callback_t cb; + void* user_data; }; static struct task _tasks[10] = {0}; @@ -34,11 +36,16 @@ static void _hal_timer_cb(const struct timer_task* const timer) { for (size_t i = 0; i < COUNT_OF(_tasks); i++) { if (&_tasks[i].timer == timer) { _tasks[i].done = true; + if (_tasks[i].cb) { + _tasks[i].cb(_tasks[i].user_data); + // Only call the callback once + _tasks[i].cb = NULL; + } } } } -void delay_init_ms(delay_t* self, uint32_t ms) +void delay_init_ms(delay_t* self, uint32_t ms, delay_callback_t cb, void* user_data) { // find an unused slot in tasks size_t i; @@ -56,6 +63,8 @@ void delay_init_ms(delay_t* self, uint32_t ms) } else { _tasks[i].done = false; memset(&_tasks[i], 0, sizeof(struct task)); + _tasks[i].cb = cb; + _tasks[i].user_data = user_data; _tasks[i].timer.interval = ms; _tasks[i].timer.cb = _hal_timer_cb; _tasks[i].timer.mode = TIMER_TASK_ONE_SHOT; diff --git a/src/delay.h b/src/delay.h index ee7e6c3064..3b99efb924 100644 --- a/src/delay.h +++ b/src/delay.h @@ -22,9 +22,11 @@ typedef struct { size_t id; } delay_t; +typedef void (*delay_callback_t)(void*); + // Creates a non-blocking delay. Check with delay_is_elapsed if it has elapsed. // Limited to 10 concurrent delays, will abort if it fails to allocate one -void delay_init_ms(delay_t* self, uint32_t ms); +void delay_init_ms(delay_t* self, uint32_t ms, delay_callback_t cb, void* user_data); // returns true if time has passed. After it has returned true once it must not be called again bool delay_is_elapsed(const delay_t* self); diff --git a/src/firmware.c b/src/firmware.c index 3c909870b0..ba25d51bc5 100644 --- a/src/firmware.c +++ b/src/firmware.c @@ -15,7 +15,6 @@ #include "common_main.h" #include "da14531/da14531_protocol.h" #include "driver_init.h" -#include "firmware_main_loop.h" #include "hardfault.h" #include "memory/bitbox02_smarteeprom.h" #include "memory/memory_shared.h" @@ -27,6 +26,7 @@ #include "usb/usb_processing.h" #include #include +#include #include #if APP_U2F == 1 @@ -55,6 +55,6 @@ int main(void) #if APP_U2F == 1 u2f_device_setup(); #endif - firmware_main_loop(); + rust_main_loop(); return 0; } diff --git a/src/firmware_main_loop.c b/src/firmware_main_loop.c deleted file mode 100644 index 2fca2b28a0..0000000000 --- a/src/firmware_main_loop.c +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2019 Shift Cryptosecurity AG -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -#include "firmware_main_loop.h" - -#include "communication_mode.h" -#include "da14531/da14531.h" -#include "da14531/da14531_handler.h" -#include "da14531/da14531_protocol.h" -#include "driver_init.h" -#include "hardfault.h" -#include "hid_hww.h" -#include "hww.h" -#include "memory/memory.h" -#include "memory/memory_shared.h" -#include "touch/gestures.h" -#include "uart.h" -#include "ui/screen_process.h" -#include "ui/screen_stack.h" -#include "usb/class/hid/hww/hid_hww.h" -#include "usb/usb.h" -#include "usb/usb_frame.h" -#include "usb/usb_processing.h" -#include -#include -#include -#include -#if APP_U2F == 1 - #include "u2f.h" - #include "u2f/u2f_packet.h" - #include "usb/class/hid/u2f/hid_u2f.h" -#endif - -// Must be power of 2 -#define UART_OUT_BUF_LEN 2048 - -static void _orientation_screen_poll(struct ringbuffer* uart_write_queue) -{ - static bool orientation_set = false; - bool _orientation; - if (!orientation_set && rust_workflow_orientation_screen_poll(&_orientation)) { - orientation_set = true; - // hww handler in usb_process must be setup before we can allow ble connections - if (memory_get_platform() == MEMORY_PLATFORM_BITBOX02_PLUS) { - size_t len; - da14531_handler_current_product = (const uint8_t*)platform_product(&len); - da14531_handler_current_product_len = len; - util_log("%s %d", da14531_handler_current_product, da14531_handler_current_product_len); - da14531_set_product( - da14531_handler_current_product, - da14531_handler_current_product_len, - uart_write_queue); - } - usb_start(); - } -} - -void firmware_main_loop(void) -{ - // Set the size of uart_read_buf to the size of the ringbuffer in the UART driver so we can read - // out all bytes - uint8_t uart_read_buf[USART_0_BUFFER_SIZE] = {0}; - uint16_t uart_read_buf_len = 0; - - struct ringbuffer uart_write_queue; - uint8_t uart_write_buf[UART_OUT_BUF_LEN]; - ringbuffer_init(&uart_write_queue, &uart_write_buf, UART_OUT_BUF_LEN); - - /// If the bootloader has booted the BLE chip, the BLE chip isn't aware of the name according to - /// the fw. Send it over. - char buf[MEMORY_DEVICE_NAME_MAX_LEN] = {0}; - memory_get_device_name(buf); - da14531_set_name(buf, strlen(buf), &uart_write_queue); - - // This starts the async orientation screen workflow, which is processed by the loop below. - rust_workflow_spawn_orientation_screen(); - - const uint8_t* hww_data = NULL; - uint8_t hww_frame[USB_REPORT_SIZE] = {0}; - -#if APP_U2F == 1 - u2f_packet_init(); - const uint8_t* u2f_data = NULL; - uint8_t u2f_frame[USB_REPORT_SIZE] = {0}; -#endif - - if (!memory_ble_enabled()) { - communication_mode_ble_disable(); - } - - while (1) { - // Do UART I/O - if (communication_mode_ble_enabled()) { - if (uart_read_buf_len < sizeof(uart_read_buf) || - ringbuffer_num(&uart_write_queue) > 0) { - uart_poll( - &uart_read_buf[0], - sizeof(uart_read_buf), - &uart_read_buf_len, - &uart_write_queue); - } - } - - // Check if there is outgoing data - if (!hww_data) { - hww_data = queue_pull(queue_hww_queue()); - } -#if APP_U2F == 1 - // Generate timeout packets - uint32_t timeout_cid; - while (u2f_packet_timeout_get(&timeout_cid)) { - u2f_packet_timeout(timeout_cid); - } - if (!u2f_data) { - u2f_data = queue_pull(queue_u2f_queue()); - // If USB stack was locked and there is no more messages to send out, time to - // unlock it. 
- if (!u2f_data && usb_processing_locked(usb_processing_u2f())) { - usb_processing_unlock(); - } - } -#endif - // Do USB Input - if (!hww_data && hid_hww_read(&hww_frame[0])) { - if (usb_packet_process((const USB_FRAME*)hww_frame)) { - if (communication_mode_ble_enabled()) { - // Enqueue a power down command to the da14531 - da14531_power_down(&uart_write_queue); - // Flush out the power down command. This will be the last UART communication we - // do. - while (ringbuffer_num(&uart_write_queue) > 0) { - uart_poll(NULL, 0, NULL, &uart_write_queue); - } - communication_mode_ble_disable(); - } - } else { - util_log("usb_packet_process: invalid"); - } - } -#if APP_U2F == 1 - if (!u2f_data && hid_u2f_read(&u2f_frame[0])) { - util_log("u2f data %s", util_dbg_hex((void*)u2f_frame, 16)); - u2f_packet_process((const USB_FRAME*)u2f_frame); - } -#endif - - // Do UART Output - if (communication_mode_ble_enabled()) { - struct da14531_protocol_frame* frame = da14531_protocol_poll( - &uart_read_buf[0], &uart_read_buf_len, &hww_data, &uart_write_queue); - - if (frame) { - da14531_handler(frame, &uart_write_queue); - } - } - - // Do USB Output - if (!communication_mode_ble_enabled() && hww_data) { - if (hid_hww_write_poll(hww_data)) { - hww_data = NULL; - } - } -#if APP_U2F == 1 - if (u2f_data) { - if (hid_u2f_write_poll(u2f_data)) { - util_log("u2f wrote %s", util_dbg_hex(u2f_data, 16)); - u2f_data = NULL; - } - } -#endif - - /* First, process all the incoming USB traffic. */ - usb_processing_process(usb_processing_hww()); -#if APP_U2F == 1 - usb_processing_process(usb_processing_u2f()); -#endif - /* - * If USB has generated events at the application level, - * process them now. - */ - hww_process(); -#if APP_U2F == 1 - u2f_process(); -#endif - - screen_process(); - /* And finally, run the high-level event processing. 
*/ - - rust_workflow_spin(); - rust_async_usb_spin(); - - _orientation_screen_poll(&uart_write_queue); - } -} diff --git a/src/memory/memory_shared.c b/src/memory/memory_shared.c index e355d8b40d..e294a94bba 100644 --- a/src/memory/memory_shared.c +++ b/src/memory/memory_shared.c @@ -182,7 +182,7 @@ int16_t memory_get_ble_bond_db(uint8_t* data) return len; } -bool memory_set_ble_bond_db(uint8_t* data, int16_t data_len) +bool memory_set_ble_bond_db(const uint8_t* data, int16_t data_len) { ASSERT(data_len <= MEMORY_BLE_BOND_DB_LEN); if (data_len > MEMORY_BLE_BOND_DB_LEN) { diff --git a/src/memory/memory_shared.h b/src/memory/memory_shared.h index 7099c0e76f..a5135a514e 100644 --- a/src/memory/memory_shared.h +++ b/src/memory/memory_shared.h @@ -156,7 +156,7 @@ void memory_get_ble_irk(uint8_t* data); void memory_get_ble_identity_address(uint8_t* data); // data_len can be at most MEMORY_BLE_BOND_DB_LEN -bool memory_set_ble_bond_db(uint8_t* data, int16_t data_len); +bool memory_set_ble_bond_db(const uint8_t* data, int16_t data_len); typedef struct { uint8_t allowed_firmware_hash[32]; diff --git a/src/rust/Cargo.lock b/src/rust/Cargo.lock index 68821cd1c8..27d9757f3c 100644 --- a/src/rust/Cargo.lock +++ b/src/rust/Cargo.lock @@ -24,6 +24,12 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "autocfg" version = "1.0.1" @@ -112,6 +118,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "bitbox02-executor" +version = "0.1.0" +dependencies = [ + "async-task", + "concurrent-queue", + "critical-section", + "futures-lite", + "pin-project-lite", +] + [[package]] name = "bitbox02-noise" version = "0.1.0" @@ -130,6 +147,7 @@ dependencies = [ "bip39", "bitbox-aes", "bitbox02", + "bitbox02-executor", "bitbox02-noise", "bitcoin", "bitcoin_hashes", @@ -312,6 +330,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -361,6 +388,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + [[package]] name = "crypto-bigint" version = "0.5.5" diff --git a/src/rust/Cargo.toml b/src/rust/Cargo.toml index cbf333c10c..f0e7e48cec 100644 --- a/src/rust/Cargo.toml +++ b/src/rust/Cargo.toml @@ -24,6 +24,7 @@ members = [ "erc20_params", "streaming-silent-payments", "bitbox-aes", + "bitbox02-executor", ] resolver = "2" @@ -52,6 +53,7 @@ keccak = { version = "0.1.4", default-features = false, features = ["no_unroll"] zeroize = "1.7.0" futures-lite = { version = "2.6.1", default-features = false } hex_lit = { version = "0.1.1", default-features = false } +critical-section = {version = "1.2"} [patch.crates-io] rtt-target = { git = "https://github.com/probe-rs/rtt-target.git", rev = 
"117d9519a5d3b1f4bc024bc05f9e3c5dec0a57f5" } diff --git a/src/rust/bitbox02-executor/Cargo.toml b/src/rust/bitbox02-executor/Cargo.toml new file mode 100644 index 0000000000..efbfe2c96f --- /dev/null +++ b/src/rust/bitbox02-executor/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "bitbox02-executor" +version = "0.1.0" +edition = "2024" + +[dependencies] +async-task = {version="4.7.1", default-features=false} +concurrent-queue = {version="2.5.0", default-features=false} +critical-section = "1.2.0" +futures-lite = {workspace = true} +pin-project-lite = "0.2.16" diff --git a/src/rust/bitbox02-executor/src/lib.rs b/src/rust/bitbox02-executor/src/lib.rs new file mode 100644 index 0000000000..bad8abba10 --- /dev/null +++ b/src/rust/bitbox02-executor/src/lib.rs @@ -0,0 +1,64 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_std] +use async_task::{Builder, Runnable, Task}; +use concurrent_queue::ConcurrentQueue; + +pub struct Executor { + queue: ConcurrentQueue, +} + +impl Executor { + pub const fn new() -> Executor { + Executor { + queue: ConcurrentQueue::unbounded(), + } + } + + /// Attempts to run a task if at least one is scheduled + /// + /// Running a scheduled task means simply polling its future once + pub fn try_tick(&self) -> bool { + match self.queue.pop() { + Err(_) => false, + Ok(runnable) => { + runnable.run(); + true + } + } + } + + /// Spawns a task onto the executor. + pub fn spawn(&'static self, future: impl Future + 'static) -> Task { + // `schedule` is the function eventually being called when `Waker.wake()` is called. The + // function schedules the task by placing the tasks Runnable into the executors queue. + let schedule = move |runnable| self.queue.push(runnable).unwrap(); + + // SAFETY + // 1. `future` doesn't need to be `Send` because the firmware is single threaded + // 2. 
`schedule` doesn't need to be `Send` and `Sync` because the firmware is single threaded + let (runnable, task) = unsafe { Builder::new().spawn_unchecked(|()| future, schedule) }; + + // Schedule the task once to get started + runnable.schedule(); + task + } +} + +impl Default for Executor { + fn default() -> Executor { + Executor::new() + } +} diff --git a/src/rust/bitbox02-rust-c/src/lib.rs b/src/rust/bitbox02-rust-c/src/lib.rs index 08a2cce8ef..8ee196487d 100644 --- a/src/rust/bitbox02-rust-c/src/lib.rs +++ b/src/rust/bitbox02-rust-c/src/lib.rs @@ -25,8 +25,6 @@ mod alloc; pub mod async_usb; #[cfg(feature = "firmware")] mod der; -#[cfg(feature = "firmware")] -pub mod workflow; // Expose C interface defined in bitbox_aes #[cfg(feature = "firmware")] diff --git a/src/rust/bitbox02-rust/Cargo.toml b/src/rust/bitbox02-rust/Cargo.toml index 8aea87c8db..5979538c86 100644 --- a/src/rust/bitbox02-rust/Cargo.toml +++ b/src/rust/bitbox02-rust/Cargo.toml @@ -49,6 +49,7 @@ minicbor = { version = "0.24.0", default-features = false, features = ["alloc"], crc = { version = "3.0.1", optional = true } ed25519-dalek = { version = "2.1.1", default-features = false, features = ["hazmat", "digest"], optional = true } hmac = { workspace = true } +bitbox02-executor = {path = "../bitbox02-executor"} miniscript = { version = "12.2.0", default-features = false, features = ["no-std"], optional = true } bitcoin = { workspace = true } @@ -110,3 +111,5 @@ c-unit-testing = [] simulator-graphical = [] firmware = [] + +rtt = [] diff --git a/src/rust/bitbox02-rust/src/lib.rs b/src/rust/bitbox02-rust/src/lib.rs index 2a685974a8..228ceb8ff7 100644 --- a/src/rust/bitbox02-rust/src/lib.rs +++ b/src/rust/bitbox02-rust/src/lib.rs @@ -13,7 +13,14 @@ // limitations under the License. // Since we are targeting embedded we exclude the standard library by default -#![no_std] +#![cfg_attr( + not(any( + feature = "testing", + feature = "c-unit-testing", + feature = "simulator-graphical" + )), + no_std +)] // When compiling for testing we allow certain warnings. #![cfg_attr(test, allow(unused_imports, dead_code))] @@ -36,6 +43,8 @@ pub mod hal; pub mod hash; pub mod hww; pub mod keystore; +#[cfg(feature = "firmware")] +pub mod main_loop; pub mod salt; pub mod secp256k1; #[cfg(feature = "app-u2f")] diff --git a/src/rust/bitbox02-rust/src/main_loop.rs b/src/rust/bitbox02-rust/src/main_loop.rs new file mode 100644 index 0000000000..a2a452bb5c --- /dev/null +++ b/src/rust/bitbox02-rust/src/main_loop.rs @@ -0,0 +1,202 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
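The `bitbox02-executor` crate introduced above is a minimal single-threaded executor: `spawn` wraps a future into an `async_task` Runnable/Task pair whose waker pushes the Runnable back onto the queue, and `try_tick` pops and polls at most one scheduled task. A minimal sketch of how such an executor is driven from a polling loop (illustrative only; the function name `poll_loop` and the task body are assumptions, the real firmware loop follows in main_loop.rs below):

    use bitbox02_executor::Executor;

    // `spawn` takes `&'static self`, so the executor itself must be a static.
    static EXECUTOR: Executor = Executor::new();

    fn poll_loop() -> ! {
        // Spawn a long-lived task once; `detach` drops the Task handle without
        // cancelling the task (an async_task::Task is cancelled on drop otherwise).
        EXECUTOR
            .spawn(async {
                // ... an async workflow would run here ...
            })
            .detach();

        loop {
            // Poll at most one scheduled task per iteration, interleaved with
            // the firmware's UART/USB polling.
            EXECUTOR.try_tick();
        }
    }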
+ +use core::sync::atomic::{AtomicBool, Ordering}; + +use bitbox02::ringbuffer::RingBuffer; +use bitbox02::uart::USART_0_BUFFER_SIZE; +use bitbox02::usb::USB_REPORT_SIZE; +use bitbox02_executor::Executor; +use util::log::log; + +#[cfg_attr(feature = "c-unit-testing", allow(unused))] +const UART_OUT_BUF_LEN: u32 = 2048; + +static EXECUTOR: Executor = Executor::new(); + +#[cfg_attr(feature = "c-unit-testing", allow(unused))] +fn main_loop() -> ! { + static ORIENTATION_CHOSEN: AtomicBool = AtomicBool::new(false); + // Set the size of uart_read_buf to the size of the ringbuffer in the UART driver so we can read + // out all bytes + let mut uart_read_buf = [0u8; USART_0_BUFFER_SIZE as usize]; + let mut uart_read_buf_len = 0u16; + + let mut uart_write_buf = [0u8; UART_OUT_BUF_LEN as usize]; + let mut uart_write_queue = RingBuffer::new(&mut uart_write_buf); + + // If the bootloader has booted the BLE chip, the BLE chip isn't aware of the name according to + // the fw. Send it over. + let device_name = bitbox02::memory::get_device_name(); + bitbox02::da14531::set_name(&device_name, &mut uart_write_queue); + + // This starts the async orientation screen workflow, which is processed by the loop below. + EXECUTOR + .spawn(async { + crate::workflow::orientation_screen::orientation_screen().await; + util::log!("ori chosen"); + ORIENTATION_CHOSEN.store(true, Ordering::Relaxed); + }) + .detach(); + + EXECUTOR + .spawn(async { + util::log::log!("hello world"); + }) + .detach(); + + let mut hww_data = None; + let mut hww_frame = [0u8; USB_REPORT_SIZE as usize]; + + #[cfg(feature = "app-u2f")] + bitbox02::u2f_packet::init(); + #[cfg(feature = "app-u2f")] + let mut u2f_data = None; + #[cfg(feature = "app-u2f")] + let mut u2f_frame = [0u8; USB_REPORT_SIZE as usize]; + + if !bitbox02::memory::ble_enabled() { + bitbox02::communication_mode::ble_disable(); + } + + loop { + // Do UART I/O + if bitbox02::communication_mode::ble_enabled() { + if uart_read_buf_len < uart_read_buf.len() as u16 || uart_write_queue.len() > 0 { + bitbox02::uart::poll( + Some(&mut uart_read_buf), + Some(&mut uart_read_buf_len), + Some(&mut uart_write_queue), + ) + } + } + + // Check if there is outgoing data + if hww_data.is_none() { + hww_data = bitbox02::queue::pull_hww(); + } + + // Generate u2f timeout packets + #[cfg(feature = "app-u2f")] + { + // Generate timeout packets + let mut timeout_cid = 0u32; + while bitbox02::u2f_packet::timeout_get(&mut timeout_cid) { + bitbox02::u2f_packet::timeout(timeout_cid); + } + if u2f_data.is_none() { + u2f_data = bitbox02::queue::pull_u2f(); + // If USB stack was locked and there is no more messages to send out, time to + // unlock it. + if u2f_data.is_none() && bitbox02::usb_processing::locked_u2f() { + bitbox02::usb_processing::unlock(); + } + } + } + + // Do USB Input + if hww_data.is_none() && bitbox02::hid_hww::read(&mut hww_frame) { + if bitbox02::usb_packet::process(&hww_frame) { + if bitbox02::communication_mode::ble_enabled() { + // Enqueue a power down command to the da14531 + bitbox02::da14531::power_down(&mut uart_write_queue); + // Flush out the power down command. This will be the last UART communication + // we do. 
+ while uart_write_queue.len() > 0 { + bitbox02::uart::poll(None, None, Some(&mut uart_write_queue)); + } + bitbox02::communication_mode::ble_disable(); + } + } else { + log!("usb_packet_process: invalid"); + } + } + #[cfg(feature = "app-u2f")] + if u2f_data.is_none() && bitbox02::hid_u2f::read(&mut u2f_frame) { + bitbox02::u2f_packet::process(&u2f_frame); + } + + // Do UART Output + if bitbox02::communication_mode::ble_enabled() { + if let Some(frame) = bitbox02::da14531_protocol::poll( + &mut uart_read_buf, + &mut uart_read_buf_len, + &mut hww_data, + &mut uart_write_queue, + ) { + bitbox02::da14531_handler::handler(frame, &mut uart_write_queue); + } + } + + // Do USB Output + if let Some(data) = &mut hww_data + && !bitbox02::communication_mode::ble_enabled() + { + if bitbox02::hid_hww::write_poll(data) { + hww_data = None; + } + } + #[cfg(feature = "app-u2f")] + if let Some(data) = &mut u2f_data { + if bitbox02::hid_u2f::write_poll(data) { + u2f_data = None; + } + } + + /* First, process all the incoming USB traffic. */ + bitbox02::usb_processing::process_hww(); + #[cfg(feature = "app-u2f")] + bitbox02::usb_processing::process_u2f(); + + /* + * If USB has generated events at the application level, + * process them now. + */ + #[cfg(feature = "app-u2f")] + bitbox02::u2f::process(); + + bitbox02::screen::process(); + + /* And finally, run the high-level event processing. */ + #[cfg(feature = "app-u2f")] + unsafe { + crate::workflow::u2f_c_api::rust_workflow_spin() + } + crate::async_usb::spin(); + + // Run async executor + EXECUTOR.try_tick(); + + if ORIENTATION_CHOSEN.swap(false, Ordering::Relaxed) { + util::log!("orientation chosen"); + // hww handler in usb_process must be set up before we can allow ble connections + if let Ok(bitbox02::memory::Platform::BitBox02Plus) = bitbox02::memory::get_platform() { + let (product, product_len) = bitbox02::platform::product(); + bitbox02::da14531_handler::set_product(product, product_len); + bitbox02::da14531::set_product(product, &mut uart_write_queue) + } + bitbox02::usb::start(); + } + } +} + +// +// C interface +// + +#[unsafe(no_mangle)] +#[cfg(not(feature = "c-unit-testing"))] +pub extern "C" fn rust_main_loop() -> ! { + main_loop() +} diff --git a/src/rust/bitbox02-rust/src/workflow.rs b/src/rust/bitbox02-rust/src/workflow.rs index 7da142b061..de77ba6816 100644 --- a/src/rust/bitbox02-rust/src/workflow.rs +++ b/src/rust/bitbox02-rust/src/workflow.rs @@ -27,6 +27,8 @@ pub mod testing; pub mod transaction; pub mod trinary_choice; pub mod trinary_input_string; +#[cfg(feature = "app-u2f")] +pub mod u2f_c_api; pub mod unlock; pub mod unlock_animation; pub mod verify_message; diff --git a/src/rust/bitbox02-rust/src/workflow/orientation_screen.rs b/src/rust/bitbox02-rust/src/workflow/orientation_screen.rs index 530190e95c..236e0ee385 100644 --- a/src/rust/bitbox02-rust/src/workflow/orientation_screen.rs +++ b/src/rust/bitbox02-rust/src/workflow/orientation_screen.rs @@ -13,18 +13,8 @@ // limitations under the License.
use bitbox02::delay::delay_for; +use bitbox02::ui::choose_orientation; use core::time::Duration; -use util::bb02_async::option; - -pub async fn choose_orientation() -> bool { - let result = core::cell::RefCell::new(None as Option); - let mut orientation_arrows = bitbox02::ui::orientation_arrows(|upside_down| { - *result.borrow_mut() = Some(upside_down); - }); - orientation_arrows.screen_stack_push(); - // Wait until orientation has been chosen - option(&result).await -} pub async fn orientation_screen() -> bool { let upside_down = choose_orientation().await; diff --git a/src/rust/bitbox02-rust-c/src/workflow.rs b/src/rust/bitbox02-rust/src/workflow/u2f_c_api.rs similarity index 74% rename from src/rust/bitbox02-rust-c/src/workflow.rs rename to src/rust/bitbox02-rust/src/workflow/u2f_c_api.rs index 22952dae54..e79b4fad82 100644 --- a/src/rust/bitbox02-rust-c/src/workflow.rs +++ b/src/rust/bitbox02-rust/src/workflow/u2f_c_api.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Shift Cryptosecurity AG +// Copyright 2025 Shift Crypto AG // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,9 +23,9 @@ extern crate alloc; +use crate::workflow::confirm; use alloc::boxed::Box; use alloc::string::String; -use bitbox02_rust::workflow::{confirm, orientation_screen}; use core::task::Poll; use util::bb02_async::{Task, spin}; @@ -41,16 +41,13 @@ static mut CONFIRM_TITLE: Option = None; static mut CONFIRM_BODY: Option = None; static mut CONFIRM_PARAMS: Option = None; static mut CONFIRM_STATE: TaskState<'static, Result<(), confirm::UserAbort>> = TaskState::Nothing; -static mut BITBOX02_HAL: bitbox02_rust::hal::BitBox02Hal = bitbox02_rust::hal::BitBox02Hal::new(); - -static mut ORIENTATION_SCREEN_STATE: TaskState<'static, bool> = TaskState::Nothing; +static mut BITBOX02_HAL: crate::hal::BitBox02Hal = crate::hal::BitBox02Hal::new(); #[unsafe(no_mangle)] pub unsafe extern "C" fn rust_workflow_spawn_unlock() { unsafe { - UNLOCK_STATE = TaskState::Running(Box::pin(bitbox02_rust::workflow::unlock::unlock( - &mut BITBOX02_HAL, - ))); + UNLOCK_STATE = + TaskState::Running(Box::pin(crate::workflow::unlock::unlock(&mut BITBOX02_HAL))); } } @@ -74,14 +71,6 @@ pub unsafe extern "C" fn rust_workflow_spawn_confirm( } } -#[unsafe(no_mangle)] -pub unsafe extern "C" fn rust_workflow_spawn_orientation_screen() { - unsafe { - ORIENTATION_SCREEN_STATE = - TaskState::Running(Box::pin(orientation_screen::orientation_screen())); - } -} - #[unsafe(no_mangle)] pub unsafe extern "C" fn rust_workflow_spin() { unsafe { @@ -103,15 +92,6 @@ pub unsafe extern "C" fn rust_workflow_spin() { } _ => (), } - match ORIENTATION_SCREEN_STATE { - TaskState::Running(ref mut task) => { - let result = spin(task); - if let Poll::Ready(result) = result { - ORIENTATION_SCREEN_STATE = TaskState::ResultAvailable(result); - } - } - _ => (), - } } } @@ -151,21 +131,6 @@ pub unsafe extern "C" fn rust_workflow_confirm_poll(result_out: &mut bool) -> bo } } -/// Returns true if there was a result. 
-#[unsafe(no_mangle)] -pub unsafe extern "C" fn rust_workflow_orientation_screen_poll(result_out: &mut bool) -> bool { - unsafe { - match ORIENTATION_SCREEN_STATE { - TaskState::ResultAvailable(result) => { - ORIENTATION_SCREEN_STATE = TaskState::Nothing; - *result_out = result; - true - } - _ => false, - } - } -} - #[unsafe(no_mangle)] pub unsafe extern "C" fn rust_workflow_abort_current() { unsafe { @@ -175,7 +140,5 @@ pub unsafe extern "C" fn rust_workflow_abort_current() { CONFIRM_BODY = None; CONFIRM_PARAMS = None; CONFIRM_STATE = TaskState::Nothing; - - ORIENTATION_SCREEN_STATE = TaskState::Nothing; } } diff --git a/src/rust/bitbox02-sys/build.rs b/src/rust/bitbox02-sys/build.rs index 1f0835989f..97cf5c2cf7 100644 --- a/src/rust/bitbox02-sys/build.rs +++ b/src/rust/bitbox02-sys/build.rs @@ -21,6 +21,8 @@ const ALLOWLIST_VARS: &[&str] = &[ "BASE58_CHECKSUM_LEN", "BIP32_SERIALIZED_LEN", "BIP39_WORDLIST_LEN", + "da14531_handler_current_product_len", + "da14531_handler_current_product", "EC_PUBLIC_KEY_LEN", "font_font_a_11X10", "font_font_a_9X9", @@ -46,6 +48,8 @@ const ALLOWLIST_VARS: &[&str] = &[ "secfalse_u8", "SD_MAX_FILE_SIZE", "SLIDER_POSITION_TWO_THIRD", + "USART_0_BUFFER_SIZE", + "USB_REPORT_SIZE", "XPUB_ENCODED_LEN", ]; @@ -54,9 +58,11 @@ const ALLOWLIST_TYPES: &[&str] = &[ "buffer_t", "component_t", "confirm_params_t", + "da14531_protocol_frame", "delay_t", "event_slider_data_t", "event_types", + "ringbuffer", "secp256k1_ecdsa_s2c_opening", "secp256k1_ecdsa_signature", "secp256k1_pubkey", @@ -66,19 +72,26 @@ const ALLOWLIST_TYPES: &[&str] = &[ "upside_down_t", ]; +const OPAQUE_TYPES: &[&str] = &["da14531_protocol_frame"]; + const ALLOWLIST_FNS: &[&str] = &[ "bip32_derive_xpub", - "bitbox02_smarteeprom_init", - "bitbox02_smarteeprom_get_unlock_attempts", - "bitbox02_smarteeprom_increment_unlock_attempts", - "bitbox02_smarteeprom_reset_unlock_attempts", "bitbox_secp256k1_dleq_prove", "bitbox_secp256k1_dleq_verify", + "bitbox02_smarteeprom_get_unlock_attempts", + "bitbox02_smarteeprom_increment_unlock_attempts", "bitbox02_smarteeprom_init", + "bitbox02_smarteeprom_reset_unlock_attempts", + "communication_mode_ble_disable", "communication_mode_ble_enabled", "confirm_create", "confirm_transaction_address_create", "confirm_transaction_fee_create", + "da14531_handler", + "da14531_power_down", + "da14531_protocol_poll", + "da14531_set_name", + "da14531_set_product", "delay_cancel", "delay_init_ms", "delay_is_elapsed", @@ -90,6 +103,10 @@ const ALLOWLIST_FNS: &[&str] = &[ "fake_securechip_event_counter_reset", "fake_securechip_event_counter", "gmtime", + "hid_hww_read", + "hid_hww_write_poll", + "hid_u2f_read", + "hid_u2f_write_poll", "hww_setup", "keystore_bip39_mnemonic_to_seed", "keystore_get_bip39_word", @@ -136,17 +153,21 @@ const ALLOWLIST_FNS: &[&str] = &[ "memory_spi_get_active_ble_firmware_version", "menu_create", "orientation_arrows_create", + "platform_product", "printf", "progress_create", "progress_set", "queue_hww_queue", "queue_pull", + "queue_u2f_queue", "random_32_bytes_mcu", "random_32_bytes", "random_fake_reset", "reboot_to_bootloader", "reset_ble", "reset_reset", + "ringbuffer_init", + "ringbuffer_num", "screen_clear", "screen_init", "screen_print_debug", @@ -184,6 +205,12 @@ const ALLOWLIST_FNS: &[&str] = &[ "trinary_choice_create", "trinary_input_string_create", "trinary_input_string_set_input", + "u2f_packet_init", + "u2f_packet_process", + "u2f_packet_timeout_get", + "u2f_packet_timeout", + "u2f_process", + "uart_poll", "UG_ClearBuffer", "UG_FontSelect", 
"UG_PutString", @@ -195,8 +222,12 @@ const ALLOWLIST_FNS: &[&str] = &[ "usb_packet_process", "usb_processing_hww", "usb_processing_init", + "usb_processing_locked", "usb_processing_process", "usb_processing_timeout_reset", + "usb_processing_u2f", + "usb_processing_unlock", + "usb_start", "util_format_datetime", ]; @@ -417,6 +448,7 @@ pub fn main() -> Result<(), &'static str> { .args(ALLOWLIST_TYPES.iter().flat_map(|s| ["--allowlist-type", s])) .args(ALLOWLIST_VARS.iter().flat_map(|s| ["--allowlist-var", s])) .args(RUSTIFIED_ENUMS.iter().flat_map(|s| ["--rustified-enum", s])) + .args(OPAQUE_TYPES.iter().flat_map(|s| ["--opaque-type", s])) .arg("wrapper.h") .arg("--") .args(&definitions) diff --git a/src/rust/bitbox02-sys/wrapper.h b/src/rust/bitbox02-sys/wrapper.h index 6f6aaef58e..037fb9644d 100644 --- a/src/rust/bitbox02-sys/wrapper.h +++ b/src/rust/bitbox02-sys/wrapper.h @@ -13,7 +13,11 @@ // limitations under the License. #include +#include +#include +#include #include +#include #include #include #include @@ -21,6 +25,8 @@ #include #include #include +#include +#include #include #include #include @@ -30,6 +36,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -51,18 +60,19 @@ #include #include #include +#include +#include #include +#include #include #include +#include #if defined(TESTING) #include - #include #include #include #include - #include - #include #endif #if !defined(TESTING) diff --git a/src/rust/bitbox02/src/communication_mode.rs b/src/rust/bitbox02/src/communication_mode.rs new file mode 100644 index 0000000000..5a0767b344 --- /dev/null +++ b/src/rust/bitbox02/src/communication_mode.rs @@ -0,0 +1,23 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub fn ble_disable() { + unsafe { + bitbox02_sys::communication_mode_ble_disable(); + } +} + +pub fn ble_enabled() -> bool { + unsafe { bitbox02_sys::communication_mode_ble_enabled() } +} diff --git a/src/rust/bitbox02/src/da14531.rs b/src/rust/bitbox02/src/da14531.rs new file mode 100644 index 0000000000..f394c37b24 --- /dev/null +++ b/src/rust/bitbox02/src/da14531.rs @@ -0,0 +1,36 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::ringbuffer::RingBuffer; + +pub fn set_name(name: &str, queue: &mut RingBuffer) { + let name = crate::util::str_to_cstr_vec(name).unwrap(); + unsafe { bitbox02_sys::da14531_set_name(name.as_ptr(), &mut queue.inner as *mut _) }; +} + +pub fn set_product(product: &str, queue: &mut RingBuffer) { + unsafe { + bitbox02_sys::da14531_set_product( + product.as_bytes().as_ptr() as *const _, + product.len() as u16, + &mut queue.inner, + ) + } +} + +pub fn power_down(queue: &mut RingBuffer) { + unsafe { + bitbox02_sys::da14531_power_down(&mut queue.inner as *mut _); + } +} diff --git a/src/rust/bitbox02/src/da14531_handler.rs b/src/rust/bitbox02/src/da14531_handler.rs new file mode 100644 index 0000000000..2a5fd4826c --- /dev/null +++ b/src/rust/bitbox02/src/da14531_handler.rs @@ -0,0 +1,29 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::ringbuffer::RingBuffer; +use bitbox02_sys::da14531_protocol_frame; + +pub fn handler(frame: &'static da14531_protocol_frame, uart_write_queue: &mut RingBuffer) { + unsafe { + bitbox02_sys::da14531_handler(frame as *const _, &mut uart_write_queue.inner); + } +} + +pub fn set_product(product: &'static str, len: u16) { + unsafe { + bitbox02_sys::da14531_handler_current_product = product.as_bytes().as_ptr(); + bitbox02_sys::da14531_handler_current_product_len = len; + } +} diff --git a/src/rust/bitbox02/src/da14531_protocol.rs b/src/rust/bitbox02/src/da14531_protocol.rs new file mode 100644 index 0000000000..6a3cc92645 --- /dev/null +++ b/src/rust/bitbox02/src/da14531_protocol.rs @@ -0,0 +1,45 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::ringbuffer::RingBuffer; +pub use bitbox02_sys::da14531_protocol_frame; + +pub fn poll( + uart_read_buf: &mut [u8], + uart_read_buf_len: &mut u16, + hww_data: &mut Option<[u8; 64]>, + uart_write_queue: &mut RingBuffer, +) -> Option<&'static da14531_protocol_frame> { + let mut data: *const u8 = if let Some(data) = (*hww_data).as_ref() { + data.as_ptr() as *const _ + } else { + core::ptr::null() + }; + let frame = unsafe { + bitbox02_sys::da14531_protocol_poll( + uart_read_buf.as_mut_ptr() as *mut _, + uart_read_buf_len as *mut _, + &mut data as *mut _, + &mut uart_write_queue.inner as *mut _, + ) + }; + if data.is_null() { + *hww_data = None; + } + if frame.is_null() { + None + } else { + Some(unsafe { &*frame }) + } +} diff --git a/src/rust/bitbox02/src/delay.rs b/src/rust/bitbox02/src/delay.rs index a31d3af7eb..30f93a130c 100644 --- a/src/rust/bitbox02/src/delay.rs +++ b/src/rust/bitbox02/src/delay.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use core::pin::Pin; -use core::task::{Context, Poll}; +use alloc::sync::Arc; +use core::task::{Poll, Waker}; use core::time::Duration; #[cfg(not(any( @@ -21,86 +21,52 @@ use core::time::Duration; feature = "c-unit-testing", feature = "simulator-graphical" )))] -struct DelayInner { - bitbox02_delay: bitbox02_sys::delay_t, -} - -#[cfg(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" -))] -struct DelayInner { - thread_handle: Option>, - done: std::sync::Arc, -} - -pub struct Delay { - inner: DelayInner, -} - -impl Delay { - #[cfg(not(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" - )))] - pub fn from_ms(ms: u32) -> Delay { - let mut delay = Delay { - inner: DelayInner { - bitbox02_delay: bitbox02_sys::delay_t { id: usize::MAX }, - }, - }; - unsafe { bitbox02_sys::delay_init_ms(&mut delay.inner.bitbox02_delay as *mut _, ms) } - delay +pub async fn delay_for(duration: Duration) { + use core::cell::RefCell; + use core::ffi::c_void; + // Shared between the async context and the c callback + struct SharedState { + waker: Option, + result: Option<()>, + bitbox02_delay: bitbox02_sys::delay_t, } - #[cfg(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" - ))] - pub fn from_ms(ms: u32) -> Delay { - let (thread_handle, done) = if ms == 0 { - ( - None, - std::sync::Arc::new(std::sync::atomic::AtomicBool::new(true)), - ) - } else { - let done = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)); - let handle = Some(std::thread::spawn({ - let done = std::sync::Arc::clone(&done); - move || { - std::thread::sleep(std::time::Duration::from_millis(ms as u64)); - (*done).store(true, std::sync::atomic::Ordering::Relaxed); - // TODO: Waker.wake, once we have an async runtime - } - })); - (handle, done) - }; - Delay { - inner: DelayInner { - thread_handle, - done, - }, + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + bitbox02_delay: bitbox02_sys::delay_t { id: 0 }, + })); + unsafe extern "C" fn callback(user_data: *mut c_void) { + let shared_state: Arc> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(()); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); } } -} + unsafe { + bitbox02_sys::delay_init_ms( + &mut shared_state.borrow_mut().bitbox02_delay as *mut _, + duration.as_millis() as u32, + 
Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, + ) + } -#[cfg(not(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" -)))] -impl Future for Delay { - type Output = (); + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - if unsafe { bitbox02_sys::delay_is_elapsed(&self.inner.bitbox02_delay as *const _) } { - Poll::Ready(()) - } else { - Poll::Pending + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } } - } + }) + .await } #[cfg(any( @@ -108,31 +74,55 @@ impl Future for Delay { feature = "c-unit-testing", feature = "simulator-graphical" ))] -impl Future for Delay { - type Output = (); - fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - if self.inner.done.load(std::sync::atomic::Ordering::Relaxed) { - if let Some(th) = self.inner.thread_handle.take() { - th.join().unwrap(); - } - Poll::Ready(()) - } else { - Poll::Pending - } +pub async fn delay_for(duration: Duration) { + use std::sync::Mutex; + // Shared between the async context and the c callback + struct SharedState { + waker: Option, + result: Option<()>, + handle: Option>, } -} -#[cfg(not(any( - feature = "testing", - feature = "c-unit-testing", - feature = "simulator-graphical" -)))] -impl Drop for Delay { - fn drop(&mut self) { - unsafe { bitbox02_sys::delay_cancel(&self.inner.bitbox02_delay as *const _) } + if duration == Duration::ZERO { + return; } -} -pub fn delay_for(duration: Duration) -> Delay { - Delay::from_ms(duration.as_millis() as u32) + let shared_state = Arc::new(Mutex::new(SharedState { + waker: None, + result: None, + handle: None, + })); + + let handle = std::thread::spawn({ + let shared_state = Arc::clone(&shared_state); + move || { + std::thread::sleep(duration); + let mut shared_state = shared_state.lock().unwrap(); + shared_state.result = Some(()); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref() + } + } + }); + + shared_state.lock().unwrap().handle = Some(handle); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.lock().unwrap(); + + if let Some(result) = shared_state.result { + if let Some(handle) = shared_state.handle.take() { + handle.join().unwrap(); + } + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await } diff --git a/src/rust/bitbox02/src/hid_hww.rs b/src/rust/bitbox02/src/hid_hww.rs new file mode 100644 index 0000000000..6060fd4519 --- /dev/null +++ b/src/rust/bitbox02/src/hid_hww.rs @@ -0,0 +1,21 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
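With the change above, `delay_for` becomes an ordinary `async fn`: it arms the one-shot timer through `delay_init_ms`, and the C callback stores the result and wakes the saved `Waker`, so the future completes on its next poll. A minimal sketch of awaiting it from a task spawned on the new executor (illustrative; `blink_after_delay` and its body are assumptions, not part of this change):

    use core::time::Duration;

    async fn blink_after_delay() {
        // Suspends this task; the timer callback wakes it through the stored
        // Waker, and the executor polls it again on a later `try_tick`.
        bitbox02::delay::delay_for(Duration::from_millis(300)).await;
        // ... update the UI or toggle state here ...
    }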
+ +pub fn write_poll(buf: &[u8; 64]) -> bool { + unsafe { bitbox02_sys::hid_hww_write_poll(buf.as_ptr() as *const _) } +} + +pub fn read(buf: &mut [u8; 64]) -> bool { + unsafe { bitbox02_sys::hid_hww_read(buf as *mut _) } +} diff --git a/src/rust/bitbox02/src/hid_u2f.rs b/src/rust/bitbox02/src/hid_u2f.rs new file mode 100644 index 0000000000..72eb2c8da0 --- /dev/null +++ b/src/rust/bitbox02/src/hid_u2f.rs @@ -0,0 +1,21 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub fn write_poll(buf: &[u8; 64]) -> bool { + unsafe { bitbox02_sys::hid_u2f_write_poll(buf.as_ptr() as *const _) } +} + +pub fn read(buf: &mut [u8; 64]) -> bool { + unsafe { bitbox02_sys::hid_u2f_read(buf as *mut _) } +} diff --git a/src/rust/bitbox02/src/lib.rs b/src/rust/bitbox02/src/lib.rs index 56efecc2e1..49038fb2cc 100644 --- a/src/rust/bitbox02/src/lib.rs +++ b/src/rust/bitbox02/src/lib.rs @@ -34,17 +34,22 @@ use alloc::string::String; #[cfg(any(feature = "testing", feature = "simulator-graphical"))] pub mod testing; +pub mod communication_mode; +pub mod da14531; +pub mod da14531_handler; +pub mod da14531_protocol; pub mod delay; #[cfg(feature = "simulator-graphical")] pub mod event; -#[cfg(feature = "simulator-graphical")] +pub mod hid_hww; +pub mod hid_u2f; pub mod hww; pub mod keystore; pub mod memory; -#[cfg(feature = "simulator-graphical")] +pub mod platform; pub mod queue; pub mod random; -#[cfg(feature = "simulator-graphical")] +pub mod ringbuffer; pub mod screen; pub mod screen_saver; pub mod sd; @@ -53,8 +58,13 @@ pub mod securechip; #[cfg(feature = "simulator-graphical")] pub mod smarteeprom; pub mod spi_mem; +#[cfg(feature = "app-u2f")] +pub mod u2f; +#[cfg(feature = "app-u2f")] +pub mod u2f_packet; +pub mod uart; pub mod ui; -#[cfg(feature = "simulator-graphical")] +pub mod usb; pub mod usb_packet; pub mod usb_processing; diff --git a/src/rust/bitbox02/src/platform.rs b/src/rust/bitbox02/src/platform.rs new file mode 100644 index 0000000000..4d82fbdeb1 --- /dev/null +++ b/src/rust/bitbox02/src/platform.rs @@ -0,0 +1,23 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub fn product() -> (&'static str, u16) { + unsafe { + let mut len = 0; + let s = bitbox02_sys::platform_product(&mut len as *mut _) as *const u8; + let s = core::slice::from_raw_parts(s, len); + let s = str::from_utf8_unchecked(s); + (s as _, len as u16) + } +} diff --git a/src/rust/bitbox02/src/queue.rs b/src/rust/bitbox02/src/queue.rs index d41dc3e3b1..abac7c6ac9 100644 --- a/src/rust/bitbox02/src/queue.rs +++ b/src/rust/bitbox02/src/queue.rs @@ -20,3 +20,14 @@ pub fn pull_hww() -> Option<[u8; 64]> { unsafe { core::ptr::copy_nonoverlapping(hww_data, data.as_mut_ptr(), 64) } Some(data) } + +#[cfg(feature = "app-u2f")] +pub fn pull_u2f() -> Option<[u8; 64]> { + let u2f_data = unsafe { bitbox02_sys::queue_pull(bitbox02_sys::queue_u2f_queue()) }; + if u2f_data.is_null() { + return None; + } + let mut data: [u8; 64] = [0; 64]; + unsafe { core::ptr::copy_nonoverlapping(u2f_data, data.as_mut_ptr(), 64) } + Some(data) +} diff --git a/src/rust/bitbox02/src/ringbuffer.rs b/src/rust/bitbox02/src/ringbuffer.rs new file mode 100644 index 0000000000..b9d816f526 --- /dev/null +++ b/src/rust/bitbox02/src/ringbuffer.rs @@ -0,0 +1,48 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use bitbox02_sys::{ringbuffer, ringbuffer_init}; + +/// A wrapper around ASF4 `ringbuffer` type +pub struct RingBuffer<'a> { + // For now we don't use `buf`, but when we implement push/pull we will need to. + _buf: &'a mut [u8], + pub inner: ringbuffer, +} + +impl<'a> RingBuffer<'a> { + /// `buf` length must be a power of 2 + pub fn new(buf: &'a mut [u8]) -> Self { + debug_assert!(buf.len().is_power_of_two()); + let mut inner = ringbuffer { + buf: core::ptr::null_mut(), + size: 0, + read_index: 0, + write_index: 0, + }; + unsafe { + ringbuffer_init( + &mut inner as *mut _, + buf as *mut _ as *mut _, + buf.len() as u32, + ); + }; + RingBuffer { _buf: buf, inner } + } + + /// Bytes currently used + pub fn len(&self) -> u32 { + unsafe { bitbox02_sys::ringbuffer_num(&self.inner as *const _) } + } +} diff --git a/src/rust/bitbox02/src/u2f.rs b/src/rust/bitbox02/src/u2f.rs new file mode 100644 index 0000000000..066635affe --- /dev/null +++ b/src/rust/bitbox02/src/u2f.rs @@ -0,0 +1,19 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub fn process() { + unsafe { + bitbox02_sys::u2f_process(); + } +} diff --git a/src/rust/bitbox02/src/u2f_packet.rs b/src/rust/bitbox02/src/u2f_packet.rs new file mode 100644 index 0000000000..9c57807cf4 --- /dev/null +++ b/src/rust/bitbox02/src/u2f_packet.rs @@ -0,0 +1,33 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use bitbox02_sys::USB_FRAME; + +pub fn init() { + unsafe { + bitbox02_sys::u2f_packet_init(); + } +} + +pub fn timeout_get(cid: &mut u32) -> bool { + unsafe { bitbox02_sys::u2f_packet_timeout_get(cid as *mut _) } +} + +pub fn timeout(cid: u32) { + unsafe { bitbox02_sys::u2f_packet_timeout(cid) } +} + +pub fn process(packet: &[u8; 64]) -> bool { + unsafe { bitbox02_sys::u2f_packet_process(packet.as_ptr() as *const _) } +} diff --git a/src/rust/bitbox02/src/uart.rs b/src/rust/bitbox02/src/uart.rs new file mode 100644 index 0000000000..d89623e1bb --- /dev/null +++ b/src/rust/bitbox02/src/uart.rs @@ -0,0 +1,45 @@ +// Copyright 2025 Shift Crypto AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::ringbuffer::RingBuffer; +pub use bitbox02_sys::USART_0_BUFFER_SIZE; + +pub fn poll( + uart_read_buf: Option<&mut [u8]>, + uart_read_buf_len: Option<&mut u16>, + uart_write_queue: Option<&mut RingBuffer>, +) { + let (uart_read_buf, cap) = if let Some(uart_read_buf) = uart_read_buf { + ( + uart_read_buf as *mut _ as *mut _, + uart_read_buf.len() as u16, + ) + } else { + (core::ptr::null_mut(), 0u16) + }; + let uart_read_buf_len = if let Some(len) = uart_read_buf_len { + len as *mut _ + } else { + core::ptr::null_mut() + }; + let uart_write_queue = if let Some(uart_write_queue) = uart_write_queue { + &mut uart_write_queue.inner as *mut _ + } else { + core::ptr::null_mut() + }; + + unsafe { + bitbox02_sys::uart_poll(uart_read_buf, cap, uart_read_buf_len, uart_write_queue); + } +} diff --git a/src/rust/bitbox02/src/ui/ui.rs b/src/rust/bitbox02/src/ui/ui.rs index 904b506fc7..acce83fb31 100644 --- a/src/rust/bitbox02/src/ui/ui.rs +++ b/src/rust/bitbox02/src/ui/ui.rs @@ -23,7 +23,11 @@ use core::ffi::{c_char, c_void}; extern crate alloc; use alloc::boxed::Box; use alloc::string::String; +use alloc::sync::Arc; use alloc::vec::Vec; +use core::cell::RefCell; +//use core::pin::Pin; +use core::task::{Poll, Waker}; use core::marker::PhantomData; @@ -498,30 +502,54 @@ where } } -pub fn orientation_arrows<'a, F>(on_done: F) -> Component<'a> -where - // Callback must outlive component. 
- F: FnMut(bool) + 'a, -{ - unsafe extern "C" fn c_on_done<F2>(upside_down: bool, param: *mut c_void) - where - F2: FnOnce(bool), - { - // The callback is dropped afterwards. This is safe because - // this C callback is guaranteed to be called only once. - let on_done = unsafe { Box::from_raw(param as *mut F2) }; - on_done(upside_down); +pub async fn choose_orientation() -> bool { + // Shared between the async context and the c callback + struct SharedState { + waker: Option<Waker>, + result: Option<bool>, + } + let shared_state = Arc::new(RefCell::new(SharedState { + waker: None, + result: None, + })); + + unsafe extern "C" fn callback(upside_down: bool, user_data: *mut c_void) { + let shared_state: Arc<RefCell<SharedState>> = unsafe { Arc::from_raw(user_data as *mut _) }; + let mut shared_state = shared_state.borrow_mut(); + shared_state.result = Some(upside_down); + if let Some(waker) = shared_state.waker.as_ref() { + waker.wake_by_ref(); + } } + let component = unsafe { bitbox02_sys::orientation_arrows_create( - Some(c_on_done::<F>), - Box::into_raw(Box::new(on_done)) as *mut _, // passed to c_on_done as `param`. + Some(callback), + Arc::into_raw(Arc::clone(&shared_state)) as *mut _, // passed to callback as `user_data`. ) }; - Component { + + let mut component = Component { component, is_pushed: false, on_drop: None, _p: PhantomData, - } + }; + component.screen_stack_push(); + + core::future::poll_fn({ + let shared_state = Arc::clone(&shared_state); + move |cx| { + let mut shared_state = shared_state.borrow_mut(); + + if let Some(result) = shared_state.result { + Poll::Ready(result) + } else { + // Store the waker so the callback can wake up this task + shared_state.waker = Some(cx.waker().clone()); + Poll::Pending + } + } + }) + .await } diff --git a/src/rust/bitbox02/src/ui/ui_stub.rs b/src/rust/bitbox02/src/ui/ui_stub.rs index 5b409c62b6..852114cf69 100644 --- a/src/rust/bitbox02/src/ui/ui_stub.rs +++ b/src/rust/bitbox02/src/ui/ui_stub.rs @@ -148,14 +148,6 @@ where } } -pub fn orientation_arrows<'a, F>(on_done: F) -> Component<'a> -where - // Callback must outlive component. - F: FnOnce(bool) + 'a, -{ - on_done(false); - Component { - is_pushed: false, - _p: PhantomData, - } +pub async fn choose_orientation() -> bool { + false } diff --git a/src/rust/bitbox02/src/ui/ui_stub_c_unit_tests.rs b/src/rust/bitbox02/src/ui/ui_stub_c_unit_tests.rs index 69a4b22c7c..62e04e3448 100644 --- a/src/rust/bitbox02/src/ui/ui_stub_c_unit_tests.rs +++ b/src/rust/bitbox02/src/ui/ui_stub_c_unit_tests.rs @@ -191,14 +191,6 @@ where } } -pub fn orientation_arrows<'a, F>(on_done: F) -> Component<'a> -where - // Callback must outlive component. - F: FnOnce(bool) + 'a, -{ - on_done(false); - Component { - is_pushed: false, - _p: PhantomData, - } +pub async fn choose_orientation() -> bool { + false } diff --git a/src/firmware_main_loop.h b/src/rust/bitbox02/src/usb.rs similarity index 72% rename from src/firmware_main_loop.h rename to src/rust/bitbox02/src/usb.rs index 88272135c8..2bde58dadf 100644 --- a/src/firmware_main_loop.h +++ b/src/rust/bitbox02/src/usb.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Shift Cryptosecurity AG +// Copyright 2025 Shift Crypto AG // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,14 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License.
-#ifndef _FIRMWARE_MAIN_LOOP_H_ -#define _FIRMWARE_MAIN_LOOP_H_ +pub use bitbox02_sys::USB_REPORT_SIZE; -#include - -/** - * Runs the main UI of the bitbox. - */ -void firmware_main_loop(void); - -#endif +pub fn start() { + unsafe { + bitbox02_sys::usb_start(); + } +} diff --git a/src/rust/bitbox02/src/usb_processing.rs b/src/rust/bitbox02/src/usb_processing.rs index 0b52c04d0f..15f6e5f32f 100644 --- a/src/rust/bitbox02/src/usb_processing.rs +++ b/src/rust/bitbox02/src/usb_processing.rs @@ -24,7 +24,20 @@ pub fn init() { unsafe { bitbox02_sys::usb_processing_init() } } -#[cfg(feature = "simulator-graphical")] pub fn process_hww() { unsafe { bitbox02_sys::usb_processing_process(bitbox02_sys::usb_processing_hww()) } } + +#[cfg(feature = "app-u2f")] +pub fn process_u2f() { + unsafe { bitbox02_sys::usb_processing_process(bitbox02_sys::usb_processing_u2f()) } +} + +#[cfg(feature = "app-u2f")] +pub fn locked_u2f() -> bool { + unsafe { bitbox02_sys::usb_processing_locked(bitbox02_sys::usb_processing_u2f()) } +} + +pub fn unlock() { + unsafe { bitbox02_sys::usb_processing_unlock() } +} diff --git a/src/rust/util/src/lib.rs b/src/rust/util/src/lib.rs index 0c784b14ed..e2cfb15caf 100644 --- a/src/rust/util/src/lib.rs +++ b/src/rust/util/src/lib.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(not(test), no_std)] +#![cfg_attr(not(feature = "testing"), no_std)] pub mod ascii; pub mod bb02_async; pub mod bip32; diff --git a/src/rust/util/src/log.rs b/src/rust/util/src/log.rs index d50d6d905e..62af7f27b0 100644 --- a/src/rust/util/src/log.rs +++ b/src/rust/util/src/log.rs @@ -4,8 +4,21 @@ pub use ::rtt_target; /// Macro to log over RTT if `rtt` feature is set, otherwise noop #[macro_export] +#[cfg(all(feature = "rtt", target_os = "none"))] +macro_rules! log { + ($($arg:tt)*) => { {$crate::log::rtt_target::rprintln!($($arg)*) }}; +} + +#[macro_export] +#[cfg(all(not(feature = "rtt"), target_os = "none"))] +macro_rules! log { + ($($arg:tt)*) => {}; +} + +#[macro_export] +#[cfg(not(target_os = "none"))] macro_rules! log { - ($($arg:tt)*) => { #[cfg(feature="rtt")] {$crate::log::rtt_target::rprintln!($($arg)*) }}; + ($($arg:tt)*) => {std::println!($($arg)*) }; } // Make log macro usable in crate diff --git a/src/usb/class/hid/hww/hid_hww.c b/src/usb/class/hid/hww/hid_hww.c index 16ba518033..107224dbc0 100644 --- a/src/usb/class/hid/hww/hid_hww.c +++ b/src/usb/class/hid/hww/hid_hww.c @@ -57,6 +57,7 @@ static struct usbdc_handler _request_handler = {NULL, (FUNC_PTR)_request}; static volatile bool _send_busy = false; static volatile bool _has_data = false; static volatile bool _request_in_flight = false; +static uint8_t _write_buf[64] __attribute__((aligned(4))); // First time this function is called it initiates a transfer. Call it multiple times to poll for // completion. Once it returns true, there is data in the buffer. 
@@ -85,7 +86,8 @@ bool hid_hww_write_poll(const uint8_t* data) if (_send_busy) { return false; } - if (hid_write(&_func_data, data, USB_HID_REPORT_OUT_SIZE) == ERR_NONE) { + memcpy(_write_buf, data, USB_HID_REPORT_OUT_SIZE); + if (hid_write(&_func_data, _write_buf, USB_HID_REPORT_OUT_SIZE) == ERR_NONE) { _send_busy = true; return true; } diff --git a/src/usb/class/hid/u2f/hid_u2f.c b/src/usb/class/hid/u2f/hid_u2f.c index 0aa3ecb9f7..e96da48fad 100644 --- a/src/usb/class/hid/u2f/hid_u2f.c +++ b/src/usb/class/hid/u2f/hid_u2f.c @@ -45,6 +45,7 @@ static uint8_t _report_descriptor[] = {USB_DESC_U2F_REPORT}; static volatile bool _send_busy = false; static volatile bool _has_data = false; static volatile bool _request_in_flight = false; +static uint8_t _write_buf[64] __attribute__((aligned(4))); /** * The USB device core request handler callback for the U2F interface. @@ -87,7 +88,8 @@ bool hid_u2f_write_poll(const uint8_t* data) if (_send_busy) { return false; } - if (hid_write(&_func_data, data, USB_HID_REPORT_OUT_SIZE) == ERR_NONE) { + memcpy(_write_buf, data, USB_HID_REPORT_OUT_SIZE); + if (hid_write(&_func_data, _write_buf, USB_HID_REPORT_OUT_SIZE) == ERR_NONE) { _send_busy = true; return true; } diff --git a/test/simulator-graphical/Cargo.lock b/test/simulator-graphical/Cargo.lock index 492cf0afc4..df9c028158 100644 --- a/test/simulator-graphical/Cargo.lock +++ b/test/simulator-graphical/Cargo.lock @@ -183,6 +183,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "175571dd1d178ced59193a6fc02dde1b972eb0bc56c892cde9beeceac5bf0f6b" +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "atomic-waker" version = "1.1.2" @@ -297,6 +303,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "bitbox02-executor" +version = "0.1.0" +dependencies = [ + "async-task", + "concurrent-queue", + "critical-section", + "futures-lite", + "pin-project-lite", +] + [[package]] name = "bitbox02-noise" version = "0.1.0" @@ -315,6 +332,7 @@ dependencies = [ "bip39", "bitbox-aes", "bitbox02", + "bitbox02-executor", "bitbox02-noise", "bitcoin", "bitcoin_hashes", diff --git a/test/simulator-graphical/src/main.rs b/test/simulator-graphical/src/main.rs index 6f0bd5e8c3..0f659e7bb4 100644 --- a/test/simulator-graphical/src/main.rs +++ b/test/simulator-graphical/src/main.rs @@ -51,6 +51,9 @@ use tracing_subscriber::{EnvFilter, filter::LevelFilter, fmt, prelude::*}; use bitbox02::ui::ugui::UG_COLOR; +// Explicitly link library +use bitbox02_rust_c as _; + static BG: &[u8; 325362] = include_bytes!("../bg.png"); const MARGIN: usize = 20; @@ -669,7 +672,6 @@ impl ApplicationHandler for App { } } // Business logic - unsafe { bitbox02_rust_c::workflow::rust_workflow_spin() } bitbox02_rust::async_usb::spin(); bitbox02::usb_processing::process_hww(); bitbox02::screen::process();