From f25cd267ac0bb9f313b75138a8cf24aae408b83d Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 18 Apr 2021 22:39:34 +0900 Subject: [PATCH 001/110] Replace vec-arena with slab --- Cargo.toml | 2 +- src/lib.rs | 32 +++++++++++++++++++++----------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2c58175..0735ac3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ concurrent-queue = "1.2.2" fastrand = "1.3.4" futures-lite = "1.11.0" once_cell = "1.4.1" -vec-arena = "1.0.0" +slab = "0.4.2" [dev-dependencies] async-channel = "1.4.1" diff --git a/src/lib.rs b/src/lib.rs index a43a498..72bcc1e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -31,7 +31,7 @@ use std::task::{Poll, Waker}; use async_task::Runnable; use concurrent_queue::ConcurrentQueue; use futures_lite::{future, prelude::*}; -use vec_arena::Arena; +use slab::Slab; #[doc(no_inline)] pub use async_task::Task; @@ -131,10 +131,16 @@ impl<'a> Executor<'a> { let mut active = self.state().active.lock().unwrap(); // Remove the task from the set of active tasks when the future finishes. 
- let index = active.next_vacant(); + let index = active.vacant_entry().key(); let state = self.state().clone(); let future = async move { - let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().remove(index))); + let _guard = CallOnDrop(move || { + // TODO: use try_remove once https://github.com/tokio-rs/slab/pull/89 merged + let mut active = state.active.lock().unwrap(); + if active.contains(index) { + drop(active.remove(index)); + } + }); future.await }; @@ -257,10 +263,8 @@ impl Drop for Executor<'_> { fn drop(&mut self) { if let Some(state) = self.state.get() { let mut active = state.active.lock().unwrap(); - for i in 0..active.capacity() { - if let Some(w) = active.remove(i) { - w.wake(); - } + for w in active.drain() { + w.wake(); } drop(active); @@ -359,10 +363,16 @@ impl<'a> LocalExecutor<'a> { let mut active = self.inner().state().active.lock().unwrap(); // Remove the task from the set of active tasks when the future finishes. - let index = active.next_vacant(); + let index = active.vacant_entry().key(); let state = self.inner().state().clone(); let future = async move { - let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().remove(index))); + let _guard = CallOnDrop(move || { + // TODO: use try_remove once https://github.com/tokio-rs/slab/pull/89 merged + let mut active = state.active.lock().unwrap(); + if active.contains(index) { + drop(active.remove(index)); + } + }); future.await }; @@ -475,7 +485,7 @@ struct State { sleepers: Mutex, /// Currently active tasks. 
- active: Mutex>, + active: Mutex>, } impl State { @@ -490,7 +500,7 @@ impl State { wakers: Vec::new(), free_ids: Vec::new(), }), - active: Mutex::new(Arena::new()), + active: Mutex::new(Slab::new()), } } From edf0296f59f08a16868d4ba712240e3481a84c91 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 24 Apr 2021 17:56:59 +0900 Subject: [PATCH 002/110] Bump to v1.4.1 --- CHANGELOG.md | 4 ++++ Cargo.toml | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3098544..fda0f60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.4.1 + +- Remove dependency on deprecated `vec-arena`. (#23) + # Version 1.4.0 - Add `Executor::is_empty()` and `LocalExecutor::is_empty()`. diff --git a/Cargo.toml b/Cargo.toml index 0735ac3..f4c4ca2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,9 @@ [package] name = "async-executor" -version = "1.4.0" +# When publishing a new version: +# - Update CHANGELOG.md +# - Create "v1.x.y" git tag +version = "1.4.1" authors = ["Stjepan Glavina "] edition = "2018" description = "Async executor" From b9ac443e56eb954a11442be6b4f401939744d760 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Thu, 30 Dec 2021 09:36:58 +0900 Subject: [PATCH 003/110] Update slab to 0.4.4 --- Cargo.toml | 2 +- src/lib.rs | 16 ++-------------- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f4c4ca2..a8cdcb6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ concurrent-queue = "1.2.2" fastrand = "1.3.4" futures-lite = "1.11.0" once_cell = "1.4.1" -slab = "0.4.2" +slab = "0.4.4" [dev-dependencies] async-channel = "1.4.1" diff --git a/src/lib.rs b/src/lib.rs index 72bcc1e..5ee05d0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -134,13 +134,7 @@ impl<'a> Executor<'a> { let index = active.vacant_entry().key(); let state = self.state().clone(); let future = async move { - let _guard = CallOnDrop(move || { - // TODO: use try_remove once 
https://github.com/tokio-rs/slab/pull/89 merged - let mut active = state.active.lock().unwrap(); - if active.contains(index) { - drop(active.remove(index)); - } - }); + let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().try_remove(index))); future.await }; @@ -366,13 +360,7 @@ impl<'a> LocalExecutor<'a> { let index = active.vacant_entry().key(); let state = self.inner().state().clone(); let future = async move { - let _guard = CallOnDrop(move || { - // TODO: use try_remove once https://github.com/tokio-rs/slab/pull/89 merged - let mut active = state.active.lock().unwrap(); - if active.contains(index) { - drop(active.remove(index)); - } - }); + let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().try_remove(index))); future.await }; From 2341801cd051af275ff31149d8d60550b91133f5 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Thu, 30 Dec 2021 09:38:02 +0900 Subject: [PATCH 004/110] Fix clippy::redundant_closure warning --- src/lib.rs | 2 +- tests/drop.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 5ee05d0..cf38665 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -447,7 +447,7 @@ impl<'a> LocalExecutor<'a> { /// Returns a reference to the inner executor. 
fn inner(&self) -> &Executor<'a> { - self.inner.get_or_init(|| Executor::new()) + self.inner.get_or_init(Executor::new) } } diff --git a/tests/drop.rs b/tests/drop.rs index 7293cae..dd20add 100644 --- a/tests/drop.rs +++ b/tests/drop.rs @@ -11,7 +11,7 @@ use once_cell::sync::Lazy; #[test] fn executor_cancels_everything() { static DROP: AtomicUsize = AtomicUsize::new(0); - static WAKER: Lazy>> = Lazy::new(|| Default::default()); + static WAKER: Lazy>> = Lazy::new(Default::default); let ex = Executor::new(); @@ -41,7 +41,7 @@ fn executor_cancels_everything() { #[test] fn leaked_executor_leaks_everything() { static DROP: AtomicUsize = AtomicUsize::new(0); - static WAKER: Lazy>> = Lazy::new(|| Default::default()); + static WAKER: Lazy>> = Lazy::new(Default::default); let ex = Executor::new(); From ee7bd4d2aff3416300f06c07c5aa5b73a72e7cff Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 8 Jan 2022 21:33:35 +0900 Subject: [PATCH 005/110] Clean up CI config --- .clippy.toml | 1 + .github/workflows/build-and-test.yaml | 43 ---------------- .github/workflows/ci.yml | 72 +++++++++++++++++++++++++++ .github/workflows/lint.yaml | 23 --------- .github/workflows/security.yaml | 17 ------- Cargo.toml | 2 + 6 files changed, 75 insertions(+), 83 deletions(-) create mode 100644 .clippy.toml delete mode 100644 .github/workflows/build-and-test.yaml create mode 100644 .github/workflows/ci.yml delete mode 100644 .github/workflows/lint.yaml delete mode 100644 .github/workflows/security.yaml diff --git a/.clippy.toml b/.clippy.toml new file mode 100644 index 0000000..0f404fa --- /dev/null +++ b/.clippy.toml @@ -0,0 +1 @@ +msrv = "1.40" diff --git a/.github/workflows/build-and-test.yaml b/.github/workflows/build-and-test.yaml deleted file mode 100644 index 00e00cc..0000000 --- a/.github/workflows/build-and-test.yaml +++ /dev/null @@ -1,43 +0,0 @@ -name: Build and test - -on: - push: - branches: - - master - pull_request: - -jobs: - build_and_test: - runs-on: ${{ matrix.os }} - 
strategy: - fail-fast: false - matrix: - os: [ubuntu-latest] - rust: [nightly, beta, stable] - steps: - - uses: actions/checkout@v2 - - - name: Install latest ${{ matrix.rust }} - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.rust }} - profile: minimal - override: true - - - name: Run cargo check - uses: actions-rs/cargo@v1 - with: - command: check - args: --all --bins --examples --tests --all-features - - - name: Run cargo check (without dev-dependencies to catch missing feature flags) - if: startsWith(matrix.rust, 'nightly') - uses: actions-rs/cargo@v1 - with: - command: check - args: -Z features=dev_dep - - - name: Run cargo test - uses: actions-rs/cargo@v1 - with: - command: test diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..e40fa2d --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,72 @@ +name: CI + +on: + pull_request: + push: + branches: + - master + schedule: + - cron: '0 2 * * *' + +env: + RUSTFLAGS: -D warnings + RUST_BACKTRACE: 1 + +jobs: + test: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + rust: [nightly, beta, stable] + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} + - run: cargo build --all --all-features --all-targets + if: startsWith(matrix.rust, 'nightly') + - name: Run cargo check (without dev-dependencies to catch missing feature flags) + if: startsWith(matrix.rust, 'nightly') + run: cargo check -Z features=dev_dep + - run: cargo test + + msrv: + runs-on: ubuntu-latest + strategy: + matrix: + # When updating this, the reminder to update the minimum supported + # Rust version in Cargo.toml and .clippy.toml. 
+ rust: ['1.40'] + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} + - run: cargo build + + clippy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update stable + - uses: actions-rs/clippy-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + args: --all-features -- -W clippy::all + + fmt: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update stable + - run: cargo fmt --all -- --check + + security_audit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/audit-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml deleted file mode 100644 index b6017f1..0000000 --- a/.github/workflows/lint.yaml +++ /dev/null @@ -1,23 +0,0 @@ -name: Lint - -on: - push: - branches: - - master - pull_request: - -jobs: - clippy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - profile: minimal - components: clippy - - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --all-features -- -W clippy::all diff --git a/.github/workflows/security.yaml b/.github/workflows/security.yaml deleted file mode 100644 index c4f7947..0000000 --- a/.github/workflows/security.yaml +++ /dev/null @@ -1,17 +0,0 @@ -name: Security audit - -on: - push: - branches: - - master - pull_request: - -jobs: - security_audit: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - uses: actions-rs/audit-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} diff --git a/Cargo.toml b/Cargo.toml index a8cdcb6..af956e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ name = "async-executor" version = "1.4.1" authors = ["Stjepan Glavina "] edition = "2018" +rust-version = "1.40" description 
= "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" @@ -13,6 +14,7 @@ homepage = "https://github.com/smol-rs/async-executor" documentation = "https://docs.rs/async-executor" keywords = ["asynchronous", "executor", "single", "multi", "spawn"] categories = ["asynchronous", "concurrency"] +exclude = ["/.*"] [dependencies] async-task = "4.0.0" From 367095cdc5b47c88663e9d060ce0404f1b28c0f3 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 8 Jan 2022 21:33:51 +0900 Subject: [PATCH 006/110] Create GitHub release automatically --- .github/workflows/release.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..d1e7c66 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,26 @@ +name: Release + +on: + push: + tags: + - v[0-9]+.* + +env: + RUSTFLAGS: -D warnings + RUST_BACKTRACE: 1 + +jobs: + create-release: + if: github.repository_owner == 'smol-rs' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Install Rust + run: rustup update stable + - run: cargo package + - uses: taiki-e/create-gh-release-action@v1 + with: + changelog: CHANGELOG.md + branch: master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From f190408a6fee4177f3002516f93f5b1aa28396e5 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 9 Jan 2022 01:25:32 +0900 Subject: [PATCH 007/110] Remove rustfmt.toml --- rustfmt.toml | 1 - 1 file changed, 1 deletion(-) delete mode 100644 rustfmt.toml diff --git a/rustfmt.toml b/rustfmt.toml deleted file mode 100644 index 1082fd8..0000000 --- a/rustfmt.toml +++ /dev/null @@ -1 +0,0 @@ -version = "Two" From 19919c469490289784eee4830a4b360a4efc23a0 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 1 May 2022 13:35:15 +0900 Subject: [PATCH 008/110] Update actions/checkout action to v3 --- 
.github/workflows/ci.yml | 10 +++++----- .github/workflows/release.yml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e40fa2d..2d44a52 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: os: [ubuntu-latest] rust: [nightly, beta, stable] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install Rust run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} - run: cargo build --all --all-features --all-targets @@ -39,7 +39,7 @@ jobs: # Rust version in Cargo.toml and .clippy.toml. rust: ['1.40'] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install Rust run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} - run: cargo build @@ -47,7 +47,7 @@ jobs: clippy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable - uses: actions-rs/clippy-check@v1 @@ -58,7 +58,7 @@ jobs: fmt: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable - run: cargo fmt --all -- --check @@ -66,7 +66,7 @@ jobs: security_audit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/audit-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d1e7c66..a917c26 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,7 +14,7 @@ jobs: if: github.repository_owner == 'smol-rs' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable - run: cargo package From 21f4982a3d4843f0a4a377b73dd6245037c59f47 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 8 Jul 2022 01:12:38 +0900 Subject: [PATCH 
009/110] Update CI config --- .github/workflows/ci.yml | 7 ++----- .github/workflows/release.yml | 7 ------- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2d44a52..1aa686f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,10 +50,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable - - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --all-features -- -W clippy::all + - run: cargo clippy --all-features fmt: runs-on: ubuntu-latest @@ -61,7 +58,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable - - run: cargo fmt --all -- --check + - run: cargo fmt --all --check security_audit: runs-on: ubuntu-latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a917c26..be57bd1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,19 +5,12 @@ on: tags: - v[0-9]+.* -env: - RUSTFLAGS: -D warnings - RUST_BACKTRACE: 1 - jobs: create-release: if: github.repository_owner == 'smol-rs' runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Install Rust - run: rustup update stable - - run: cargo package - uses: taiki-e/create-gh-release-action@v1 with: changelog: CHANGELOG.md From 16f0b9ca7063dab993a14bc0e7f95938df0980e6 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 8 Jul 2022 01:14:13 +0900 Subject: [PATCH 010/110] Bump MSRV to Rust 1.47 https://github.com/smol-rs/async-task/releases/tag/v4.3.0 --- .clippy.toml | 2 +- .github/workflows/ci.yml | 2 +- Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.clippy.toml b/.clippy.toml index 0f404fa..7846a3e 100644 --- a/.clippy.toml +++ b/.clippy.toml @@ -1 +1 @@ -msrv = "1.40" +msrv = "1.47" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1aa686f..3ef88fc 100644 --- a/.github/workflows/ci.yml +++ 
b/.github/workflows/ci.yml @@ -37,7 +37,7 @@ jobs: matrix: # When updating this, the reminder to update the minimum supported # Rust version in Cargo.toml and .clippy.toml. - rust: ['1.40'] + rust: ['1.47'] steps: - uses: actions/checkout@v3 - name: Install Rust diff --git a/Cargo.toml b/Cargo.toml index af956e2..1462d58 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ name = "async-executor" version = "1.4.1" authors = ["Stjepan Glavina "] edition = "2018" -rust-version = "1.40" +rust-version = "1.47" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" From d1e4817bdca4b8b38880ee34a21e101019da9177 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 8 Jul 2022 01:21:16 +0900 Subject: [PATCH 011/110] Run Miri on CI (#27) --- .github/workflows/ci.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ef88fc..a5e9451 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -60,6 +60,18 @@ jobs: run: rustup update stable - run: cargo fmt --all --check + miri: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Install Rust + run: rustup toolchain install nightly --component miri && rustup default nightly + - run: cargo miri test + env: + # TODO: enable -Zmiri-strict-provenance once https://github.com/matklad/once_cell/pull/185 merged and released. 
+ MIRIFLAGS: -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation + RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout + security_audit: runs-on: ubuntu-latest steps: From 660747cd8d3e65cc833dc9999027871ea91e7b63 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 17 Jul 2022 21:46:43 +0900 Subject: [PATCH 012/110] Apply clippy to tests and examples --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a5e9451..f0451f6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,7 +50,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable - - run: cargo clippy --all-features + - run: cargo clippy --all-features --tests --examples fmt: runs-on: ubuntu-latest From d2daab599b1b105399c77abfeeea007464401fa3 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 16 Aug 2022 22:54:23 +0900 Subject: [PATCH 013/110] Enable -Zmiri-strict-provenance --- .github/workflows/ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f0451f6..27bfef9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -68,8 +68,7 @@ jobs: run: rustup toolchain install nightly --component miri && rustup default nightly - run: cargo miri test env: - # TODO: enable -Zmiri-strict-provenance once https://github.com/matklad/once_cell/pull/185 merged and released. 
- MIRIFLAGS: -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation + MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout security_audit: From 263ea8939042a050f7c158dfd142a03e5590ea3e Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 29 Oct 2022 21:41:55 -0700 Subject: [PATCH 014/110] Replace once_cell with async-lock (#29) --- Cargo.toml | 3 ++- src/lib.rs | 13 +++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1462d58..75d2949 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,11 +17,11 @@ categories = ["asynchronous", "concurrency"] exclude = ["/.*"] [dependencies] +async-lock = "2.6" async-task = "4.0.0" concurrent-queue = "1.2.2" fastrand = "1.3.4" futures-lite = "1.11.0" -once_cell = "1.4.1" slab = "0.4.4" [dev-dependencies] @@ -29,3 +29,4 @@ async-channel = "1.4.1" async-io = "1.1.9" easy-parallel = "3.1.0" num_cpus = "1.13.0" +once_cell = "1.16.0" diff --git a/src/lib.rs b/src/lib.rs index cf38665..24fbfac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,6 +28,7 @@ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::{Arc, Mutex, RwLock}; use std::task::{Poll, Waker}; +use async_lock::OnceCell; use async_task::Runnable; use concurrent_queue::ConcurrentQueue; use futures_lite::{future, prelude::*}; @@ -63,7 +64,7 @@ pub use async_task::Task; #[derive(Debug)] pub struct Executor<'a> { /// The executor state. - state: once_cell::sync::OnceCell>, + state: OnceCell>, /// Makes the `'a` lifetime invariant. _marker: PhantomData>, @@ -87,7 +88,7 @@ impl<'a> Executor<'a> { /// ``` pub const fn new() -> Executor<'a> { Executor { - state: once_cell::sync::OnceCell::new(), + state: OnceCell::new(), _marker: PhantomData, } } @@ -249,7 +250,7 @@ impl<'a> Executor<'a> { /// Returns a reference to the inner state. 
fn state(&self) -> &Arc { - self.state.get_or_init(|| Arc::new(State::new())) + self.state.get_or_init_blocking(|| Arc::new(State::new())) } } @@ -292,7 +293,7 @@ impl<'a> Default for Executor<'a> { #[derive(Debug)] pub struct LocalExecutor<'a> { /// The inner executor. - inner: once_cell::unsync::OnceCell>, + inner: Executor<'a>, /// Makes the type `!Send` and `!Sync`. _marker: PhantomData>, @@ -313,7 +314,7 @@ impl<'a> LocalExecutor<'a> { /// ``` pub const fn new() -> LocalExecutor<'a> { LocalExecutor { - inner: once_cell::unsync::OnceCell::new(), + inner: Executor::new(), _marker: PhantomData, } } @@ -447,7 +448,7 @@ impl<'a> LocalExecutor<'a> { /// Returns a reference to the inner executor. fn inner(&self) -> &Executor<'a> { - self.inner.get_or_init(Executor::new) + &self.inner } } From 92423cfaa1e35659a1eba1062885a49a77581514 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Wed, 9 Nov 2022 11:33:00 +0900 Subject: [PATCH 015/110] Remove msrv field from .clippy.toml Since Rust 1.64, Clippy respects `rust-version` field in Cargo.toml. rust-lang/rust@b776fb8 --- .clippy.toml | 1 - .github/workflows/ci.yml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 .clippy.toml diff --git a/.clippy.toml b/.clippy.toml deleted file mode 100644 index 7846a3e..0000000 --- a/.clippy.toml +++ /dev/null @@ -1 +0,0 @@ -msrv = "1.47" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 27bfef9..8037bf6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: strategy: matrix: # When updating this, the reminder to update the minimum supported - # Rust version in Cargo.toml and .clippy.toml. + # Rust version in Cargo.toml. 
rust: ['1.47'] steps: - uses: actions/checkout@v3 From c09ecba5bba3da14ea86db0907b3b7c6749b5bc8 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Wed, 9 Nov 2022 11:33:22 +0900 Subject: [PATCH 016/110] Update concurrent-queue to 2 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 75d2949..e1afe7b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ exclude = ["/.*"] [dependencies] async-lock = "2.6" async-task = "4.0.0" -concurrent-queue = "1.2.2" +concurrent-queue = "2.0.0" fastrand = "1.3.4" futures-lite = "1.11.0" slab = "0.4.4" From 00ea6cf6a19d23c1464790e53fe3f61ea0e94271 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Wed, 9 Nov 2022 11:34:15 +0900 Subject: [PATCH 017/110] Release 1.5.0 --- CHANGELOG.md | 5 +++++ Cargo.toml | 4 +--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fda0f60..0105c9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# Version 1.5.0 + +- Remove the dependency on the `once_cell` crate to restore the MSRV. (#29) +- Update `concurrent-queue` to v2. + # Version 1.4.1 - Remove dependency on deprecated `vec-arena`. 
(#23) diff --git a/Cargo.toml b/Cargo.toml index e1afe7b..faee198 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,15 +3,13 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.4.1" +version = "1.5.0" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.47" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" -homepage = "https://github.com/smol-rs/async-executor" -documentation = "https://docs.rs/async-executor" keywords = ["asynchronous", "executor", "single", "multi", "spawn"] categories = ["asynchronous", "concurrency"] exclude = ["/.*"] From 8287e520b947cb289f1f2e2b7f7d9106a8cc0f0f Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 25 Dec 2022 07:12:59 -0800 Subject: [PATCH 018/110] Implement debug output to be better (#33) --- src/lib.rs | 90 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 83 insertions(+), 7 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 24fbfac..5a1d203 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,12 +20,13 @@ #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +use std::fmt; use std::future::Future; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, Mutex, RwLock, TryLockError}; use std::task::{Poll, Waker}; use async_lock::OnceCell; @@ -61,7 +62,6 @@ pub use async_task::Task; /// drop(signal); /// })); /// ``` -#[derive(Debug)] pub struct Executor<'a> { /// The executor state. 
state: OnceCell>, @@ -76,6 +76,12 @@ unsafe impl Sync for Executor<'_> {} impl UnwindSafe for Executor<'_> {} impl RefUnwindSafe for Executor<'_> {} +impl fmt::Debug for Executor<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + debug_executor(self, "Executor", f) + } +} + impl<'a> Executor<'a> { /// Creates a new executor. /// @@ -290,7 +296,6 @@ impl<'a> Default for Executor<'a> { /// println!("Hello world!"); /// })); /// ``` -#[derive(Debug)] pub struct LocalExecutor<'a> { /// The inner executor. inner: Executor<'a>, @@ -302,6 +307,12 @@ pub struct LocalExecutor<'a> { impl UnwindSafe for LocalExecutor<'_> {} impl RefUnwindSafe for LocalExecutor<'_> {} +impl fmt::Debug for LocalExecutor<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + debug_executor(&self.inner, "LocalExecutor", f) + } +} + impl<'a> LocalExecutor<'a> { /// Creates a single-threaded executor. /// @@ -459,7 +470,6 @@ impl<'a> Default for LocalExecutor<'a> { } /// The state of a executor. -#[derive(Debug)] struct State { /// The global queue. queue: ConcurrentQueue, @@ -510,7 +520,6 @@ impl State { } /// A list of sleeping tickers. -#[derive(Debug)] struct Sleepers { /// Number of sleeping tickers (both notified and unnotified). count: usize, @@ -587,7 +596,6 @@ impl Sleepers { } /// Runs task one by one. -#[derive(Debug)] struct Ticker<'a> { /// The executor state. state: &'a State, @@ -708,7 +716,6 @@ impl Drop for Ticker<'_> { /// A worker in a work-stealing executor. /// /// This is just a ticker that also has an associated local queue for improved cache locality. -#[derive(Debug)] struct Runner<'a> { /// The executor state. state: &'a State, @@ -833,6 +840,75 @@ fn steal(src: &ConcurrentQueue, dest: &ConcurrentQueue) { } } +/// Debug implementation for `Executor` and `LocalExecutor`. +fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Get a reference to the state. 
+ let state = match executor.state.get() { + Some(state) => state, + None => { + // The executor has not been initialized. + struct Uninitialized; + + impl fmt::Debug for Uninitialized { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("") + } + } + + return f.debug_tuple(name).field(&Uninitialized).finish(); + } + }; + + /// Debug wrapper for the number of active tasks. + struct ActiveTasks<'a>(&'a Mutex>); + + impl fmt::Debug for ActiveTasks<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0.try_lock() { + Ok(lock) => fmt::Debug::fmt(&lock.len(), f), + Err(TryLockError::WouldBlock) => f.write_str(""), + Err(TryLockError::Poisoned(_)) => f.write_str(""), + } + } + } + + /// Debug wrapper for the local runners. + struct LocalRunners<'a>(&'a RwLock>>>); + + impl fmt::Debug for LocalRunners<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0.try_read() { + Ok(lock) => f + .debug_list() + .entries(lock.iter().map(|queue| queue.len())) + .finish(), + Err(TryLockError::WouldBlock) => f.write_str(""), + Err(TryLockError::Poisoned(_)) => f.write_str(""), + } + } + } + + /// Debug wrapper for the sleepers. + struct SleepCount<'a>(&'a Mutex); + + impl fmt::Debug for SleepCount<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0.try_lock() { + Ok(lock) => fmt::Debug::fmt(&lock.count, f), + Err(TryLockError::WouldBlock) => f.write_str(""), + Err(TryLockError::Poisoned(_)) => f.write_str(""), + } + } + } + + f.debug_struct(name) + .field("active", &ActiveTasks(&state.active)) + .field("global_tasks", &state.queue.len()) + .field("local_runners", &LocalRunners(&state.local_queues)) + .field("sleepers", &SleepCount(&state.sleepers)) + .finish() +} + /// Runs a closure when dropped. 
struct CallOnDrop(F); From b48a50310916ddc8f960b618cbcc7b8230871790 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Wed, 28 Dec 2022 11:57:53 +0900 Subject: [PATCH 019/110] Clean up CI config --- .github/workflows/ci.yml | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8037bf6..afa0537 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,11 +6,20 @@ on: branches: - master schedule: - - cron: '0 2 * * *' + - cron: '0 2 * * 0' env: - RUSTFLAGS: -D warnings + CARGO_INCREMENTAL: 0 + CARGO_NET_RETRY: 10 + CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 + RUSTFLAGS: -D warnings + RUSTDOCFLAGS: -D warnings + RUSTUP_MAX_RETRIES: 10 + +defaults: + run: + shell: bash jobs: test: @@ -75,6 +84,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions-rs/audit-check@v1 + # https://github.com/rustsec/audit-check/issues/2 + - uses: rustsec/audit-check@master with: token: ${{ secrets.GITHUB_TOKEN }} From f196463b0940ee89db02b3105d08b51cf8982304 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Wed, 28 Dec 2022 12:09:04 +0900 Subject: [PATCH 020/110] Enable dependabot update for Rust --- .github/dependabot.yml | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..52f7945 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: + - package-ecosystem: cargo + directory: / + schedule: + interval: weekly + commit-message: + prefix: '' + labels: [] From a988ee3e46d338e78fe38aa2066b9312c6ffe616 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Mon, 23 Jan 2023 19:30:43 +0000 Subject: [PATCH 021/110] m: Migrate benchmarks to criterion (#35) * m: Migrate to criterion * Update CI --- .github/workflows/ci.yml | 2 +- Cargo.toml | 5 ++ benches/executor.rs | 142 +++++++++++++++++++++------------------ 3 files 
changed, 84 insertions(+), 65 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index afa0537..ffcbcf4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,7 +59,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable - - run: cargo clippy --all-features --tests --examples + - run: cargo clippy --all-features --all-targets fmt: runs-on: ubuntu-latest diff --git a/Cargo.toml b/Cargo.toml index faee198..5bd5abd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,11 @@ slab = "0.4.4" [dev-dependencies] async-channel = "1.4.1" async-io = "1.1.9" +criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" num_cpus = "1.13.0" once_cell = "1.16.0" + +[[bench]] +name = "executor" +harness = false diff --git a/benches/executor.rs b/benches/executor.rs index 98f1cb5..7d3ec8c 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -1,10 +1,7 @@ -#![feature(test)] - -extern crate test; - use std::future::Future; use async_executor::Executor; +use criterion::{criterion_group, criterion_main, Criterion}; use futures_lite::{future, prelude::*}; const TASKS: usize = 300; @@ -23,87 +20,104 @@ fn run(f: impl FnOnce()) { }); } -#[bench] -fn create(b: &mut test::Bencher) { - b.iter(move || { - let ex = Executor::new(); - let task = ex.spawn(async {}); - future::block_on(ex.run(task)); +fn create(c: &mut Criterion) { + c.bench_function("executor::create", |b| { + b.iter(|| { + let ex = Executor::new(); + let task = ex.spawn(async {}); + future::block_on(ex.run(task)); + }) }); } -#[bench] -fn spawn_one(b: &mut test::Bencher) { - run(|| { - b.iter(move || { - future::block_on(async { EX.spawn(async {}).await }); +fn spawn_one(c: &mut Criterion) { + c.bench_function("executor::spawn_one", |b| { + run(|| { + b.iter(|| { + future::block_on(async { EX.spawn(async {}).await }); + }); }); }); } -#[bench] -fn spawn_many(b: &mut test::Bencher) { - 
run(|| { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..LIGHT_TASKS { - tasks.push(EX.spawn(async {})); - } - for task in tasks { - task.await; - } +fn spawn_many(c: &mut Criterion) { + c.bench_function("executor::spawn_many_local", |b| { + run(|| { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..LIGHT_TASKS { + tasks.push(EX.spawn(async {})); + } + for task in tasks { + task.await; + } + }); }); }); }); } -#[bench] -fn spawn_recursively(b: &mut test::Bencher) { - fn go(i: usize) -> impl Future + Send + 'static { - async move { - if i != 0 { - EX.spawn(async move { - let fut = go(i - 1).boxed(); - fut.await; - }) - .await; +fn spawn_recursively(c: &mut Criterion) { + c.bench_function("executor::spawn_recursively", |b| { + #[allow(clippy::manual_async_fn)] + fn go(i: usize) -> impl Future + Send + 'static { + async move { + if i != 0 { + EX.spawn(async move { + let fut = go(i - 1).boxed(); + fut.await; + }) + .await; + } } } - } - run(|| { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..TASKS { - tasks.push(EX.spawn(go(STEPS))); - } - for task in tasks { - task.await; - } + run(|| { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..TASKS { + tasks.push(EX.spawn(go(STEPS))); + } + for task in tasks { + task.await; + } + }); }); }); }); } -#[bench] -fn yield_now(b: &mut test::Bencher) { - run(|| { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..TASKS { - tasks.push(EX.spawn(async move { - for _ in 0..STEPS { - future::yield_now().await; - } - })); - } - for task in tasks { - task.await; - } +fn yield_now(c: &mut Criterion) { + c.bench_function("executor::yield_now", |b| { + run(|| { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..TASKS { + tasks.push(EX.spawn(async move { + for _ in 0..STEPS { + future::yield_now().await; 
+ } + })); + } + for task in tasks { + task.await; + } + }); }); }); }); } + +criterion_group!( + benches, + create, + spawn_one, + spawn_many, + spawn_recursively, + yield_now, +); + +criterion_main!(benches); From 4d8e7bad2356892bb34ee4cc9434eab1f8b12339 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 21 Jan 2023 20:06:57 +0900 Subject: [PATCH 022/110] Set CARGO_NET_GIT_FETCH_WITH_CLI=true in CI --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ffcbcf4..1e72217 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,6 +10,7 @@ on: env: CARGO_INCREMENTAL: 0 + CARGO_NET_GIT_FETCH_WITH_CLI: true CARGO_NET_RETRY: 10 CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 From ddfb54d1c469ddc201b2c6d521c6de5ac368ba3d Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 21 Jan 2023 20:32:53 +0900 Subject: [PATCH 023/110] Minimize GITHUB_TOKEN permissions Refs: https://github.blog/changelog/2021-04-20-github-actions-control-permissions-for-github_token --- .github/workflows/ci.yml | 3 +++ .github/workflows/release.yml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1e72217..dd32bb4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,8 @@ name: CI +permissions: + contents: read + on: pull_request: push: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index be57bd1..d61ac7a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,5 +1,8 @@ name: Release +permissions: + contents: write + on: push: tags: From b8885f957807c14b74dc00d0ccbe4bec3a50a20f Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 6 Mar 2023 01:43:03 +0900 Subject: [PATCH 024/110] Bump MSRV to 1.48 async-lock 2.7.0 requires Rust 1.48. 
``` error[E0658]: use of unstable library feature 'future_readiness_fns' --> /home/runner/.cargo/registry/src/github.com-1ecc6299db9ec823/async-lock-2.7.0/src/once_cell.rs:430:45 | 430 | self.initialize_or_wait(move || std::future::ready(closure()), &mut Blocking), | ^^^^^^^^^^^^^^^^^^ | ``` --- .github/workflows/ci.yml | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dd32bb4..eea3a01 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,7 +50,7 @@ jobs: matrix: # When updating this, the reminder to update the minimum supported # Rust version in Cargo.toml. - rust: ['1.47'] + rust: ['1.48'] steps: - uses: actions/checkout@v3 - name: Install Rust diff --git a/Cargo.toml b/Cargo.toml index 5bd5abd..b994277 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ name = "async-executor" version = "1.5.0" authors = ["Stjepan Glavina "] edition = "2018" -rust-version = "1.47" +rust-version = "1.48" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" From 6aba704efc09cc7881c96049f0da7ce48240e741 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 10 Mar 2023 19:18:48 -0800 Subject: [PATCH 025/110] bench: Add benchmarks for lower thread counts (#38) --- benches/executor.rs | 161 +++++++++++++++++++++++--------------------- 1 file changed, 83 insertions(+), 78 deletions(-) diff --git a/benches/executor.rs b/benches/executor.rs index 7d3ec8c..68cb4dd 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -10,10 +10,12 @@ const LIGHT_TASKS: usize = 25_000; static EX: Executor<'_> = Executor::new(); -fn run(f: impl FnOnce()) { +fn run(f: impl FnOnce(), multithread: bool) { + let limit = if multithread { num_cpus::get() } else { 1 }; + let (s, r) = async_channel::bounded::<()>(1); easy_parallel::Parallel::new() - .each(0..num_cpus::get(), |_| future::block_on(EX.run(r.recv()))) + 
.each(0..limit, |_| future::block_on(EX.run(r.recv()))) .finish(move || { let _s = s; f() @@ -30,94 +32,97 @@ fn create(c: &mut Criterion) { }); } -fn spawn_one(c: &mut Criterion) { - c.bench_function("executor::spawn_one", |b| { - run(|| { - b.iter(|| { - future::block_on(async { EX.spawn(async {}).await }); - }); +fn running_benches(c: &mut Criterion) { + for (group_name, multithread) in [("single_thread", false), ("multi_thread", true)].iter() { + let mut group = c.benchmark_group(group_name.to_string()); + + group.bench_function("executor::spawn_one", |b| { + run( + || { + b.iter(|| { + future::block_on(async { EX.spawn(async {}).await }); + }); + }, + *multithread, + ); }); - }); -} -fn spawn_many(c: &mut Criterion) { - c.bench_function("executor::spawn_many_local", |b| { - run(|| { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..LIGHT_TASKS { - tasks.push(EX.spawn(async {})); - } - for task in tasks { - task.await; - } - }); - }); + group.bench_function("executor::spawn_many_local", |b| { + run( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..LIGHT_TASKS { + tasks.push(EX.spawn(async {})); + } + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ); }); - }); -} -fn spawn_recursively(c: &mut Criterion) { - c.bench_function("executor::spawn_recursively", |b| { - #[allow(clippy::manual_async_fn)] - fn go(i: usize) -> impl Future + Send + 'static { - async move { - if i != 0 { - EX.spawn(async move { - let fut = go(i - 1).boxed(); - fut.await; - }) - .await; + group.bench_function("executor::spawn_recursively", |b| { + #[allow(clippy::manual_async_fn)] + fn go(i: usize) -> impl Future + Send + 'static { + async move { + if i != 0 { + EX.spawn(async move { + let fut = go(i - 1).boxed(); + fut.await; + }) + .await; + } } } - } - run(|| { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..TASKS { - 
tasks.push(EX.spawn(go(STEPS))); - } - for task in tasks { - task.await; - } - }); - }); + run( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..TASKS { + tasks.push(EX.spawn(go(STEPS))); + } + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ); }); - }); -} -fn yield_now(c: &mut Criterion) { - c.bench_function("executor::yield_now", |b| { - run(|| { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..TASKS { - tasks.push(EX.spawn(async move { - for _ in 0..STEPS { - future::yield_now().await; + group.bench_function("executor::yield_now", |b| { + run( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..TASKS { + tasks.push(EX.spawn(async move { + for _ in 0..STEPS { + future::yield_now().await; + } + })); } - })); - } - for task in tasks { - task.await; - } - }); - }); + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ); }); - }); + } } -criterion_group!( - benches, - create, - spawn_one, - spawn_many, - spawn_recursively, - yield_now, -); +criterion_group!(benches, create, running_benches); criterion_main!(benches); From a438e9da8c69c05842be4cf7f39e32e18fda0cde Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 7 Apr 2023 11:01:23 -0700 Subject: [PATCH 026/110] v1.5.1 (#40) --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0105c9e..ead7f67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.5.1 + +- Implement a better form of debug output for Executor and LocalExecutor. (#33) + # Version 1.5.0 - Remove the dependency on the `once_cell` crate to restore the MSRV. 
(#29) diff --git a/Cargo.toml b/Cargo.toml index b994277..e6bff80 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.5.0" +version = "1.5.1" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.48" From 8562c41062e8ff65b72004db2d498cbf0c1ff62f Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 10 Apr 2023 02:35:44 +0900 Subject: [PATCH 027/110] Update permissions for security_audit --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index eea3a01..fcf0232 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -85,6 +85,10 @@ jobs: RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout security_audit: + permissions: + checks: write + contents: read + issues: write runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 From 85c20eb98b84b27fc9c460429a94ad8b8920ed47 Mon Sep 17 00:00:00 2001 From: Yosh Date: Fri, 5 May 2023 14:38:00 +0200 Subject: [PATCH 028/110] Replace `num_cpus` crate with `std::thread` (#42) --- Cargo.toml | 1 - benches/executor.rs | 7 ++++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e6bff80..5a6f068 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,6 @@ async-channel = "1.4.1" async-io = "1.1.9" criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" -num_cpus = "1.13.0" once_cell = "1.16.0" [[bench]] diff --git a/benches/executor.rs b/benches/executor.rs index 68cb4dd..20d41a1 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -1,4 +1,5 @@ use std::future::Future; +use std::thread::available_parallelism; use async_executor::Executor; use criterion::{criterion_group, criterion_main, Criterion}; @@ -11,7 +12,11 @@ const LIGHT_TASKS: usize = 25_000; static EX: Executor<'_> = 
Executor::new(); fn run(f: impl FnOnce(), multithread: bool) { - let limit = if multithread { num_cpus::get() } else { 1 }; + let limit = if multithread { + available_parallelism().unwrap().get() + } else { + 1 + }; let (s, r) = async_channel::bounded::<()>(1); easy_parallel::Parallel::new() From 1a9e08ce73f2c356103bfaf14cafd6fde376565b Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 9 Jun 2023 17:53:03 -0700 Subject: [PATCH 029/110] Use fastrand v2.0.0 (#45) --- Cargo.toml | 2 +- src/lib.rs | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5a6f068..5792614 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ exclude = ["/.*"] async-lock = "2.6" async-task = "4.0.0" concurrent-queue = "2.0.0" -fastrand = "1.3.4" +fastrand = "2.0.0" futures-lite = "1.11.0" slab = "0.4.4" diff --git a/src/lib.rs b/src/lib.rs index 5a1d203..d8e59ca 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -227,12 +227,13 @@ impl<'a> Executor<'a> { /// ``` pub async fn run(&self, future: impl Future) -> T { let runner = Runner::new(self.state()); + let mut rng = fastrand::Rng::new(); // A future that runs tasks forever. let run_forever = async { loop { for _ in 0..200 { - let runnable = runner.runnable().await; + let runnable = runner.runnable(&mut rng).await; runnable.run(); } future::yield_now().await; @@ -748,7 +749,7 @@ impl Runner<'_> { } /// Waits for the next runnable task to run. - async fn runnable(&self) -> Runnable { + async fn runnable(&self, rng: &mut fastrand::Rng) -> Runnable { let runnable = self .ticker .runnable_with(|| { @@ -768,7 +769,7 @@ impl Runner<'_> { // Pick a random starting point in the iterator list and rotate the list. 
let n = local_queues.len(); - let start = fastrand::usize(..n); + let start = rng.usize(..n); let iter = local_queues .iter() .chain(local_queues.iter()) From 9df3dd49741170561ca7eadbfeba49430dcbb96b Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 2 Jul 2023 11:29:19 -0700 Subject: [PATCH 030/110] alg: Push tasks directly to the local runner This commit adds an optimization where a thread-local variable contains the queue of the current runner. Rather than pushing to the global queue and hoping that a local queue eventually picks it up, tasks are pushed directly to this local queue if available. This has led to speedups of up to 70% in some cases and up to 10% in other workloads. --- src/lib.rs | 162 +++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 145 insertions(+), 17 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index d8e59ca..904803f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,6 +20,7 @@ #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +use std::cell::RefCell; use std::fmt; use std::future::Future; use std::marker::PhantomData; @@ -229,29 +230,56 @@ impl<'a> Executor<'a> { let runner = Runner::new(self.state()); let mut rng = fastrand::Rng::new(); - // A future that runs tasks forever. - let run_forever = async { - loop { - for _ in 0..200 { - let runnable = runner.runnable(&mut rng).await; - runnable.run(); - } - future::yield_now().await; - } - }; + // Set the local queue while we're running. + LocalQueue::set(self.state(), &runner.local, { + let runner = &runner; + async move { + // A future that runs tasks forever. + let run_forever = async { + loop { + for _ in 0..200 { + let runnable = runner.runnable(&mut rng).await; + runnable.run(); + } + future::yield_now().await; + } + }; - // Run `future` and `run_forever` concurrently until `future` completes. - future.or(run_forever).await + // Run `future` and `run_forever` concurrently until `future` completes. 
+ future.or(run_forever).await + } + }) + .await } /// Returns a function that schedules a runnable task when it gets woken up. fn schedule(&self) -> impl Fn(Runnable) + Send + Sync + 'static { let state = self.state().clone(); - // TODO(stjepang): If possible, push into the current local queue and notify the ticker. + // If possible, push into the current local queue and notify the ticker. move |runnable| { - state.queue.push(runnable).unwrap(); - state.notify(); + let mut runnable = Some(runnable); + + // Try to push into the local queue. + LocalQueue::with(|local_queue| { + // Make sure that we don't accidentally push to an executor that isn't ours. + if !std::ptr::eq(local_queue.state, &*state) { + return; + } + + if let Err(e) = local_queue.queue.push(runnable.take().unwrap()) { + runnable = Some(e.into_inner()); + return; + } + + local_queue.waker.wake_by_ref(); + }); + + // If the local queue push failed, just push to the global queue. + if let Some(runnable) = runnable { + state.queue.push(runnable).unwrap(); + state.notify(); + } } } @@ -819,6 +847,97 @@ impl Drop for Runner<'_> { } } +/// The state of the currently running local queue. +struct LocalQueue { + /// The pointer to the state of the executor. + /// + /// Used to make sure we don't push runnables to the wrong executor. + state: *const State, + + /// The concurrent queue. + queue: Arc>, + + /// The waker for the runnable. + waker: Waker, +} + +impl LocalQueue { + /// Run a function with the current local queue. + fn with(f: impl FnOnce(&LocalQueue) -> R) -> Option { + std::thread_local! { + /// The current local queue. + static LOCAL_QUEUE: RefCell> = RefCell::new(None); + } + + impl LocalQueue { + /// Run a function with a set local queue. + async fn set( + state: &State, + queue: &Arc>, + fut: F, + ) -> F::Output + where + F: Future, + { + // Store the local queue and the current waker. 
+ let mut old = with_waker(|waker| { + LOCAL_QUEUE.with(move |slot| { + slot.borrow_mut().replace(LocalQueue { + state: state as *const State, + queue: queue.clone(), + waker: waker.clone(), + }) + }) + }) + .await; + + // Restore the old local queue on drop. + let _guard = CallOnDrop(move || { + let old = old.take(); + let _ = LOCAL_QUEUE.try_with(move |slot| { + *slot.borrow_mut() = old; + }); + }); + + // Pin the future. + futures_lite::pin!(fut); + + // Run it such that the waker is updated every time it's polled. + future::poll_fn(move |cx| { + LOCAL_QUEUE + .try_with({ + let waker = cx.waker(); + move |slot| { + let mut slot = slot.borrow_mut(); + let qaw = slot.as_mut().expect("missing local queue"); + + // If we've been replaced, just ignore the slot. + if !Arc::ptr_eq(&qaw.queue, queue) { + return; + } + + // Update the waker, if it has changed. + if !qaw.waker.will_wake(waker) { + qaw.waker = waker.clone(); + } + } + }) + .ok(); + + // Poll the future. + fut.as_mut().poll(cx) + }) + .await + } + } + + LOCAL_QUEUE + .try_with(|local_queue| local_queue.borrow().as_ref().map(f)) + .ok() + .flatten() + } +} + /// Steals some items from one queue into another. fn steal(src: &ConcurrentQueue, dest: &ConcurrentQueue) { // Half of `src`'s length rounded up. @@ -911,10 +1030,19 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ } /// Runs a closure when dropped. -struct CallOnDrop(F); +struct CallOnDrop(F); -impl Drop for CallOnDrop { +impl Drop for CallOnDrop { fn drop(&mut self) { (self.0)(); } } + +/// Run a closure with the current waker. 
+fn with_waker R, R>(f: F) -> impl Future { + let mut f = Some(f); + future::poll_fn(move |cx| { + let f = f.take().unwrap(); + Poll::Ready(f(cx.waker())) + }) +} From aed7279805e7818b006eb2cbbfeb789d5709b60a Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 16 Jul 2023 22:35:07 -0700 Subject: [PATCH 031/110] Add smol-rs logo (#46) --- src/lib.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index 904803f..ff2b8d8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,6 +19,12 @@ //! ``` #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] use std::cell::RefCell; use std::fmt; From e19573367bd92c0219d7f68a12a3955e1e0d213a Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 19 Aug 2023 19:17:39 -0700 Subject: [PATCH 032/110] v1.5.2 Signed-off-by: John Nunley --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ead7f67..8e93304 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# Version 1.5.2 + +- Add thread-local task queue optimizations, allowing new tasks to avoid using the global queue. (#37) +- Update `fastrand` to v2. (#45) + # Version 1.5.1 - Implement a better form of debug output for Executor and LocalExecutor. 
(#33) diff --git a/Cargo.toml b/Cargo.toml index 5792614..197afc9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.5.1" +version = "1.5.2" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.48" From a5ff8df7d94eb5f716132f6702d1d662a29034d6 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 20 Aug 2023 17:08:35 -0700 Subject: [PATCH 033/110] bugfix: Ensure that ex.run() produces a Send future This commit makes sure that the run() and tick() functions produce futures that are Send and Sync, to prevent a regression introduced in PR #37. Tests are also added to prevent this regression in the future. Signed-off-by: John Nunley --- src/lib.rs | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ff2b8d8..d4d77a5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -269,7 +269,7 @@ impl<'a> Executor<'a> { // Try to push into the local queue. LocalQueue::with(|local_queue| { // Make sure that we don't accidentally push to an executor that isn't ours. - if !std::ptr::eq(local_queue.state, &*state) { + if local_queue.state != &*state as *const State as usize { return; } @@ -858,7 +858,7 @@ struct LocalQueue { /// The pointer to the state of the executor. /// /// Used to make sure we don't push runnables to the wrong executor. - state: *const State, + state: usize, /// The concurrent queue. 
queue: Arc>, @@ -889,7 +889,7 @@ impl LocalQueue { let mut old = with_waker(|waker| { LOCAL_QUEUE.with(move |slot| { slot.borrow_mut().replace(LocalQueue { - state: state as *const State, + state: state as *const State as usize, queue: queue.clone(), waker: waker.clone(), }) @@ -1052,3 +1052,37 @@ fn with_waker R, R>(f: F) -> impl Future { Poll::Ready(f(cx.waker())) }) } + +fn _ensure_send_and_sync() { + use futures_lite::future::pending; + + fn is_send(_: T) {} + fn is_sync(_: T) {} + + is_send::>(Executor::new()); + is_sync::>(Executor::new()); + + let ex = Executor::new(); + is_send(ex.run(pending::<()>())); + is_sync(ex.run(pending::<()>())); + is_send(ex.tick()); + is_sync(ex.tick()); + + /// ```compile_fail + /// use async_executor::LocalExecutor; + /// use futures_lite::future::pending; + /// + /// fn is_send(_: T) {} + /// fn is_sync(_: T) {} + /// + /// is_send::>(LocalExecutor::new()); + /// is_sync::>(LocalExecutor::new()); + /// + /// let ex = LocalExecutor::new(); + /// is_send(ex.run(pending::<()>())); + /// is_sync(ex.run(pending::<()>())); + /// is_send(ex.tick()); + /// is_sync(ex.tick()); + /// ``` + fn _negative_test() {} +} From 609aafb33074fac638a9deff3c5d27646208c2c0 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 5 Sep 2023 00:49:20 +0900 Subject: [PATCH 034/110] Bump MSRV to 1.61 ``` error: package `memchr v2.6.3` cannot be built because it requires rustc 1.61 or newer, while the currently active rustc version is 1.60.0 ``` --- .github/workflows/ci.yml | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fcf0232..9305870 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,7 +50,7 @@ jobs: matrix: # When updating this, the reminder to update the minimum supported # Rust version in Cargo.toml. 
- rust: ['1.48'] + rust: ['1.61'] steps: - uses: actions/checkout@v3 - name: Install Rust diff --git a/Cargo.toml b/Cargo.toml index 197afc9..b14a9b1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ name = "async-executor" version = "1.5.2" authors = ["Stjepan Glavina "] edition = "2018" -rust-version = "1.48" +rust-version = "1.61" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" From ff67cb9a5f628bea9e0ca46a5ad203e2c6f8d812 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 10 Sep 2023 18:18:02 +0900 Subject: [PATCH 035/110] Update actions/checkout action to v4 --- .github/workflows/ci.yml | 12 ++++++------ .github/workflows/release.yml | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9305870..66f75cf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,7 +34,7 @@ jobs: os: [ubuntu-latest] rust: [nightly, beta, stable] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} - run: cargo build --all --all-features --all-targets @@ -52,7 +52,7 @@ jobs: # Rust version in Cargo.toml. 
rust: ['1.61'] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} - run: cargo build @@ -60,7 +60,7 @@ jobs: clippy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup update stable - run: cargo clippy --all-features --all-targets @@ -68,7 +68,7 @@ jobs: fmt: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup update stable - run: cargo fmt --all --check @@ -76,7 +76,7 @@ jobs: miri: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup toolchain install nightly --component miri && rustup default nightly - run: cargo miri test @@ -91,7 +91,7 @@ jobs: issues: write runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 # https://github.com/rustsec/audit-check/issues/2 - uses: rustsec/audit-check@master with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d61ac7a..59fad1f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 'smol-rs' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: taiki-e/create-gh-release-action@v1 with: changelog: CHANGELOG.md From ecddfde87afaa7c3a9ab87d01fadd20b25b688f5 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 23 Sep 2023 11:01:56 -0700 Subject: [PATCH 036/110] m: Remove unused memchr dependency Signed-off-by: John Nunley --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index b14a9b1..acd2a71 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ async-lock = "2.6" async-task = "4.0.0" concurrent-queue = "2.0.0" fastrand = "2.0.0" -futures-lite = "1.11.0" 
+futures-lite = { version = "1.11.0", default-features = false } slab = "0.4.4" [dev-dependencies] From 77b5b169c583259c27b8e9fc4c484aaebee2ad76 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Mon, 25 Sep 2023 09:52:25 -0700 Subject: [PATCH 037/110] v1.5.3 Signed-off-by: John Nunley --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e93304..e7cbb7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# Version 1.5.3 + +- Fix an accidental breaking change in v1.5.2, where `ex.run()` was no longer `Send`. (#50) +- Remove the unused `memchr` dependency. (#51) + # Version 1.5.2 - Add thread-local task queue optimizations, allowing new tasks to avoid using the global queue. (#37) diff --git a/Cargo.toml b/Cargo.toml index acd2a71..d5a7d2f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.5.2" +version = "1.5.3" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.61" From 4154ad219091383d8f522223d0e358bf7056cc2f Mon Sep 17 00:00:00 2001 From: John Nunley Date: Wed, 27 Sep 2023 20:01:15 -0700 Subject: [PATCH 038/110] Fix a bug where TLS would become None (#55) * Fix a bug where TLS would become None The bug is invoked as follows: - Runner 1 is created and stores the current version of the TLS LOCAL_QUEUE variable, which is None. - Runner 2 is also created. It stores the current version of the TLS variable as well, which is Runner 1's queue. - Runner 1 is dropped. It stores None into the LOCAL_QUEUE variable. - Runner 2 tries to run. It reads from the LOCAL_QUEUE variable, sees that it is None, and panics. This could be solved by just not using the local queue if the variable is None. However, we can do one better; if the slot is open, we can optimize the runner by replacing it with our own queue. 
This should allow for the local queue to be used more often. Closes #54 Signed-off-by: John Nunley --- src/lib.rs | 25 +++++++++++++++++-------- tests/different_executors.rs | 34 ++++++++++++++++++++++++++++++++++ tests/local_queue.rs | 24 ++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 8 deletions(-) create mode 100644 tests/different_executors.rs create mode 100644 tests/local_queue.rs diff --git a/src/lib.rs b/src/lib.rs index d4d77a5..213f11f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -885,15 +885,16 @@ impl LocalQueue { where F: Future, { + // Make the `LocalQueue` structure. + let make_local_queue = |waker: &Waker| LocalQueue { + state: state as *const State as usize, + queue: queue.clone(), + waker: waker.clone(), + }; + // Store the local queue and the current waker. let mut old = with_waker(|waker| { - LOCAL_QUEUE.with(move |slot| { - slot.borrow_mut().replace(LocalQueue { - state: state as *const State as usize, - queue: queue.clone(), - waker: waker.clone(), - }) - }) + LOCAL_QUEUE.with(move |slot| slot.borrow_mut().replace(make_local_queue(waker))) }) .await; @@ -915,7 +916,15 @@ impl LocalQueue { let waker = cx.waker(); move |slot| { let mut slot = slot.borrow_mut(); - let qaw = slot.as_mut().expect("missing local queue"); + let qaw = match slot.as_mut() { + None => { + // Another local queue dropped itself and replaced with None, + // we can take its place! + *slot = Some(make_local_queue(waker)); + return; + } + Some(qaw) => qaw, + }; // If we've been replaced, just ignore the slot. 
if !Arc::ptr_eq(&qaw.queue, queue) { diff --git a/tests/different_executors.rs b/tests/different_executors.rs new file mode 100644 index 0000000..afef3be --- /dev/null +++ b/tests/different_executors.rs @@ -0,0 +1,34 @@ +use async_executor::LocalExecutor; +use futures_lite::future::{block_on, pending, poll_once}; +use futures_lite::pin; +use std::cell::Cell; + +#[test] +fn shared_queue_slot() { + block_on(async { + let was_polled = Cell::new(false); + let future = async { + was_polled.set(true); + pending::<()>().await; + }; + + let ex1 = LocalExecutor::new(); + let ex2 = LocalExecutor::new(); + + // Start the futures for running forever. + let (run1, run2) = (ex1.run(pending::<()>()), ex2.run(pending::<()>())); + pin!(run1); + pin!(run2); + assert!(poll_once(run1.as_mut()).await.is_none()); + assert!(poll_once(run2.as_mut()).await.is_none()); + + // Spawn the future on executor one and then poll executor two. + ex1.spawn(future).detach(); + assert!(poll_once(run2).await.is_none()); + assert!(!was_polled.get()); + + // Poll the first one. + assert!(poll_once(run1).await.is_none()); + assert!(was_polled.get()); + }); +} diff --git a/tests/local_queue.rs b/tests/local_queue.rs new file mode 100644 index 0000000..4678366 --- /dev/null +++ b/tests/local_queue.rs @@ -0,0 +1,24 @@ +use async_executor::Executor; +use futures_lite::{future, pin}; + +#[test] +fn two_queues() { + future::block_on(async { + // Create an executor with two runners. + let ex = Executor::new(); + let (run1, run2) = ( + ex.run(future::pending::<()>()), + ex.run(future::pending::<()>()), + ); + let mut run1 = Box::pin(run1); + pin!(run2); + + // Poll them both. + assert!(future::poll_once(run1.as_mut()).await.is_none()); + assert!(future::poll_once(run2.as_mut()).await.is_none()); + + // Drop the first one, which should leave the local queue in the `None` state. 
+ drop(run1); + assert!(future::poll_once(run2.as_mut()).await.is_none()); + }); +} From 2cfb6e4ed03c7eaf6c5fb2d3170ac870ba0a9b9d Mon Sep 17 00:00:00 2001 From: John Nunley Date: Wed, 27 Sep 2023 21:17:03 -0700 Subject: [PATCH 039/110] v1.5.4 Signed-off-by: John Nunley --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7cbb7b..b795839 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.5.4 + +- Fix a panic that could happen when two concurrent `run()` calls are made and the thread local task slot is left as `None`. (#55) + # Version 1.5.3 - Fix an accidental breaking change in v1.5.2, where `ex.run()` was no longer `Send`. (#50) diff --git a/Cargo.toml b/Cargo.toml index d5a7d2f..c81851b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.5.3" +version = "1.5.4" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.61" From 917caad8b94077dd136f98ea02a9f1b10cb431f9 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 15 Oct 2023 19:26:22 -0700 Subject: [PATCH 040/110] ex: Add an example of an executor with limited tasks Signed-off-by: John Nunley --- Cargo.toml | 2 + examples/limit.rs | 167 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 169 insertions(+) create mode 100644 examples/limit.rs diff --git a/Cargo.toml b/Cargo.toml index c81851b..50fda40 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,8 @@ async-channel = "1.4.1" async-io = "1.1.9" criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" +event-listener = "3.0.0" +fastrand = "2.0.0" once_cell = "1.16.0" [[bench]] diff --git a/examples/limit.rs b/examples/limit.rs new file mode 100644 index 0000000..80b5f9e --- /dev/null +++ b/examples/limit.rs @@ -0,0 +1,167 @@ +//! 
An executor where you can only push a limited number of tasks. + +use async_executor::{Executor, Task}; +use event_listener::{Event, EventListener}; +use futures_lite::pin; +use std::{ + future::Future, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + +/// An executor where you can only push a limited number of tasks. +struct LimitedExecutor { + /// Inner running executor. + executor: Executor<'static>, + + /// Shared state. + shared: Arc, +} + +struct SharedState { + /// The maximum number of tasks that can be pushed. + max: usize, + + /// The current number of active tasks. + active: AtomicUsize, + + /// Event listeners for when a new task is available. + slot_available: Event, +} + +impl LimitedExecutor { + fn new(max: usize) -> Self { + Self { + executor: Executor::new(), + shared: Arc::new(SharedState { + max, + active: AtomicUsize::new(0), + slot_available: Event::new(), + }), + } + } + + /// Spawn a task, waiting until there is a slot available. + async fn spawn(&self, future: F) -> Task + where + F::Output: Send + 'static, + { + let listener = EventListener::new(&self.shared.slot_available); + pin!(listener); + + // Load the current number of active tasks. + let mut active = self.shared.active.load(Ordering::Acquire); + + loop { + // Check if there is a slot available. + if active < self.shared.max { + // Try to set the slot to what would be the new number of tasks. + let new_active = active + 1; + match self.shared.active.compare_exchange( + active, + new_active, + Ordering::SeqCst, + Ordering::SeqCst, + ) { + Ok(_) => { + // Wrap the future in another future that decrements the active count + // when it's done. + let future = { + let shared = self.shared.clone(); + async move { + struct DecOnDrop(Arc); + + impl Drop for DecOnDrop { + fn drop(&mut self) { + // Decrement the count and notify someone. 
+ self.0.active.fetch_sub(1, Ordering::SeqCst); + self.0.slot_available.notify(usize::MAX); + } + } + + let _dec = DecOnDrop(shared); + future.await + } + }; + + // Wake up another waiter, in case there is one. + self.shared.slot_available.notify(1); + + // Spawn the task. + return self.executor.spawn(future); + } + + Err(actual) => { + // Try again. + active = actual; + } + } + } else { + // Start waiting for a slot to become available. + if listener.as_ref().is_listening() { + listener.as_mut().await; + } else { + listener.as_mut().listen(); + } + + active = self.shared.active.load(Ordering::Acquire); + } + } + } + + /// Run a future to completion. + async fn run(&self, future: F) -> F::Output { + self.executor.run(future).await + } +} + +fn main() { + futures_lite::future::block_on(async { + let ex = Arc::new(LimitedExecutor::new(10)); + ex.run({ + let ex = ex.clone(); + async move { + // Spawn a bunch of tasks that wait for a while. + for i in 0..15 { + ex.spawn(async move { + async_io::Timer::after(Duration::from_millis(fastrand::u64(1..3))).await; + println!("Waiting task #{i} finished!"); + }) + .await + .detach(); + } + + let (start_tx, start_rx) = async_channel::bounded::<()>(1); + let mut current_rx = start_rx; + + // Send the first message. + start_tx.send(()).await.unwrap(); + + // Spawn a bunch of channel tasks that wake eachother up. + for i in 0..25 { + let (next_tx, next_rx) = async_channel::bounded::<()>(1); + + ex.spawn(async move { + current_rx.recv().await.unwrap(); + println!("Channel task {i} woken up!"); + next_tx.send(()).await.unwrap(); + println!("Channel task {i} finished!"); + }) + .await + .detach(); + + current_rx = next_rx; + } + + // Wait for the last task to finish. 
+ current_rx.recv().await.unwrap(); + + println!("All tasks finished!"); + } + }) + .await; + }); +} From 8a0832c090625dd36e66ff7b42a76a80b421f90f Mon Sep 17 00:00:00 2001 From: John Nunley Date: Mon, 16 Oct 2023 18:50:00 -0700 Subject: [PATCH 041/110] m: Remove the thread-local executor optimization This was added in #37 as an optimization, but has since lead to many bugs. See the issues #53, #57 and #60 for more information. I do not have the bandwidth to address all of these bugs, so I'm taking the path of least resistance by just removing the problematic code. CLoses #53, #57 and #60 Signed-off-by: John Nunley --- src/lib.rs | 167 +++++------------------------------------------------ 1 file changed, 15 insertions(+), 152 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 213f11f..79428cd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -26,7 +26,6 @@ html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" )] -use std::cell::RefCell; use std::fmt; use std::future::Future; use std::marker::PhantomData; @@ -236,56 +235,29 @@ impl<'a> Executor<'a> { let runner = Runner::new(self.state()); let mut rng = fastrand::Rng::new(); - // Set the local queue while we're running. - LocalQueue::set(self.state(), &runner.local, { - let runner = &runner; - async move { - // A future that runs tasks forever. - let run_forever = async { - loop { - for _ in 0..200 { - let runnable = runner.runnable(&mut rng).await; - runnable.run(); - } - future::yield_now().await; - } - }; - - // Run `future` and `run_forever` concurrently until `future` completes. - future.or(run_forever).await + // A future that runs tasks forever. + let run_forever = async { + loop { + for _ in 0..200 { + let runnable = runner.runnable(&mut rng).await; + runnable.run(); + } + future::yield_now().await; } - }) - .await + }; + + // Run `future` and `run_forever` concurrently until `future` completes. 
+ future.or(run_forever).await } /// Returns a function that schedules a runnable task when it gets woken up. fn schedule(&self) -> impl Fn(Runnable) + Send + Sync + 'static { let state = self.state().clone(); - // If possible, push into the current local queue and notify the ticker. + // TODO: If possible, push into the current local queue and notify the ticker. move |runnable| { - let mut runnable = Some(runnable); - - // Try to push into the local queue. - LocalQueue::with(|local_queue| { - // Make sure that we don't accidentally push to an executor that isn't ours. - if local_queue.state != &*state as *const State as usize { - return; - } - - if let Err(e) = local_queue.queue.push(runnable.take().unwrap()) { - runnable = Some(e.into_inner()); - return; - } - - local_queue.waker.wake_by_ref(); - }); - - // If the local queue push failed, just push to the global queue. - if let Some(runnable) = runnable { - state.queue.push(runnable).unwrap(); - state.notify(); - } + state.queue.push(runnable).unwrap(); + state.notify(); } } @@ -853,106 +825,6 @@ impl Drop for Runner<'_> { } } -/// The state of the currently running local queue. -struct LocalQueue { - /// The pointer to the state of the executor. - /// - /// Used to make sure we don't push runnables to the wrong executor. - state: usize, - - /// The concurrent queue. - queue: Arc>, - - /// The waker for the runnable. - waker: Waker, -} - -impl LocalQueue { - /// Run a function with the current local queue. - fn with(f: impl FnOnce(&LocalQueue) -> R) -> Option { - std::thread_local! { - /// The current local queue. - static LOCAL_QUEUE: RefCell> = RefCell::new(None); - } - - impl LocalQueue { - /// Run a function with a set local queue. - async fn set( - state: &State, - queue: &Arc>, - fut: F, - ) -> F::Output - where - F: Future, - { - // Make the `LocalQueue` structure. 
- let make_local_queue = |waker: &Waker| LocalQueue { - state: state as *const State as usize, - queue: queue.clone(), - waker: waker.clone(), - }; - - // Store the local queue and the current waker. - let mut old = with_waker(|waker| { - LOCAL_QUEUE.with(move |slot| slot.borrow_mut().replace(make_local_queue(waker))) - }) - .await; - - // Restore the old local queue on drop. - let _guard = CallOnDrop(move || { - let old = old.take(); - let _ = LOCAL_QUEUE.try_with(move |slot| { - *slot.borrow_mut() = old; - }); - }); - - // Pin the future. - futures_lite::pin!(fut); - - // Run it such that the waker is updated every time it's polled. - future::poll_fn(move |cx| { - LOCAL_QUEUE - .try_with({ - let waker = cx.waker(); - move |slot| { - let mut slot = slot.borrow_mut(); - let qaw = match slot.as_mut() { - None => { - // Another local queue dropped itself and replaced with None, - // we can take its place! - *slot = Some(make_local_queue(waker)); - return; - } - Some(qaw) => qaw, - }; - - // If we've been replaced, just ignore the slot. - if !Arc::ptr_eq(&qaw.queue, queue) { - return; - } - - // Update the waker, if it has changed. - if !qaw.waker.will_wake(waker) { - qaw.waker = waker.clone(); - } - } - }) - .ok(); - - // Poll the future. - fut.as_mut().poll(cx) - }) - .await - } - } - - LOCAL_QUEUE - .try_with(|local_queue| local_queue.borrow().as_ref().map(f)) - .ok() - .flatten() - } -} - /// Steals some items from one queue into another. fn steal(src: &ConcurrentQueue, dest: &ConcurrentQueue) { // Half of `src`'s length rounded up. @@ -1053,15 +925,6 @@ impl Drop for CallOnDrop { } } -/// Run a closure with the current waker. 
-fn with_waker R, R>(f: F) -> impl Future { - let mut f = Some(f); - future::poll_fn(move |cx| { - let f = f.take().unwrap(); - Poll::Ready(f(cx.waker())) - }) -} - fn _ensure_send_and_sync() { use futures_lite::future::pending; From 599c71a3f97fcb15e41b56210e50ec95931dcb90 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Mon, 16 Oct 2023 19:17:27 -0700 Subject: [PATCH 042/110] v1.6.0 Signed-off-by: John Nunley --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b795839..659148b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.6.0 + +- Remove the thread-local queue optimization, as it caused a number of bugs in production use cases. (#61) + # Version 1.5.4 - Fix a panic that could happen when two concurrent `run()` calls are made and the thread local task slot is left as `None`. (#55) diff --git a/Cargo.toml b/Cargo.toml index 50fda40..0af474b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.5.4" +version = "1.6.0" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.61" From b91875e73bd9aec582e099d8c792514381fc8d0f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 20:54:02 -0700 Subject: [PATCH 043/110] deps: Update async-channel requirement from 1.4.1 to 2.0.0 Updates the requirements on [async-channel](https://github.com/smol-rs/async-channel) to permit the latest version. - [Release notes](https://github.com/smol-rs/async-channel/releases) - [Changelog](https://github.com/smol-rs/async-channel/blob/master/CHANGELOG.md) - [Commits](https://github.com/smol-rs/async-channel/compare/v1.4.1...v2.0.0) --- updated-dependencies: - dependency-name: async-channel dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 0af474b..6d46d4a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ futures-lite = { version = "1.11.0", default-features = false } slab = "0.4.4" [dev-dependencies] -async-channel = "1.4.1" +async-channel = "2.0.0" async-io = "1.1.9" criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" From e1e2ab11df8d096ec119bd7a4e3dd338ea6c21e4 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Wed, 1 Nov 2023 20:09:35 -0700 Subject: [PATCH 044/110] Bump async-io, async-lock and futures-lite Signed-off-by: John Nunley --- Cargo.toml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6d46d4a..317b93d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,20 +15,21 @@ categories = ["asynchronous", "concurrency"] exclude = ["/.*"] [dependencies] -async-lock = "2.6" +async-lock = "3.0.0" async-task = "4.0.0" concurrent-queue = "2.0.0" fastrand = "2.0.0" -futures-lite = { version = "1.11.0", default-features = false } +futures-lite = { version = "2.0.0", default-features = false } slab = "0.4.4" [dev-dependencies] async-channel = "2.0.0" -async-io = "1.1.9" +async-io = "2.1.0" criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" event-listener = "3.0.0" fastrand = "2.0.0" +futures-lite = "2.0.0" once_cell = "1.16.0" [[bench]] From 457cf7b888115eb34bea333d615c83b2cc9c8cb0 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Wed, 1 Nov 2023 20:13:37 -0700 Subject: [PATCH 045/110] Disable leaky test for MIRI Signed-off-by: John Nunley --- tests/drop.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/drop.rs b/tests/drop.rs index dd20add..202b78d 100644 --- a/tests/drop.rs +++ 
b/tests/drop.rs @@ -38,6 +38,7 @@ fn executor_cancels_everything() { assert_eq!(DROP.load(Ordering::SeqCst), 1); } +#[cfg(not(miri))] #[test] fn leaked_executor_leaks_everything() { static DROP: AtomicUsize = AtomicUsize::new(0); From 361c5fd359380771f117e06b380f82774a9e98f4 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Thu, 2 Nov 2023 19:41:44 -0700 Subject: [PATCH 046/110] Fix missing import on Miri Signed-off-by: John Nunley --- tests/drop.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/drop.rs b/tests/drop.rs index 202b78d..2b1ce56 100644 --- a/tests/drop.rs +++ b/tests/drop.rs @@ -1,3 +1,4 @@ +#[cfg(not(miri))] use std::mem; use std::panic::catch_unwind; use std::sync::atomic::{AtomicUsize, Ordering}; From c7fd967c9e0658e886203999ffcf10698bc1002c Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 5 Nov 2023 17:24:42 -0800 Subject: [PATCH 047/110] v1.7.0 Signed-off-by: John Nunley --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 659148b..0226832 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.7.0 + +- Bump `async-lock` and `futures-lite` to their latest versions. (#70) + # Version 1.6.0 - Remove the thread-local queue optimization, as it caused a number of bugs in production use cases. (#61) diff --git a/Cargo.toml b/Cargo.toml index 317b93d..8023362 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.6.0" +version = "1.7.0" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.61" From f076528d27f318cef71c9828a34302a23afc00f1 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 11 Nov 2023 08:34:46 -0800 Subject: [PATCH 048/110] Add a disclaimer saying this is a basic executor (#74) In many issues I've mentioned that the executors in this crate are just reference executors. 
However, this is not documented in the crate itself. This commit adds a disclaimer to the crate documentation and to README.md that these are reference executors that shouldn't be relied on for performance. Signed-off-by: John Nunley --- README.md | 7 +++++++ src/lib.rs | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/README.md b/README.md index c330bd6..a968713 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,13 @@ https://docs.rs/async-executor) Async executors. +This crate provides two reference executors that trade performance for +functionality. They should be considered reference executors that are "good +enough" for most use cases. For more specialized use cases, consider writing +your own executor on top of [`async-task`]. + +[`async-task`]: https://crates.io/crates/async-task + ## Examples ```rust diff --git a/src/lib.rs b/src/lib.rs index 79428cd..a30dc9c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,12 @@ //! Async executors. //! +//! This crate provides two reference executors that trade performance for +//! functionality. They should be considered reference executors that are "good +//! enough" for most use cases. For more specialized use cases, consider writing +//! your own executor on top of [`async-task`]. +//! +//! [`async-task`]: https://crates.io/crates/async-task +//! //! # Examples //! //! 
``` From 6c3d45b23c6c0d5b8a2491e832d7a69d897f4773 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 11 Nov 2023 10:15:04 -0800 Subject: [PATCH 049/110] bugfix: Fix wasm32 compile errors Signed-off-by: John Nunley --- .github/workflows/ci.yml | 2 ++ src/lib.rs | 13 ++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 66f75cf..a1b1af2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,12 +37,14 @@ jobs: - uses: actions/checkout@v4 - name: Install Rust run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} + - run: rustup target add wasm32-unknown-unknown - run: cargo build --all --all-features --all-targets if: startsWith(matrix.rust, 'nightly') - name: Run cargo check (without dev-dependencies to catch missing feature flags) if: startsWith(matrix.rust, 'nightly') run: cargo check -Z features=dev_dep - run: cargo test + - run: cargo check --all --all-features --target wasm32-unknown-unknown msrv: runs-on: ubuntu-latest diff --git a/src/lib.rs b/src/lib.rs index a30dc9c..dcc1f99 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -270,7 +270,18 @@ impl<'a> Executor<'a> { /// Returns a reference to the inner state. fn state(&self) -> &Arc { - self.state.get_or_init_blocking(|| Arc::new(State::new())) + #[cfg(not(target_family = "wasm"))] + { + return self.state.get_or_init_blocking(|| Arc::new(State::new())); + } + + // Some projects use this on WASM for some reason. In this case get_or_init_blocking + // doesn't work. Just poll the future once and panic if there is contention. 
+ #[cfg(target_family = "wasm")] + future::block_on(future::poll_once( + self.state.get_or_init(|| async { Arc::new(State::new()) }), + )) + .expect("encountered contention on WASM") } } From 1d4769a7b5bbe689f7d4582e28fc89d6a4bd6790 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 12 Nov 2023 16:21:46 -0800 Subject: [PATCH 050/110] v1.7.1 Signed-off-by: John Nunley --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0226832..7099f65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# Version 1.7.1 + +- Fix compilation under WebAssembly targets (#75). +- Add a disclaimer indicating that this is a reference executor (#74). + # Version 1.7.0 - Bump `async-lock` and `futures-lite` to their latest versions. (#70) diff --git a/Cargo.toml b/Cargo.toml index 8023362..32d1b2f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.7.0" +version = "1.7.1" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.61" From b140c461238384784ce0a8ac14ffd71d4ea35748 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Mon, 13 Nov 2023 08:22:40 -0800 Subject: [PATCH 051/110] Fix dev-dependency WASM compilation issue `futures-lite` in the dev dependencies added a `block_on` call that was not present in the WASM build, causing a compile error. This PR makes sure that the `std` feature of `futures-lite` is enabled in Cargo.toml. 
This also adds a CI check to ensure that this doesn't happen again Signed-off-by: John Nunley --- .github/workflows/ci.yml | 2 ++ Cargo.toml | 3 +++ 2 files changed, 5 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a1b1af2..fa454e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,6 +38,7 @@ jobs: - name: Install Rust run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} - run: rustup target add wasm32-unknown-unknown + - uses: taiki-e/install-action@cargo-hack - run: cargo build --all --all-features --all-targets if: startsWith(matrix.rust, 'nightly') - name: Run cargo check (without dev-dependencies to catch missing feature flags) @@ -45,6 +46,7 @@ jobs: run: cargo check -Z features=dev_dep - run: cargo test - run: cargo check --all --all-features --target wasm32-unknown-unknown + - run: cargo hack build --all --all-features --target wasm32-unknown-unknown --no-dev-deps msrv: runs-on: ubuntu-latest diff --git a/Cargo.toml b/Cargo.toml index 32d1b2f..d9509b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,6 +22,9 @@ fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } slab = "0.4.4" +[target.'cfg(target_family = "wasm")'.dependencies] +futures-lite = { version = "2.0.0", default-features = false, features = ["std"] } + [dev-dependencies] async-channel = "2.0.0" async-io = "2.1.0" From 144b0576d10b6b56812214343644a3540746ea60 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Mon, 13 Nov 2023 08:24:42 -0800 Subject: [PATCH 052/110] Update to 2021 edition Signed-off-by: John Nunley --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index d9509b5..935a625 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "async-executor" # - Create "v1.x.y" git tag version = "1.7.1" authors = ["Stjepan Glavina "] -edition = "2018" +edition = "2021" rust-version = "1.61" description = "Async executor" 
license = "Apache-2.0 OR MIT" From 4b1cf401421cc6e71ca376a7b686e3c0df8a1889 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 18 Nov 2023 09:26:16 -0800 Subject: [PATCH 053/110] v1.7.2 Signed-off-by: John Nunley --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7099f65..49e9e8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.7.2 + +- Fix compilation under WebAssembly targets (#77). + # Version 1.7.1 - Fix compilation under WebAssembly targets (#75). diff --git a/Cargo.toml b/Cargo.toml index 935a625..c09d807 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.7.1" +version = "1.7.2" authors = ["Stjepan Glavina "] edition = "2021" rust-version = "1.61" From fa117dee27fc162ad956d796aa7d706afda4964c Mon Sep 17 00:00:00 2001 From: John Nunley Date: Tue, 21 Nov 2023 02:39:09 -0800 Subject: [PATCH 054/110] Propagate panics in tasks (#78) After smol-rs/async-task#37 I meant to add this to the executor. This commit makes it so all panics are surfaced in the tasks that the user calls. Hopefully this improves ergonomics. Signed-off-by: John Nunley Signed-off-by: Alain Zscheile --- src/lib.rs | 14 +++++++++++--- tests/panic_prop.rs | 14 ++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 tests/panic_prop.rs diff --git a/src/lib.rs b/src/lib.rs index dcc1f99..7116794 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -43,7 +43,7 @@ use std::sync::{Arc, Mutex, RwLock, TryLockError}; use std::task::{Poll, Waker}; use async_lock::OnceCell; -use async_task::Runnable; +use async_task::{Builder, Runnable}; use concurrent_queue::ConcurrentQueue; use futures_lite::{future, prelude::*}; use slab::Slab; @@ -159,7 +159,11 @@ impl<'a> Executor<'a> { }; // Create the task and register it in the set of active tasks. 
- let (runnable, task) = unsafe { async_task::spawn_unchecked(future, self.schedule()) }; + let (runnable, task) = unsafe { + Builder::new() + .propagate_panic(true) + .spawn_unchecked(|()| future, self.schedule()) + }; active.insert(runnable.waker()); runnable.schedule(); @@ -402,7 +406,11 @@ impl<'a> LocalExecutor<'a> { }; // Create the task and register it in the set of active tasks. - let (runnable, task) = unsafe { async_task::spawn_unchecked(future, self.schedule()) }; + let (runnable, task) = unsafe { + Builder::new() + .propagate_panic(true) + .spawn_unchecked(|()| future, self.schedule()) + }; active.insert(runnable.waker()); runnable.schedule(); diff --git a/tests/panic_prop.rs b/tests/panic_prop.rs new file mode 100644 index 0000000..eab4901 --- /dev/null +++ b/tests/panic_prop.rs @@ -0,0 +1,14 @@ +use async_executor::Executor; +use futures_lite::{future, prelude::*}; + +#[test] +fn test_panic_propagation() { + let ex = Executor::new(); + let task = ex.spawn(async { panic!("should be caught by the task") }); + + // Running the executor should not panic. + assert!(ex.try_tick()); + + // Polling the task should. + assert!(future::block_on(task.catch_unwind()).is_err()); +} From d747bcd8277f7928a825129139a9290632f4d90d Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 24 Nov 2023 08:21:32 -0800 Subject: [PATCH 055/110] v1.8.0 Signed-off-by: John Nunley --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49e9e8b..924727b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# Version 1.8.0 + +- When spawned tasks panic, the panic is caught and then surfaced in the spawned + `Task`. Previously, the panic would be surfaced in `tick()` or `run()`. (#78) + # Version 1.7.2 - Fix compilation under WebAssembly targets (#77). 
diff --git a/Cargo.toml b/Cargo.toml index c09d807..5d6e966 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.7.2" +version = "1.8.0" authors = ["Stjepan Glavina "] edition = "2021" rust-version = "1.61" From 24510a7b720d8bdaeab1b793416e817596d11714 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 7 Jan 2024 07:04:21 +0900 Subject: [PATCH 056/110] ci: Use cargo-hack's --rust-version flag for msrv check This respects rust-version field in Cargo.toml, so it removes the need to manage MSRV in both the CI file and Cargo.toml. --- .github/workflows/ci.yml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fa454e0..be05945 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,16 +50,11 @@ jobs: msrv: runs-on: ubuntu-latest - strategy: - matrix: - # When updating this, the reminder to update the minimum supported - # Rust version in Cargo.toml. 
- rust: ['1.61'] steps: - uses: actions/checkout@v4 - - name: Install Rust - run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} - - run: cargo build + - name: Install cargo-hack + uses: taiki-e/install-action@cargo-hack + - run: cargo hack build --rust-version clippy: runs-on: ubuntu-latest From 57fcc2d991b13a68c500f2e5fbacd9ab41db99d1 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 7 Jan 2024 07:05:15 +0900 Subject: [PATCH 057/110] Relax MSRV to 1.60 https://github.com/smol-rs/futures-lite/pull/90 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 5d6e966..dc4b65c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ name = "async-executor" version = "1.8.0" authors = ["Stjepan Glavina "] edition = "2021" -rust-version = "1.61" +rust-version = "1.60" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" From 6c70369102f947a9f4abc48c370c5ab8e7810840 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Mon, 8 Jan 2024 16:01:07 -0800 Subject: [PATCH 058/110] ex: Use Semaphore instead of manual event-listener Whoops, I accidentally reinvented a semaphore and made the example a lot more complicated than it needed to be. 
Signed-off-by: John Nunley --- Cargo.toml | 2 +- examples/limit.rs | 106 ++++++++-------------------------------------- 2 files changed, 18 insertions(+), 90 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dc4b65c..c399c8c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,9 +28,9 @@ futures-lite = { version = "2.0.0", default-features = false, features = ["std"] [dev-dependencies] async-channel = "2.0.0" async-io = "2.1.0" +async-lock = "3.0.0" criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" -event-listener = "3.0.0" fastrand = "2.0.0" futures-lite = "2.0.0" once_cell = "1.16.0" diff --git a/examples/limit.rs b/examples/limit.rs index 80b5f9e..4342c79 100644 --- a/examples/limit.rs +++ b/examples/limit.rs @@ -1,46 +1,23 @@ //! An executor where you can only push a limited number of tasks. use async_executor::{Executor, Task}; -use event_listener::{Event, EventListener}; -use futures_lite::pin; -use std::{ - future::Future, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, - time::Duration, -}; +use async_lock::Semaphore; +use std::{future::Future, sync::Arc, time::Duration}; /// An executor where you can only push a limited number of tasks. struct LimitedExecutor { /// Inner running executor. executor: Executor<'static>, - /// Shared state. - shared: Arc, -} - -struct SharedState { - /// The maximum number of tasks that can be pushed. - max: usize, - - /// The current number of active tasks. - active: AtomicUsize, - - /// Event listeners for when a new task is available. - slot_available: Event, + /// Semaphore limiting the number of tasks. 
+ semaphore: Arc, } impl LimitedExecutor { fn new(max: usize) -> Self { Self { executor: Executor::new(), - shared: Arc::new(SharedState { - max, - active: AtomicUsize::new(0), - slot_available: Event::new(), - }), + semaphore: Semaphore::new(max).into(), } } @@ -49,67 +26,18 @@ impl LimitedExecutor { where F::Output: Send + 'static, { - let listener = EventListener::new(&self.shared.slot_available); - pin!(listener); - - // Load the current number of active tasks. - let mut active = self.shared.active.load(Ordering::Acquire); - - loop { - // Check if there is a slot available. - if active < self.shared.max { - // Try to set the slot to what would be the new number of tasks. - let new_active = active + 1; - match self.shared.active.compare_exchange( - active, - new_active, - Ordering::SeqCst, - Ordering::SeqCst, - ) { - Ok(_) => { - // Wrap the future in another future that decrements the active count - // when it's done. - let future = { - let shared = self.shared.clone(); - async move { - struct DecOnDrop(Arc); - - impl Drop for DecOnDrop { - fn drop(&mut self) { - // Decrement the count and notify someone. - self.0.active.fetch_sub(1, Ordering::SeqCst); - self.0.slot_available.notify(usize::MAX); - } - } - - let _dec = DecOnDrop(shared); - future.await - } - }; - - // Wake up another waiter, in case there is one. - self.shared.slot_available.notify(1); - - // Spawn the task. - return self.executor.spawn(future); - } - - Err(actual) => { - // Try again. - active = actual; - } - } - } else { - // Start waiting for a slot to become available. - if listener.as_ref().is_listening() { - listener.as_mut().await; - } else { - listener.as_mut().listen(); - } - - active = self.shared.active.load(Ordering::Acquire); - } - } + // Wait for a semaphore permit. + let permit = self.semaphore.acquire_arc().await; + + // Wrap it into a new future. + let future = async move { + let result = future.await; + drop(permit); + result + }; + + // Spawn the task. 
+ self.executor.spawn(future) } /// Run a future to completion. From 4fbe23af692ac7939d3a281c709c8afd4f0dac95 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 27 Jan 2024 00:34:45 +0900 Subject: [PATCH 059/110] Update criterion requirement from 0.4 to 0.5 (#43) Signed-off-by: dependabot[bot] --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index c399c8c..0d30945 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,7 @@ futures-lite = { version = "2.0.0", default-features = false, features = ["std"] async-channel = "2.0.0" async-io = "2.1.0" async-lock = "3.0.0" -criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } +criterion = { version = "0.5", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" fastrand = "2.0.0" futures-lite = "2.0.0" From 0baba46152d65f9ba10acd0fe4b612b8c8339f23 Mon Sep 17 00:00:00 2001 From: Jacob Rothstein Date: Mon, 12 Feb 2024 19:40:56 -0800 Subject: [PATCH 060/110] chore: Bump async-task to v4.4.0 this crate depends on async_task::Builder, which was introduced in 4.4.0 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 0d30945..d64beb4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ exclude = ["/.*"] [dependencies] async-lock = "3.0.0" -async-task = "4.0.0" +async-task = "4.4.0" concurrent-queue = "2.0.0" fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } From 7ffdf5ba922fd5bbc840185c7cf7b1a21ba52793 Mon Sep 17 00:00:00 2001 From: James Liu Date: Fri, 16 Feb 2024 17:22:43 -0800 Subject: [PATCH 061/110] m: Replace unnecessary atomics with non-atomic operations --- src/lib.rs | 48 ++++++++++++++++++++++-------------------------- 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 7116794..18143df 100644 --- 
a/src/lib.rs +++ b/src/lib.rs @@ -38,7 +38,7 @@ use std::future::Future; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex, RwLock, TryLockError}; use std::task::{Poll, Waker}; @@ -243,7 +243,7 @@ impl<'a> Executor<'a> { /// assert_eq!(res, 6); /// ``` pub async fn run(&self, future: impl Future) -> T { - let runner = Runner::new(self.state()); + let mut runner = Runner::new(self.state()); let mut rng = fastrand::Rng::new(); // A future that runs tasks forever. @@ -639,29 +639,26 @@ struct Ticker<'a> { /// 1) Woken. /// 2a) Sleeping and unnotified. /// 2b) Sleeping and notified. - sleeping: AtomicUsize, + sleeping: usize, } impl Ticker<'_> { /// Creates a ticker. fn new(state: &State) -> Ticker<'_> { - Ticker { - state, - sleeping: AtomicUsize::new(0), - } + Ticker { state, sleeping: 0 } } /// Moves the ticker into sleeping and unnotified state. /// /// Returns `false` if the ticker was already sleeping and unnotified. - fn sleep(&self, waker: &Waker) -> bool { + fn sleep(&mut self, waker: &Waker) -> bool { let mut sleepers = self.state.sleepers.lock().unwrap(); - match self.sleeping.load(Ordering::SeqCst) { + match self.sleeping { // Move to sleeping state. - 0 => self - .sleeping - .store(sleepers.insert(waker), Ordering::SeqCst), + 0 => { + self.sleeping = sleepers.insert(waker); + } // Already sleeping, check if notified. id => { @@ -679,25 +676,25 @@ impl Ticker<'_> { } /// Moves the ticker into woken state. 
- fn wake(&self) { - let id = self.sleeping.swap(0, Ordering::SeqCst); - if id != 0 { + fn wake(&mut self) { + if self.sleeping != 0 { let mut sleepers = self.state.sleepers.lock().unwrap(); - sleepers.remove(id); + sleepers.remove(self.sleeping); self.state .notified .swap(sleepers.is_notified(), Ordering::SeqCst); } + self.sleeping = 0; } /// Waits for the next runnable task to run. - async fn runnable(&self) -> Runnable { + async fn runnable(&mut self) -> Runnable { self.runnable_with(|| self.state.queue.pop().ok()).await } /// Waits for the next runnable task to run, given a function that searches for a task. - async fn runnable_with(&self, mut search: impl FnMut() -> Option) -> Runnable { + async fn runnable_with(&mut self, mut search: impl FnMut() -> Option) -> Runnable { future::poll_fn(|cx| { loop { match search() { @@ -728,10 +725,9 @@ impl Ticker<'_> { impl Drop for Ticker<'_> { fn drop(&mut self) { // If this ticker is in sleeping state, it must be removed from the sleepers list. - let id = self.sleeping.swap(0, Ordering::SeqCst); - if id != 0 { + if self.sleeping != 0 { let mut sleepers = self.state.sleepers.lock().unwrap(); - let notified = sleepers.remove(id); + let notified = sleepers.remove(self.sleeping); self.state .notified @@ -760,7 +756,7 @@ struct Runner<'a> { local: Arc>, /// Bumped every time a runnable task is found. - ticks: AtomicUsize, + ticks: usize, } impl Runner<'_> { @@ -770,7 +766,7 @@ impl Runner<'_> { state, ticker: Ticker::new(state), local: Arc::new(ConcurrentQueue::bounded(512)), - ticks: AtomicUsize::new(0), + ticks: 0, }; state .local_queues @@ -781,7 +777,7 @@ impl Runner<'_> { } /// Waits for the next runnable task to run. - async fn runnable(&self, rng: &mut fastrand::Rng) -> Runnable { + async fn runnable(&mut self, rng: &mut fastrand::Rng) -> Runnable { let runnable = self .ticker .runnable_with(|| { @@ -824,9 +820,9 @@ impl Runner<'_> { .await; // Bump the tick counter. 
- let ticks = self.ticks.fetch_add(1, Ordering::SeqCst); + self.ticks += 1; - if ticks % 64 == 0 { + if self.ticks % 64 == 0 { // Steal tasks from the global queue to ensure fair task scheduling. steal(&self.state.queue, &self.local); } From 568a314ad9bc6234fdae4e7390d3c8b2eb96a8f4 Mon Sep 17 00:00:00 2001 From: James Liu Date: Sat, 17 Feb 2024 00:02:59 -0800 Subject: [PATCH 062/110] Avoid redundant lookups in the active slab when spawning new tasks (#96) --- src/lib.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 18143df..768691b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -151,7 +151,8 @@ impl<'a> Executor<'a> { let mut active = self.state().active.lock().unwrap(); // Remove the task from the set of active tasks when the future finishes. - let index = active.vacant_entry().key(); + let entry = active.vacant_entry(); + let index = entry.key(); let state = self.state().clone(); let future = async move { let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().try_remove(index))); @@ -164,7 +165,7 @@ impl<'a> Executor<'a> { .propagate_panic(true) .spawn_unchecked(|()| future, self.schedule()) }; - active.insert(runnable.waker()); + entry.insert(runnable.waker()); runnable.schedule(); task @@ -398,7 +399,8 @@ impl<'a> LocalExecutor<'a> { let mut active = self.inner().state().active.lock().unwrap(); // Remove the task from the set of active tasks when the future finishes. 
- let index = active.vacant_entry().key(); + let entry = active.vacant_entry(); + let index = entry.key(); let state = self.inner().state().clone(); let future = async move { let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().try_remove(index))); @@ -411,7 +413,7 @@ impl<'a> LocalExecutor<'a> { .propagate_panic(true) .spawn_unchecked(|()| future, self.schedule()) }; - active.insert(runnable.waker()); + entry.insert(runnable.waker()); runnable.schedule(); task From 188f976dc34597d1f28e7fdb06df8e665a198101 Mon Sep 17 00:00:00 2001 From: James Liu Date: Sat, 17 Feb 2024 12:20:57 -0800 Subject: [PATCH 063/110] m: Weaken the atomic orderings for notification The atomic orderings on State::notified might be too strong, as it's primarily being used as a deterrent against waking up too many threads. This PR weakens their sequentially consistent operations to Acquire/Release. --- src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 768691b..cafc6e6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -543,7 +543,7 @@ impl State { fn notify(&self) { if self .notified - .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) .is_ok() { let waker = self.sleepers.lock().unwrap().notify(); @@ -672,7 +672,7 @@ impl Ticker<'_> { self.state .notified - .swap(sleepers.is_notified(), Ordering::SeqCst); + .store(sleepers.is_notified(), Ordering::Release); true } @@ -685,7 +685,7 @@ impl Ticker<'_> { self.state .notified - .swap(sleepers.is_notified(), Ordering::SeqCst); + .store(sleepers.is_notified(), Ordering::Release); } self.sleeping = 0; } @@ -733,7 +733,7 @@ impl Drop for Ticker<'_> { self.state .notified - .swap(sleepers.is_notified(), Ordering::SeqCst); + .store(sleepers.is_notified(), Ordering::Release); // If this ticker was notified, then notify another ticker. 
if notified { From 7592d4188afd43c8fe23a7aabf12acaf16480fb1 Mon Sep 17 00:00:00 2001 From: James Liu Date: Wed, 21 Feb 2024 19:53:40 -0800 Subject: [PATCH 064/110] feat: Use actual thread local queues instead of using a RwLock Currently, runner local queues rely on a RwLock<Vec<Arc<ConcurrentQueue<Runnable>>>> to store the queues instead of using actual thread-local storage. This adds thread_local as a dependency, but this should allow the executor to work steal without needing to hold a lock, as well as allow tasks to schedule onto the local queue directly, where possible, instead of always relying on the global injector queue. Fixes #62 Co-authored-by: John Nunley --- Cargo.toml | 4 +- benches/executor.rs | 1 - examples/priority.rs | 1 - src/lib.rs | 133 +++++++++++++++++++++++++++++-------------- 4 files changed, 92 insertions(+), 47 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d64beb4..cef7138 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ name = "async-executor" version = "1.8.0" authors = ["Stjepan Glavina "] edition = "2021" -rust-version = "1.60" +rust-version = "1.61" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" @@ -17,10 +17,12 @@ exclude = ["/.*"] [dependencies] async-lock = "3.0.0" async-task = "4.4.0" +atomic-waker = "1.0" concurrent-queue = "2.0.0" fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } slab = "0.4.4" +thread_local = "1.1" [target.'cfg(target_family = "wasm")'.dependencies] futures-lite = { version = "2.0.0", default-features = false, features = ["std"] } diff --git a/benches/executor.rs b/benches/executor.rs index 20d41a1..b6e33c2 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -1,4 +1,3 @@ -use std::future::Future; use std::thread::available_parallelism; use async_executor::Executor; diff --git a/examples/priority.rs b/examples/priority.rs index df77dd1..60d5c9a 100644 --- a/examples/priority.rs +++ b/examples/priority.rs @@ -1,6 +1,5
@@ //! An executor with task priorities. -use std::future::Future; use std::thread; use async_executor::{Executor, Task}; diff --git a/src/lib.rs b/src/lib.rs index cafc6e6..05c69aa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -34,19 +34,20 @@ )] use std::fmt; -use std::future::Future; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Mutex, RwLock, TryLockError}; +use std::sync::{Arc, Mutex, TryLockError}; use std::task::{Poll, Waker}; use async_lock::OnceCell; use async_task::{Builder, Runnable}; +use atomic_waker::AtomicWaker; use concurrent_queue::ConcurrentQueue; use futures_lite::{future, prelude::*}; use slab::Slab; +use thread_local::ThreadLocal; #[doc(no_inline)] pub use async_task::Task; @@ -266,8 +267,23 @@ impl<'a> Executor<'a> { fn schedule(&self) -> impl Fn(Runnable) + Send + Sync + 'static { let state = self.state().clone(); - // TODO: If possible, push into the current local queue and notify the ticker. - move |runnable| { + move |mut runnable| { + // If possible, push into the current local queue and notify the ticker. + if let Some(local) = state.local_queue.get() { + runnable = if let Err(err) = local.queue.push(runnable) { + err.into_inner() + } else { + // Wake up this thread if it's asleep, otherwise notify another + // thread to try to have the task stolen. + if let Some(waker) = local.waker.take() { + waker.wake(); + } else { + state.notify(); + } + return; + } + } + // If the local queue is full, fallback to pushing onto the global injector queue. state.queue.push(runnable).unwrap(); state.notify(); } @@ -510,7 +526,16 @@ struct State { queue: ConcurrentQueue, /// Local queues created by runners. - local_queues: RwLock>>>, + /// + /// If possible, tasks are scheduled onto the local queue, and will only defer + /// to other global queue when they're full, or the task is being scheduled from + /// a thread without a runner. 
+ /// + /// Note: if a runner terminates and drains its local queue, any subsequent + /// spawn calls from the same thread will be added to the same queue, but won't + /// be executed until `Executor::run` is run on the thread again, or another + /// thread steals the task. + local_queue: ThreadLocal, /// Set to `true` when a sleeping ticker is notified or no tickers are sleeping. notified: AtomicBool, @@ -527,7 +552,7 @@ impl State { fn new() -> State { State { queue: ConcurrentQueue::unbounded(), - local_queues: RwLock::new(Vec::new()), + local_queue: ThreadLocal::new(), notified: AtomicBool::new(true), sleepers: Mutex::new(Sleepers { count: 0, @@ -654,6 +679,12 @@ impl Ticker<'_> { /// /// Returns `false` if the ticker was already sleeping and unnotified. fn sleep(&mut self, waker: &Waker) -> bool { + self.state + .local_queue + .get_or_default() + .waker + .register(waker); + let mut sleepers = self.state.sleepers.lock().unwrap(); match self.sleeping { @@ -692,7 +723,14 @@ impl Ticker<'_> { /// Waits for the next runnable task to run. async fn runnable(&mut self) -> Runnable { - self.runnable_with(|| self.state.queue.pop().ok()).await + self.runnable_with(|| { + self.state + .local_queue + .get() + .and_then(|local| local.queue.pop().ok()) + .or_else(|| self.state.queue.pop().ok()) + }) + .await } /// Waits for the next runnable task to run, given a function that searches for a task. @@ -754,9 +792,6 @@ struct Runner<'a> { /// Inner ticker. ticker: Ticker<'a>, - /// The local queue. - local: Arc>, - /// Bumped every time a runnable task is found. ticks: usize, } @@ -767,38 +802,34 @@ impl Runner<'_> { let runner = Runner { state, ticker: Ticker::new(state), - local: Arc::new(ConcurrentQueue::bounded(512)), ticks: 0, }; - state - .local_queues - .write() - .unwrap() - .push(runner.local.clone()); runner } /// Waits for the next runnable task to run. 
async fn runnable(&mut self, rng: &mut fastrand::Rng) -> Runnable { + let local = self.state.local_queue.get_or_default(); + let runnable = self .ticker .runnable_with(|| { // Try the local queue. - if let Ok(r) = self.local.pop() { + if let Ok(r) = local.queue.pop() { return Some(r); } // Try stealing from the global queue. if let Ok(r) = self.state.queue.pop() { - steal(&self.state.queue, &self.local); + steal(&self.state.queue, &local.queue); return Some(r); } // Try stealing from other runners. - let local_queues = self.state.local_queues.read().unwrap(); + let local_queues = &self.state.local_queue; // Pick a random starting point in the iterator list and rotate the list. - let n = local_queues.len(); + let n = local_queues.iter().count(); let start = rng.usize(..n); let iter = local_queues .iter() @@ -807,12 +838,12 @@ impl Runner<'_> { .take(n); // Remove this runner's local queue. - let iter = iter.filter(|local| !Arc::ptr_eq(local, &self.local)); + let iter = iter.filter(|other| !core::ptr::eq(*other, local)); // Try stealing from each local queue in the list. - for local in iter { - steal(local, &self.local); - if let Ok(r) = self.local.pop() { + for other in iter { + steal(&other.queue, &local.queue); + if let Ok(r) = local.queue.pop() { return Some(r); } } @@ -826,7 +857,7 @@ impl Runner<'_> { if self.ticks % 64 == 0 { // Steal tasks from the global queue to ensure fair task scheduling. - steal(&self.state.queue, &self.local); + steal(&self.state.queue, &local.queue); } runnable @@ -836,15 +867,13 @@ impl Runner<'_> { impl Drop for Runner<'_> { fn drop(&mut self) { // Remove the local queue. - self.state - .local_queues - .write() - .unwrap() - .retain(|local| !Arc::ptr_eq(local, &self.local)); - - // Re-schedule remaining tasks in the local queue. - while let Ok(r) = self.local.pop() { - r.schedule(); + if let Some(local) = self.state.local_queue.get() { + // Re-schedule remaining tasks in the local queue. 
+ for r in local.queue.try_iter() { + // Explicitly reschedule the runnable back onto the global + // queue to avoid rescheduling onto the local one. + self.state.queue.push(r).unwrap(); + } } } } @@ -904,18 +933,13 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ } /// Debug wrapper for the local runners. - struct LocalRunners<'a>(&'a RwLock>>>); + struct LocalRunners<'a>(&'a ThreadLocal); impl fmt::Debug for LocalRunners<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.0.try_read() { - Ok(lock) => f - .debug_list() - .entries(lock.iter().map(|queue| queue.len())) - .finish(), - Err(TryLockError::WouldBlock) => f.write_str(""), - Err(TryLockError::Poisoned(_)) => f.write_str(""), - } + f.debug_list() + .entries(self.0.iter().map(|local| local.queue.len())) + .finish() } } @@ -935,11 +959,32 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ f.debug_struct(name) .field("active", &ActiveTasks(&state.active)) .field("global_tasks", &state.queue.len()) - .field("local_runners", &LocalRunners(&state.local_queues)) + .field("local_runners", &LocalRunners(&state.local_queue)) .field("sleepers", &SleepCount(&state.sleepers)) .finish() } +/// A queue local to each thread. +/// +/// It's Default implementation is used for initializing each +/// thread's queue via `ThreadLocal::get_or_default`. +/// +/// The local queue *must* be flushed, and all pending runnables +/// rescheduled onto the global queue when a runner is dropped. +struct LocalQueue { + queue: ConcurrentQueue, + waker: AtomicWaker, +} + +impl Default for LocalQueue { + fn default() -> Self { + Self { + queue: ConcurrentQueue::bounded(512), + waker: AtomicWaker::new(), + } + } +} + /// Runs a closure when dropped. 
struct CallOnDrop(F); From c7bbe489ab0a2a2091c3a8500a97fc6fe2928610 Mon Sep 17 00:00:00 2001 From: James Liu Date: Wed, 21 Feb 2024 20:03:49 -0800 Subject: [PATCH 065/110] Use wrapping add on ticks to avoid tick counter overflow in debug builds (#101) --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 05c69aa..c13f67f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -853,7 +853,7 @@ impl Runner<'_> { .await; // Bump the tick counter. - self.ticks += 1; + self.ticks = self.ticks.wrapping_add(1); if self.ticks % 64 == 0 { // Steal tasks from the global queue to ensure fair task scheduling. From 2f3189a4b43319ece7abf99a10fb82187ff9672c Mon Sep 17 00:00:00 2001 From: John Nunley Date: Wed, 21 Feb 2024 20:58:51 -0800 Subject: [PATCH 066/110] v1.9.0 Signed-off-by: John Nunley --- CHANGELOG.md | 8 ++++++++ Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 924727b..256fbd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +# Version 1.9.0 + +- Re-introduce the thread-local task push optimization to the executor. (#93) +- Bump `async-task` to v4.4.0. (#90) +- Replace some unnecessary atomic operations with non-atomic operations. (#94) +- Use weaker atomic orderings for notifications. (#95) +- When spawning a future, avoid looking up the ID to assign to that future twice. 
(#96) + # Version 1.8.0 - When spawned tasks panic, the panic is caught and then surfaced in the spawned diff --git a/Cargo.toml b/Cargo.toml index cef7138..8325fe1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.8.0" +version = "1.9.0" authors = ["Stjepan Glavina "] edition = "2021" rust-version = "1.61" From d5dc7a8008bbbbeafe3764d07bbc4d19a415006d Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 24 Feb 2024 15:23:44 -0800 Subject: [PATCH 067/110] tests: Add tests with more complicated futures This should catch the errors from earlier. Signed-off-by: John Nunley --- tests/larger_tasks.rs | 95 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 tests/larger_tasks.rs diff --git a/tests/larger_tasks.rs b/tests/larger_tasks.rs new file mode 100644 index 0000000..12642ed --- /dev/null +++ b/tests/larger_tasks.rs @@ -0,0 +1,95 @@ +//! Test for larger tasks. + +use async_executor::Executor; +use futures_lite::future::{self, block_on}; +use futures_lite::prelude::*; + +use std::sync::Arc; +use std::thread; +use std::time::Duration; + +fn do_run>(mut f: impl FnMut(Arc>) -> Fut) { + // This should not run for longer than two minutes. + let (_stop_timeout, stopper) = async_channel::bounded::<()>(1); + thread::spawn(move || { + block_on(async move { + let timeout = async { + async_io::Timer::after(Duration::from_secs(2 * 60)).await; + eprintln!("test timed out after 2m"); + std::process::exit(1) + }; + + let _ = stopper.recv().or(timeout).await; + }) + }); + + let ex = Arc::new(Executor::new()); + + // Test 1: Use the `run` command. + block_on(ex.run(f(ex.clone()))); + + // Test 2: Loop on `tick`. + block_on(async { + let ticker = async { + loop { + ex.tick().await; + } + }; + + f(ex.clone()).or(ticker).await + }); + + // Test 3: Run on many threads. 
+ thread::scope(|scope| { + let (_signal, shutdown) = async_channel::bounded::<()>(1); + + for _ in 0..16 { + let shutdown = shutdown.clone(); + let ex = &ex; + scope.spawn(move || block_on(ex.run(shutdown.recv()))); + } + + block_on(f(ex.clone())); + }); + + // Test 4: Tick loop on many threads. + thread::scope(|scope| { + let (_signal, shutdown) = async_channel::bounded::<()>(1); + + for _ in 0..16 { + let shutdown = shutdown.clone(); + let ex = &ex; + scope.spawn(move || { + block_on(async move { + let ticker = async { + loop { + ex.tick().await; + } + }; + + shutdown.recv().or(ticker).await + }) + }); + } + + block_on(f(ex.clone())); + }); +} + +#[test] +fn smoke() { + do_run(|ex| async move { ex.spawn(async {}).await }); +} + +#[test] +fn yield_now() { + do_run(|ex| async move { ex.spawn(future::yield_now()).await }) +} + +#[test] +fn timer() { + do_run(|ex| async move { + ex.spawn(async_io::Timer::after(Duration::from_millis(5))) + .await; + }) +} From 22a9e8b30587b2aa775ceb00da2185c01176d72b Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 1 Mar 2024 21:11:14 -0800 Subject: [PATCH 068/110] bugfix: Account for local queue corner cases It turns out that with the current strategy it is possible for tasks to be stuck in the local queue without any hope of being picked back up. In practice this seems to happen when the only entities polling the system are tickers, as opposed to runners. Since tickers don't steal tasks, it is possible for tasks to be left over in the local queue that don't filter out. One possible solution is to make it so tickers steal tasks, but this kind of defeats the point of tickers. So I've instead elected to replace the current strategy with one that accounts for the corner cases with local queues. The main difference is that I replace the Sleepers struct with two event_listener::Event's. One that handles tickers subscribed to the global queue and one that handles tickers subscribed to the local queue.
The other main difference is that each local queue now has a reference counter. If this count reaches zero, no tasks will be pushed to this queue. Only runners increment or decrement this counter. This makes the previously instituted tests pass, so hopefully this works for most use cases. Signed-off-by: John Nunley --- Cargo.toml | 2 +- src/lib.rs | 363 ++++++++++++------------------------------ tests/larger_tasks.rs | 6 + 3 files changed, 111 insertions(+), 260 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 8325fe1..aadb95d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,8 +17,8 @@ exclude = ["/.*"] [dependencies] async-lock = "3.0.0" async-task = "4.4.0" -atomic-waker = "1.0" concurrent-queue = "2.0.0" +event-listener = { version = "5.2.0", default-features = false, features = ["std"] } fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } slab = "0.4.4" diff --git a/src/lib.rs b/src/lib.rs index c13f67f..483bc41 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,14 +37,14 @@ use std::fmt; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex, TryLockError}; -use std::task::{Poll, Waker}; +use std::task::Waker; use async_lock::OnceCell; use async_task::{Builder, Runnable}; -use atomic_waker::AtomicWaker; use concurrent_queue::ConcurrentQueue; +use event_listener::{listener, Event}; use futures_lite::{future, prelude::*}; use slab::Slab; use thread_local::ThreadLocal; @@ -225,7 +225,15 @@ impl<'a> Executor<'a> { /// ``` pub async fn tick(&self) { let state = self.state(); - let runnable = Ticker::new(state).runnable().await; + let runnable = state + .tick_with(|local, steal| { + local + .queue + .pop() + .ok() + .or_else(|| if steal { state.queue.pop().ok() } else { None }) + }) + .await; runnable.run(); } @@ -270,22 +278,23 @@ impl<'a> Executor<'a> { move |mut runnable| 
{ // If possible, push into the current local queue and notify the ticker. if let Some(local) = state.local_queue.get() { - runnable = if let Err(err) = local.queue.push(runnable) { - err.into_inner() - } else { - // Wake up this thread if it's asleep, otherwise notify another - // thread to try to have the task stolen. - if let Some(waker) = local.waker.take() { - waker.wake(); + // Don't push into the local queue if no one is ticking it. + if local.tickers.load(Ordering::Acquire) > 0 { + runnable = if let Err(err) = local.queue.push(runnable) { + err.into_inner() } else { - state.notify(); + // Try to notify threads waiting on this queue. If there are + // none, notify another thread. + if local.waiters.notify_additional(1) == 0 { + state.new_tasks.notify_additional(1); + } + return; } - return; } } // If the local queue is full, fallback to pushing onto the global injector queue. state.queue.push(runnable).unwrap(); - state.notify(); + state.new_tasks.notify_additional(1); } } @@ -315,6 +324,9 @@ impl Drop for Executor<'_> { } drop(active); + for local_queue in state.local_queue.iter() { + while local_queue.queue.pop().is_ok() {} + } while state.queue.pop().is_ok() {} } } @@ -537,11 +549,8 @@ struct State { /// thread steals the task. local_queue: ThreadLocal, - /// Set to `true` when a sleeping ticker is notified or no tickers are sleeping. - notified: AtomicBool, - - /// A list of sleeping tickers. - sleepers: Mutex, + /// Tickers waiting on new tasks from the global queue. + new_tasks: Event, /// Currently active tasks. active: Mutex>, @@ -553,12 +562,7 @@ impl State { State { queue: ConcurrentQueue::unbounded(), local_queue: ThreadLocal::new(), - notified: AtomicBool::new(true), - sleepers: Mutex::new(Sleepers { - count: 0, - wakers: Vec::new(), - free_ids: Vec::new(), - }), + new_tasks: Event::new(), active: Mutex::new(Slab::new()), } } @@ -566,218 +570,40 @@ impl State { /// Notifies a sleeping ticker. 
#[inline] fn notify(&self) { - if self - .notified - .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) - .is_ok() - { - let waker = self.sleepers.lock().unwrap().notify(); - if let Some(w) = waker { - w.wake(); - } - } + self.new_tasks.notify(1); } -} - -/// A list of sleeping tickers. -struct Sleepers { - /// Number of sleeping tickers (both notified and unnotified). - count: usize, - /// IDs and wakers of sleeping unnotified tickers. - /// - /// A sleeping ticker is notified when its waker is missing from this list. - wakers: Vec<(usize, Waker)>, + /// Run a tick using the provided function to get the next task. + async fn tick_with( + &self, + mut local_ticker: impl FnMut(&LocalQueue, bool) -> Option, + ) -> Runnable { + let local = self.local_queue.get_or_default(); - /// Reclaimed IDs. - free_ids: Vec, -} - -impl Sleepers { - /// Inserts a new sleeping ticker. - fn insert(&mut self, waker: &Waker) -> usize { - let id = match self.free_ids.pop() { - Some(id) => id, - None => self.count + 1, - }; - self.count += 1; - self.wakers.push((id, waker.clone())); - id - } - - /// Re-inserts a sleeping ticker's waker if it was notified. - /// - /// Returns `true` if the ticker was notified. - fn update(&mut self, id: usize, waker: &Waker) -> bool { - for item in &mut self.wakers { - if item.0 == id { - if !item.1.will_wake(waker) { - item.1 = waker.clone(); - } - return false; + loop { + // Try to get a runnable from the local queue. + if let Some(runnable) = local_ticker(local, false) { + return runnable; } - } - self.wakers.push((id, waker.clone())); - true - } + // Register a local waiter. + listener!(local.waiters => local_listener); - /// Removes a previously inserted sleeping ticker. - /// - /// Returns `true` if the ticker was notified. 
- fn remove(&mut self, id: usize) -> bool { - self.count -= 1; - self.free_ids.push(id); - - for i in (0..self.wakers.len()).rev() { - if self.wakers[i].0 == id { - self.wakers.remove(i); - return false; + // Try for a global runner. + if let Ok(runnable) = self.queue.pop() { + return runnable; } - } - true - } - /// Returns `true` if a sleeping ticker is notified or no tickers are sleeping. - fn is_notified(&self) -> bool { - self.count == 0 || self.count > self.wakers.len() - } - - /// Returns notification waker for a sleeping ticker. - /// - /// If a ticker was notified already or there are no tickers, `None` will be returned. - fn notify(&mut self) -> Option { - if self.wakers.len() == self.count { - self.wakers.pop().map(|item| item.1) - } else { - None - } - } -} - -/// Runs task one by one. -struct Ticker<'a> { - /// The executor state. - state: &'a State, - - /// Set to a non-zero sleeper ID when in sleeping state. - /// - /// States a ticker can be in: - /// 1) Woken. - /// 2a) Sleeping and unnotified. - /// 2b) Sleeping and notified. - sleeping: usize, -} + // Register a global waiter. + listener!(self.new_tasks => global_listener); -impl Ticker<'_> { - /// Creates a ticker. - fn new(state: &State) -> Ticker<'_> { - Ticker { state, sleeping: 0 } - } - - /// Moves the ticker into sleeping and unnotified state. - /// - /// Returns `false` if the ticker was already sleeping and unnotified. - fn sleep(&mut self, waker: &Waker) -> bool { - self.state - .local_queue - .get_or_default() - .waker - .register(waker); - - let mut sleepers = self.state.sleepers.lock().unwrap(); - - match self.sleeping { - // Move to sleeping state. - 0 => { - self.sleeping = sleepers.insert(waker); - } - - // Already sleeping, check if notified. - id => { - if !sleepers.update(id, waker) { - return false; - } + // Try for both again. 
+ if let Some(runnable) = local_ticker(local, true) { + return runnable; } - } - - self.state - .notified - .store(sleepers.is_notified(), Ordering::Release); - - true - } - - /// Moves the ticker into woken state. - fn wake(&mut self) { - if self.sleeping != 0 { - let mut sleepers = self.state.sleepers.lock().unwrap(); - sleepers.remove(self.sleeping); - self.state - .notified - .store(sleepers.is_notified(), Ordering::Release); - } - self.sleeping = 0; - } - - /// Waits for the next runnable task to run. - async fn runnable(&mut self) -> Runnable { - self.runnable_with(|| { - self.state - .local_queue - .get() - .and_then(|local| local.queue.pop().ok()) - .or_else(|| self.state.queue.pop().ok()) - }) - .await - } - - /// Waits for the next runnable task to run, given a function that searches for a task. - async fn runnable_with(&mut self, mut search: impl FnMut() -> Option) -> Runnable { - future::poll_fn(|cx| { - loop { - match search() { - None => { - // Move to sleeping and unnotified state. - if !self.sleep(cx.waker()) { - // If already sleeping and unnotified, return. - return Poll::Pending; - } - } - Some(r) => { - // Wake up. - self.wake(); - - // Notify another ticker now to pick up where this ticker left off, just in - // case running the task takes a long time. - self.state.notify(); - - return Poll::Ready(r); - } - } - } - }) - .await - } -} - -impl Drop for Ticker<'_> { - fn drop(&mut self) { - // If this ticker is in sleeping state, it must be removed from the sleepers list. - if self.sleeping != 0 { - let mut sleepers = self.state.sleepers.lock().unwrap(); - let notified = sleepers.remove(self.sleeping); - - self.state - .notified - .store(sleepers.is_notified(), Ordering::Release); - - // If this ticker was notified, then notify another ticker. - if notified { - drop(sleepers); - self.state.notify(); - } + // Wait on both listeners in parallel. 
+ local_listener.or(global_listener).await; } } } @@ -789,9 +615,6 @@ struct Runner<'a> { /// The executor state. state: &'a State, - /// Inner ticker. - ticker: Ticker<'a>, - /// Bumped every time a runnable task is found. ticks: usize, } @@ -799,12 +622,9 @@ struct Runner<'a> { impl Runner<'_> { /// Creates a runner and registers it in the executor state. fn new(state: &State) -> Runner<'_> { - let runner = Runner { - state, - ticker: Ticker::new(state), - ticks: 0, - }; - runner + state.local_queue.get_or_default().start_ticking(); + + Runner { state, ticks: 0 } } /// Waits for the next runnable task to run. @@ -812,13 +632,18 @@ impl Runner<'_> { let local = self.state.local_queue.get_or_default(); let runnable = self - .ticker - .runnable_with(|| { + .state + .tick_with(|_, try_stealing| { // Try the local queue. if let Ok(r) = local.queue.pop() { return Some(r); } + // Remaining work involves stealing. + if !try_stealing { + return None; + } + // Try stealing from the global queue. if let Ok(r) = self.state.queue.pop() { steal(&self.state.queue, &local.queue); @@ -868,12 +693,7 @@ impl Drop for Runner<'_> { fn drop(&mut self) { // Remove the local queue. if let Some(local) = self.state.local_queue.get() { - // Re-schedule remaining tasks in the local queue. - for r in local.queue.try_iter() { - // Explicitly reschedule the runnable back onto the global - // queue to avoid rescheduling onto the local one. - self.state.queue.push(r).unwrap(); - } + local.stop_ticking(self.state); } } } @@ -943,24 +763,10 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ } } - /// Debug wrapper for the sleepers. 
- struct SleepCount<'a>(&'a Mutex); - - impl fmt::Debug for SleepCount<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.0.try_lock() { - Ok(lock) => fmt::Debug::fmt(&lock.count, f), - Err(TryLockError::WouldBlock) => f.write_str(""), - Err(TryLockError::Poisoned(_)) => f.write_str(""), - } - } - } - f.debug_struct(name) .field("active", &ActiveTasks(&state.active)) .field("global_tasks", &state.queue.len()) .field("local_runners", &LocalRunners(&state.local_queue)) - .field("sleepers", &SleepCount(&state.sleepers)) .finish() } @@ -972,15 +778,54 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ /// The local queue *must* be flushed, and all pending runnables /// rescheduled onto the global queue when a runner is dropped. struct LocalQueue { + /// Queue of concurrent tasks. queue: ConcurrentQueue, - waker: AtomicWaker, + + /// Tickers waiting on an event from this queue. + waiters: Event, + + /// Number of tickers waiting on this queue. + tickers: AtomicUsize, } impl Default for LocalQueue { fn default() -> Self { Self { queue: ConcurrentQueue::bounded(512), - waker: AtomicWaker::new(), + waiters: Event::new(), + tickers: AtomicUsize::new(0), + } + } +} + +impl LocalQueue { + /// Indicate that we are now waiting on this queue. + fn start_ticking(&self) { + // Relaxed ordering is fine here. + let old_tickers = self.tickers.fetch_add(1, Ordering::Relaxed); + if old_tickers > isize::MAX as usize { + panic!("too many tickers waiting on one thread"); + } + } + + /// Indicate that we are no longer waiting on this queue. + #[inline] + fn stop_ticking(&self, state: &State) { + if self.tickers.fetch_sub(1, Ordering::Release) == 1 { + // Make sure everyone knows we're about to release tasks. + std::sync::atomic::fence(Ordering::Acquire); + + // Drain any tasks. + self.drain_tasks(state); + } + } + + /// Drain all tasks from this queue. 
+ #[cold] + fn drain_tasks(&self, state: &State) { + while let Ok(task) = self.queue.pop() { + state.queue.push(task).ok(); + state.notify(); } } } diff --git a/tests/larger_tasks.rs b/tests/larger_tasks.rs index 12642ed..ee6cc1e 100644 --- a/tests/larger_tasks.rs +++ b/tests/larger_tasks.rs @@ -6,11 +6,15 @@ use futures_lite::prelude::*; use std::sync::Arc; use std::thread; + +#[cfg(not(miri))] use std::time::Duration; fn do_run>(mut f: impl FnMut(Arc>) -> Fut) { // This should not run for longer than two minutes. + #[cfg(not(miri))] let (_stop_timeout, stopper) = async_channel::bounded::<()>(1); + #[cfg(not(miri))] thread::spawn(move || { block_on(async move { let timeout = async { @@ -86,6 +90,8 @@ fn yield_now() { do_run(|ex| async move { ex.spawn(future::yield_now()).await }) } +// Miri does not support timers. +#[cfg(not(miri))] #[test] fn timer() { do_run(|ex| async move { From c90fd306cda1f0c9b2341d8cade894e9cad77057 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 23 Mar 2024 16:03:09 -0700 Subject: [PATCH 069/110] Revert "bugfix: Account for local queue corner cases" This reverts commit 22a9e8b30587b2aa775ceb00da2185c01176d72b. 
--- Cargo.toml | 2 +- src/lib.rs | 363 ++++++++++++++++++++++++++++++------------ tests/larger_tasks.rs | 6 - 3 files changed, 260 insertions(+), 111 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index aadb95d..8325fe1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,8 +17,8 @@ exclude = ["/.*"] [dependencies] async-lock = "3.0.0" async-task = "4.4.0" +atomic-waker = "1.0" concurrent-queue = "2.0.0" -event-listener = { version = "5.2.0", default-features = false, features = ["std"] } fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } slab = "0.4.4" diff --git a/src/lib.rs b/src/lib.rs index 483bc41..c13f67f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,14 +37,14 @@ use std::fmt; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex, TryLockError}; -use std::task::Waker; +use std::task::{Poll, Waker}; use async_lock::OnceCell; use async_task::{Builder, Runnable}; +use atomic_waker::AtomicWaker; use concurrent_queue::ConcurrentQueue; -use event_listener::{listener, Event}; use futures_lite::{future, prelude::*}; use slab::Slab; use thread_local::ThreadLocal; @@ -225,15 +225,7 @@ impl<'a> Executor<'a> { /// ``` pub async fn tick(&self) { let state = self.state(); - let runnable = state - .tick_with(|local, steal| { - local - .queue - .pop() - .ok() - .or_else(|| if steal { state.queue.pop().ok() } else { None }) - }) - .await; + let runnable = Ticker::new(state).runnable().await; runnable.run(); } @@ -278,23 +270,22 @@ impl<'a> Executor<'a> { move |mut runnable| { // If possible, push into the current local queue and notify the ticker. if let Some(local) = state.local_queue.get() { - // Don't push into the local queue if no one is ticking it. 
- if local.tickers.load(Ordering::Acquire) > 0 { - runnable = if let Err(err) = local.queue.push(runnable) { - err.into_inner() + runnable = if let Err(err) = local.queue.push(runnable) { + err.into_inner() + } else { + // Wake up this thread if it's asleep, otherwise notify another + // thread to try to have the task stolen. + if let Some(waker) = local.waker.take() { + waker.wake(); } else { - // Try to notify threads waiting on this queue. If there are - // none, notify another thread. - if local.waiters.notify_additional(1) == 0 { - state.new_tasks.notify_additional(1); - } - return; + state.notify(); } + return; } } // If the local queue is full, fallback to pushing onto the global injector queue. state.queue.push(runnable).unwrap(); - state.new_tasks.notify_additional(1); + state.notify(); } } @@ -324,9 +315,6 @@ impl Drop for Executor<'_> { } drop(active); - for local_queue in state.local_queue.iter() { - while local_queue.queue.pop().is_ok() {} - } while state.queue.pop().is_ok() {} } } @@ -549,8 +537,11 @@ struct State { /// thread steals the task. local_queue: ThreadLocal, - /// Tickers waiting on new tasks from the global queue. - new_tasks: Event, + /// Set to `true` when a sleeping ticker is notified or no tickers are sleeping. + notified: AtomicBool, + + /// A list of sleeping tickers. + sleepers: Mutex, /// Currently active tasks. active: Mutex>, @@ -562,7 +553,12 @@ impl State { State { queue: ConcurrentQueue::unbounded(), local_queue: ThreadLocal::new(), - new_tasks: Event::new(), + notified: AtomicBool::new(true), + sleepers: Mutex::new(Sleepers { + count: 0, + wakers: Vec::new(), + free_ids: Vec::new(), + }), active: Mutex::new(Slab::new()), } } @@ -570,40 +566,218 @@ impl State { /// Notifies a sleeping ticker. 
#[inline] fn notify(&self) { - self.new_tasks.notify(1); + if self + .notified + .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) + .is_ok() + { + let waker = self.sleepers.lock().unwrap().notify(); + if let Some(w) = waker { + w.wake(); + } + } } +} + +/// A list of sleeping tickers. +struct Sleepers { + /// Number of sleeping tickers (both notified and unnotified). + count: usize, - /// Run a tick using the provided function to get the next task. - async fn tick_with( - &self, - mut local_ticker: impl FnMut(&LocalQueue, bool) -> Option, - ) -> Runnable { - let local = self.local_queue.get_or_default(); + /// IDs and wakers of sleeping unnotified tickers. + /// + /// A sleeping ticker is notified when its waker is missing from this list. + wakers: Vec<(usize, Waker)>, - loop { - // Try to get a runnable from the local queue. - if let Some(runnable) = local_ticker(local, false) { - return runnable; + /// Reclaimed IDs. + free_ids: Vec, +} + +impl Sleepers { + /// Inserts a new sleeping ticker. + fn insert(&mut self, waker: &Waker) -> usize { + let id = match self.free_ids.pop() { + Some(id) => id, + None => self.count + 1, + }; + self.count += 1; + self.wakers.push((id, waker.clone())); + id + } + + /// Re-inserts a sleeping ticker's waker if it was notified. + /// + /// Returns `true` if the ticker was notified. + fn update(&mut self, id: usize, waker: &Waker) -> bool { + for item in &mut self.wakers { + if item.0 == id { + if !item.1.will_wake(waker) { + item.1 = waker.clone(); + } + return false; } + } - // Register a local waiter. - listener!(local.waiters => local_listener); + self.wakers.push((id, waker.clone())); + true + } - // Try for a global runner. - if let Ok(runnable) = self.queue.pop() { - return runnable; + /// Removes a previously inserted sleeping ticker. + /// + /// Returns `true` if the ticker was notified. 
+ fn remove(&mut self, id: usize) -> bool { + self.count -= 1; + self.free_ids.push(id); + + for i in (0..self.wakers.len()).rev() { + if self.wakers[i].0 == id { + self.wakers.remove(i); + return false; } + } + true + } - // Register a global waiter. - listener!(self.new_tasks => global_listener); + /// Returns `true` if a sleeping ticker is notified or no tickers are sleeping. + fn is_notified(&self) -> bool { + self.count == 0 || self.count > self.wakers.len() + } + + /// Returns notification waker for a sleeping ticker. + /// + /// If a ticker was notified already or there are no tickers, `None` will be returned. + fn notify(&mut self) -> Option { + if self.wakers.len() == self.count { + self.wakers.pop().map(|item| item.1) + } else { + None + } + } +} + +/// Runs task one by one. +struct Ticker<'a> { + /// The executor state. + state: &'a State, + + /// Set to a non-zero sleeper ID when in sleeping state. + /// + /// States a ticker can be in: + /// 1) Woken. + /// 2a) Sleeping and unnotified. + /// 2b) Sleeping and notified. + sleeping: usize, +} - // Try for both again. - if let Some(runnable) = local_ticker(local, true) { - return runnable; +impl Ticker<'_> { + /// Creates a ticker. + fn new(state: &State) -> Ticker<'_> { + Ticker { state, sleeping: 0 } + } + + /// Moves the ticker into sleeping and unnotified state. + /// + /// Returns `false` if the ticker was already sleeping and unnotified. + fn sleep(&mut self, waker: &Waker) -> bool { + self.state + .local_queue + .get_or_default() + .waker + .register(waker); + + let mut sleepers = self.state.sleepers.lock().unwrap(); + + match self.sleeping { + // Move to sleeping state. + 0 => { + self.sleeping = sleepers.insert(waker); + } + + // Already sleeping, check if notified. + id => { + if !sleepers.update(id, waker) { + return false; + } } + } + + self.state + .notified + .store(sleepers.is_notified(), Ordering::Release); + + true + } + + /// Moves the ticker into woken state. 
+ fn wake(&mut self) { + if self.sleeping != 0 { + let mut sleepers = self.state.sleepers.lock().unwrap(); + sleepers.remove(self.sleeping); - // Wait on both listeners in parallel. - local_listener.or(global_listener).await; + self.state + .notified + .store(sleepers.is_notified(), Ordering::Release); + } + self.sleeping = 0; + } + + /// Waits for the next runnable task to run. + async fn runnable(&mut self) -> Runnable { + self.runnable_with(|| { + self.state + .local_queue + .get() + .and_then(|local| local.queue.pop().ok()) + .or_else(|| self.state.queue.pop().ok()) + }) + .await + } + + /// Waits for the next runnable task to run, given a function that searches for a task. + async fn runnable_with(&mut self, mut search: impl FnMut() -> Option) -> Runnable { + future::poll_fn(|cx| { + loop { + match search() { + None => { + // Move to sleeping and unnotified state. + if !self.sleep(cx.waker()) { + // If already sleeping and unnotified, return. + return Poll::Pending; + } + } + Some(r) => { + // Wake up. + self.wake(); + + // Notify another ticker now to pick up where this ticker left off, just in + // case running the task takes a long time. + self.state.notify(); + + return Poll::Ready(r); + } + } + } + }) + .await + } +} + +impl Drop for Ticker<'_> { + fn drop(&mut self) { + // If this ticker is in sleeping state, it must be removed from the sleepers list. + if self.sleeping != 0 { + let mut sleepers = self.state.sleepers.lock().unwrap(); + let notified = sleepers.remove(self.sleeping); + + self.state + .notified + .store(sleepers.is_notified(), Ordering::Release); + + // If this ticker was notified, then notify another ticker. + if notified { + drop(sleepers); + self.state.notify(); + } } } } @@ -615,6 +789,9 @@ struct Runner<'a> { /// The executor state. state: &'a State, + /// Inner ticker. + ticker: Ticker<'a>, + /// Bumped every time a runnable task is found. 
ticks: usize, } @@ -622,9 +799,12 @@ struct Runner<'a> { impl Runner<'_> { /// Creates a runner and registers it in the executor state. fn new(state: &State) -> Runner<'_> { - state.local_queue.get_or_default().start_ticking(); - - Runner { state, ticks: 0 } + let runner = Runner { + state, + ticker: Ticker::new(state), + ticks: 0, + }; + runner } /// Waits for the next runnable task to run. @@ -632,18 +812,13 @@ impl Runner<'_> { let local = self.state.local_queue.get_or_default(); let runnable = self - .state - .tick_with(|_, try_stealing| { + .ticker + .runnable_with(|| { // Try the local queue. if let Ok(r) = local.queue.pop() { return Some(r); } - // Remaining work involves stealing. - if !try_stealing { - return None; - } - // Try stealing from the global queue. if let Ok(r) = self.state.queue.pop() { steal(&self.state.queue, &local.queue); @@ -693,7 +868,12 @@ impl Drop for Runner<'_> { fn drop(&mut self) { // Remove the local queue. if let Some(local) = self.state.local_queue.get() { - local.stop_ticking(self.state); + // Re-schedule remaining tasks in the local queue. + for r in local.queue.try_iter() { + // Explicitly reschedule the runnable back onto the global + // queue to avoid rescheduling onto the local one. + self.state.queue.push(r).unwrap(); + } } } } @@ -763,10 +943,24 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ } } + /// Debug wrapper for the sleepers. 
+ struct SleepCount<'a>(&'a Mutex); + + impl fmt::Debug for SleepCount<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0.try_lock() { + Ok(lock) => fmt::Debug::fmt(&lock.count, f), + Err(TryLockError::WouldBlock) => f.write_str(""), + Err(TryLockError::Poisoned(_)) => f.write_str(""), + } + } + } + f.debug_struct(name) .field("active", &ActiveTasks(&state.active)) .field("global_tasks", &state.queue.len()) .field("local_runners", &LocalRunners(&state.local_queue)) + .field("sleepers", &SleepCount(&state.sleepers)) .finish() } @@ -778,54 +972,15 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ /// The local queue *must* be flushed, and all pending runnables /// rescheduled onto the global queue when a runner is dropped. struct LocalQueue { - /// Queue of concurrent tasks. queue: ConcurrentQueue, - - /// Tickers waiting on an event from this queue. - waiters: Event, - - /// Number of tickers waiting on this queue. - tickers: AtomicUsize, + waker: AtomicWaker, } impl Default for LocalQueue { fn default() -> Self { Self { queue: ConcurrentQueue::bounded(512), - waiters: Event::new(), - tickers: AtomicUsize::new(0), - } - } -} - -impl LocalQueue { - /// Indicate that we are now waiting on this queue. - fn start_ticking(&self) { - // Relaxed ordering is fine here. - let old_tickers = self.tickers.fetch_add(1, Ordering::Relaxed); - if old_tickers > isize::MAX as usize { - panic!("too many tickers waiting on one thread"); - } - } - - /// Indicate that we are no longer waiting on this queue. - #[inline] - fn stop_ticking(&self, state: &State) { - if self.tickers.fetch_sub(1, Ordering::Release) == 1 { - // Make sure everyone knows we're about to release tasks. - std::sync::atomic::fence(Ordering::Acquire); - - // Drain any tasks. - self.drain_tasks(state); - } - } - - /// Drain all tasks from this queue. 
- #[cold] - fn drain_tasks(&self, state: &State) { - while let Ok(task) = self.queue.pop() { - state.queue.push(task).ok(); - state.notify(); + waker: AtomicWaker::new(), } } } diff --git a/tests/larger_tasks.rs b/tests/larger_tasks.rs index ee6cc1e..12642ed 100644 --- a/tests/larger_tasks.rs +++ b/tests/larger_tasks.rs @@ -6,15 +6,11 @@ use futures_lite::prelude::*; use std::sync::Arc; use std::thread; - -#[cfg(not(miri))] use std::time::Duration; fn do_run>(mut f: impl FnMut(Arc>) -> Fut) { // This should not run for longer than two minutes. - #[cfg(not(miri))] let (_stop_timeout, stopper) = async_channel::bounded::<()>(1); - #[cfg(not(miri))] thread::spawn(move || { block_on(async move { let timeout = async { @@ -90,8 +86,6 @@ fn yield_now() { do_run(|ex| async move { ex.spawn(future::yield_now()).await }) } -// Miri does not support timers. -#[cfg(not(miri))] #[test] fn timer() { do_run(|ex| async move { From 00dbbbf85d271b801e537b58d3d6ea68b4b30756 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 23 Mar 2024 16:03:33 -0700 Subject: [PATCH 070/110] Revert "feat: Use actual thread local queues instead of using a RwLock" This reverts commit 7592d4188afd43c8fe23a7aabf12acaf16480fb1. 
--- Cargo.toml | 4 +- benches/executor.rs | 1 + examples/priority.rs | 1 + src/lib.rs | 133 ++++++++++++++----------------------------- 4 files changed, 47 insertions(+), 92 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 8325fe1..c7af992 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ name = "async-executor" version = "1.9.0" authors = ["Stjepan Glavina "] edition = "2021" -rust-version = "1.61" +rust-version = "1.60" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" @@ -17,12 +17,10 @@ exclude = ["/.*"] [dependencies] async-lock = "3.0.0" async-task = "4.4.0" -atomic-waker = "1.0" concurrent-queue = "2.0.0" fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } slab = "0.4.4" -thread_local = "1.1" [target.'cfg(target_family = "wasm")'.dependencies] futures-lite = { version = "2.0.0", default-features = false, features = ["std"] } diff --git a/benches/executor.rs b/benches/executor.rs index b6e33c2..20d41a1 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -1,3 +1,4 @@ +use std::future::Future; use std::thread::available_parallelism; use async_executor::Executor; diff --git a/examples/priority.rs b/examples/priority.rs index 60d5c9a..df77dd1 100644 --- a/examples/priority.rs +++ b/examples/priority.rs @@ -1,5 +1,6 @@ //! An executor with task priorities. 
+use std::future::Future; use std::thread; use async_executor::{Executor, Task}; diff --git a/src/lib.rs b/src/lib.rs index c13f67f..3fea5e0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -34,20 +34,19 @@ )] use std::fmt; +use std::future::Future; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Mutex, TryLockError}; +use std::sync::{Arc, Mutex, RwLock, TryLockError}; use std::task::{Poll, Waker}; use async_lock::OnceCell; use async_task::{Builder, Runnable}; -use atomic_waker::AtomicWaker; use concurrent_queue::ConcurrentQueue; use futures_lite::{future, prelude::*}; use slab::Slab; -use thread_local::ThreadLocal; #[doc(no_inline)] pub use async_task::Task; @@ -267,23 +266,8 @@ impl<'a> Executor<'a> { fn schedule(&self) -> impl Fn(Runnable) + Send + Sync + 'static { let state = self.state().clone(); - move |mut runnable| { - // If possible, push into the current local queue and notify the ticker. - if let Some(local) = state.local_queue.get() { - runnable = if let Err(err) = local.queue.push(runnable) { - err.into_inner() - } else { - // Wake up this thread if it's asleep, otherwise notify another - // thread to try to have the task stolen. - if let Some(waker) = local.waker.take() { - waker.wake(); - } else { - state.notify(); - } - return; - } - } - // If the local queue is full, fallback to pushing onto the global injector queue. + // TODO: If possible, push into the current local queue and notify the ticker. + move |runnable| { state.queue.push(runnable).unwrap(); state.notify(); } @@ -526,16 +510,7 @@ struct State { queue: ConcurrentQueue, /// Local queues created by runners. - /// - /// If possible, tasks are scheduled onto the local queue, and will only defer - /// to other global queue when they're full, or the task is being scheduled from - /// a thread without a runner. 
- /// - /// Note: if a runner terminates and drains its local queue, any subsequent - /// spawn calls from the same thread will be added to the same queue, but won't - /// be executed until `Executor::run` is run on the thread again, or another - /// thread steals the task. - local_queue: ThreadLocal, + local_queues: RwLock>>>, /// Set to `true` when a sleeping ticker is notified or no tickers are sleeping. notified: AtomicBool, @@ -552,7 +527,7 @@ impl State { fn new() -> State { State { queue: ConcurrentQueue::unbounded(), - local_queue: ThreadLocal::new(), + local_queues: RwLock::new(Vec::new()), notified: AtomicBool::new(true), sleepers: Mutex::new(Sleepers { count: 0, @@ -679,12 +654,6 @@ impl Ticker<'_> { /// /// Returns `false` if the ticker was already sleeping and unnotified. fn sleep(&mut self, waker: &Waker) -> bool { - self.state - .local_queue - .get_or_default() - .waker - .register(waker); - let mut sleepers = self.state.sleepers.lock().unwrap(); match self.sleeping { @@ -723,14 +692,7 @@ impl Ticker<'_> { /// Waits for the next runnable task to run. async fn runnable(&mut self) -> Runnable { - self.runnable_with(|| { - self.state - .local_queue - .get() - .and_then(|local| local.queue.pop().ok()) - .or_else(|| self.state.queue.pop().ok()) - }) - .await + self.runnable_with(|| self.state.queue.pop().ok()).await } /// Waits for the next runnable task to run, given a function that searches for a task. @@ -792,6 +754,9 @@ struct Runner<'a> { /// Inner ticker. ticker: Ticker<'a>, + /// The local queue. + local: Arc>, + /// Bumped every time a runnable task is found. ticks: usize, } @@ -802,34 +767,38 @@ impl Runner<'_> { let runner = Runner { state, ticker: Ticker::new(state), + local: Arc::new(ConcurrentQueue::bounded(512)), ticks: 0, }; + state + .local_queues + .write() + .unwrap() + .push(runner.local.clone()); runner } /// Waits for the next runnable task to run. 
async fn runnable(&mut self, rng: &mut fastrand::Rng) -> Runnable { - let local = self.state.local_queue.get_or_default(); - let runnable = self .ticker .runnable_with(|| { // Try the local queue. - if let Ok(r) = local.queue.pop() { + if let Ok(r) = self.local.pop() { return Some(r); } // Try stealing from the global queue. if let Ok(r) = self.state.queue.pop() { - steal(&self.state.queue, &local.queue); + steal(&self.state.queue, &self.local); return Some(r); } // Try stealing from other runners. - let local_queues = &self.state.local_queue; + let local_queues = self.state.local_queues.read().unwrap(); // Pick a random starting point in the iterator list and rotate the list. - let n = local_queues.iter().count(); + let n = local_queues.len(); let start = rng.usize(..n); let iter = local_queues .iter() @@ -838,12 +807,12 @@ impl Runner<'_> { .take(n); // Remove this runner's local queue. - let iter = iter.filter(|other| !core::ptr::eq(*other, local)); + let iter = iter.filter(|local| !Arc::ptr_eq(local, &self.local)); // Try stealing from each local queue in the list. - for other in iter { - steal(&other.queue, &local.queue); - if let Ok(r) = local.queue.pop() { + for local in iter { + steal(local, &self.local); + if let Ok(r) = self.local.pop() { return Some(r); } } @@ -857,7 +826,7 @@ impl Runner<'_> { if self.ticks % 64 == 0 { // Steal tasks from the global queue to ensure fair task scheduling. - steal(&self.state.queue, &local.queue); + steal(&self.state.queue, &self.local); } runnable @@ -867,13 +836,15 @@ impl Runner<'_> { impl Drop for Runner<'_> { fn drop(&mut self) { // Remove the local queue. - if let Some(local) = self.state.local_queue.get() { - // Re-schedule remaining tasks in the local queue. - for r in local.queue.try_iter() { - // Explicitly reschedule the runnable back onto the global - // queue to avoid rescheduling onto the local one. 
- self.state.queue.push(r).unwrap(); - } + self.state + .local_queues + .write() + .unwrap() + .retain(|local| !Arc::ptr_eq(local, &self.local)); + + // Re-schedule remaining tasks in the local queue. + while let Ok(r) = self.local.pop() { + r.schedule(); } } } @@ -933,13 +904,18 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ } /// Debug wrapper for the local runners. - struct LocalRunners<'a>(&'a ThreadLocal); + struct LocalRunners<'a>(&'a RwLock>>>); impl fmt::Debug for LocalRunners<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.0.iter().map(|local| local.queue.len())) - .finish() + match self.0.try_read() { + Ok(lock) => f + .debug_list() + .entries(lock.iter().map(|queue| queue.len())) + .finish(), + Err(TryLockError::WouldBlock) => f.write_str(""), + Err(TryLockError::Poisoned(_)) => f.write_str(""), + } } } @@ -959,32 +935,11 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ f.debug_struct(name) .field("active", &ActiveTasks(&state.active)) .field("global_tasks", &state.queue.len()) - .field("local_runners", &LocalRunners(&state.local_queue)) + .field("local_runners", &LocalRunners(&state.local_queues)) .field("sleepers", &SleepCount(&state.sleepers)) .finish() } -/// A queue local to each thread. -/// -/// It's Default implementation is used for initializing each -/// thread's queue via `ThreadLocal::get_or_default`. -/// -/// The local queue *must* be flushed, and all pending runnables -/// rescheduled onto the global queue when a runner is dropped. -struct LocalQueue { - queue: ConcurrentQueue, - waker: AtomicWaker, -} - -impl Default for LocalQueue { - fn default() -> Self { - Self { - queue: ConcurrentQueue::bounded(512), - waker: AtomicWaker::new(), - } - } -} - /// Runs a closure when dropped. 
struct CallOnDrop(F); From a2c1267c850ea6714625c921011c98074a083175 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 23 Mar 2024 16:04:07 -0700 Subject: [PATCH 071/110] chore: Fix new nightly warnings Signed-off-by: John Nunley --- benches/executor.rs | 1 - examples/priority.rs | 1 - src/lib.rs | 1 - 3 files changed, 3 deletions(-) diff --git a/benches/executor.rs b/benches/executor.rs index 20d41a1..b6e33c2 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -1,4 +1,3 @@ -use std::future::Future; use std::thread::available_parallelism; use async_executor::Executor; diff --git a/examples/priority.rs b/examples/priority.rs index df77dd1..60d5c9a 100644 --- a/examples/priority.rs +++ b/examples/priority.rs @@ -1,6 +1,5 @@ //! An executor with task priorities. -use std::future::Future; use std::thread; use async_executor::{Executor, Task}; diff --git a/src/lib.rs b/src/lib.rs index 3fea5e0..31fd40d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -34,7 +34,6 @@ )] use std::fmt; -use std::future::Future; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; From b6d3a60b44de4843cae6e8421715a4162cc2275e Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 23 Mar 2024 16:26:19 -0700 Subject: [PATCH 072/110] chore: Fix MIRI failure in larger_tasks Signed-off-by: John Nunley --- tests/larger_tasks.rs | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/tests/larger_tasks.rs b/tests/larger_tasks.rs index 12642ed..cc57988 100644 --- a/tests/larger_tasks.rs +++ b/tests/larger_tasks.rs @@ -10,18 +10,22 @@ use std::time::Duration; fn do_run>(mut f: impl FnMut(Arc>) -> Fut) { // This should not run for longer than two minutes. 
- let (_stop_timeout, stopper) = async_channel::bounded::<()>(1); - thread::spawn(move || { - block_on(async move { - let timeout = async { - async_io::Timer::after(Duration::from_secs(2 * 60)).await; - eprintln!("test timed out after 2m"); - std::process::exit(1) - }; - - let _ = stopper.recv().or(timeout).await; - }) - }); + #[cfg(not(miri))] + let _stop_timeout = { + let (stop_timeout, stopper) = async_channel::bounded::<()>(1); + thread::spawn(move || { + block_on(async move { + let timeout = async { + async_io::Timer::after(Duration::from_secs(2 * 60)).await; + eprintln!("test timed out after 2m"); + std::process::exit(1) + }; + + let _ = stopper.recv().or(timeout).await; + }) + }); + stop_timeout + }; let ex = Arc::new(Executor::new()); From 17720b098a32454fdc522e527bd86a8bdeae33fe Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 29 Mar 2024 20:58:08 -0700 Subject: [PATCH 073/110] v1.9.1 Signed-off-by: John Nunley --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 256fbd4..05ad9e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.9.1 + +- Remove the thread-local optimization due to the bugs that it introduces. (#106) + # Version 1.9.0 - Re-introduce the thread-local task push optimization to the executor. (#93) diff --git a/Cargo.toml b/Cargo.toml index c7af992..0b8011d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.9.0" +version = "1.9.1" authors = ["Stjepan Glavina "] edition = "2021" rust-version = "1.60" From d3196999f466107fd22694662b502bc1e781f8dc Mon Sep 17 00:00:00 2001 From: John Nunley Date: Tue, 13 Feb 2024 07:18:54 -0800 Subject: [PATCH 074/110] feat: Add a way to batch spawn tasks For some workloads many tasks are spawned at a time. 
This requires locking and unlocking the executor's inner lock every time you spawn a task. If you spawn many tasks this can be expensive. This commit exposes a new "spawn_batch" method on both types. This method allows the user to spawn an entire set of tasks at a time. Closes #91 Signed-off-by: John Nunley --- benches/executor.rs | 15 ++++ src/lib.rs | 203 +++++++++++++++++++++++++++++++++++++------- tests/drop.rs | 14 +++ tests/spawn_many.rs | 45 ++++++++++ 4 files changed, 244 insertions(+), 33 deletions(-) create mode 100644 tests/spawn_many.rs diff --git a/benches/executor.rs b/benches/executor.rs index b6e33c2..791610f 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -51,6 +51,21 @@ fn running_benches(c: &mut Criterion) { ); }); + group.bench_function("executor::spawn_batch", |b| { + run( + || { + let mut handles = vec![]; + + b.iter(|| { + EX.spawn_many((0..250).map(|_| future::yield_now()), &mut handles); + }); + + handles.clear(); + }, + *multithread, + ) + }); + group.bench_function("executor::spawn_many_local", |b| { run( || { diff --git a/src/lib.rs b/src/lib.rs index 31fd40d..a7955bb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -149,6 +149,85 @@ impl<'a> Executor<'a> { pub fn spawn(&self, future: impl Future + Send + 'a) -> Task { let mut active = self.state().active.lock().unwrap(); + // SAFETY: `T` and the future are `Send`. + unsafe { self.spawn_inner(future, &mut active) } + } + + /// Spawns many tasks onto the executor. + /// + /// As opposed to the [`spawn`] method, this locks the executor's inner task lock once and + /// spawns all of the tasks in one go. With large amounts of tasks this can improve + /// contention. + /// + /// For very large numbers of tasks the lock is occasionally dropped and re-acquired to + /// prevent runner thread starvation. It is assumed that the iterator provided does not + /// block; blocking iterators can lock up the internal mutex and therefore the entire + /// executor. 
+ /// + /// ## Example + /// + /// ``` + /// use async_executor::Executor; + /// use futures_lite::{stream, prelude::*}; + /// use std::future::ready; + /// + /// # futures_lite::future::block_on(async { + /// let mut ex = Executor::new(); + /// + /// let futures = [ + /// ready(1), + /// ready(2), + /// ready(3) + /// ]; + /// + /// // Spawn all of the futures onto the executor at once. + /// let mut tasks = vec![]; + /// ex.spawn_many(futures, &mut tasks); + /// + /// // Await all of them. + /// let results = ex.run(async move { + /// stream::iter(tasks).then(|x| x).collect::>().await + /// }).await; + /// assert_eq!(results, [1, 2, 3]); + /// # }); + /// ``` + /// + /// [`spawn`]: Executor::spawn + pub fn spawn_many + Send + 'a>( + &self, + futures: impl IntoIterator, + handles: &mut impl Extend>, + ) { + let mut active = Some(self.state().active.lock().unwrap()); + + // Convert the futures into tasks. + let tasks = futures.into_iter().enumerate().map(move |(i, future)| { + // SAFETY: `T` and the future are `Send`. + let task = unsafe { self.spawn_inner(future, active.as_mut().unwrap()) }; + + // Yield the lock every once in a while to ease contention. + if i.wrapping_sub(1) % 500 == 0 { + drop(active.take()); + active = Some(self.state().active.lock().unwrap()); + } + + task + }); + + // Push the tasks to the user's collection. + handles.extend(tasks); + } + + /// Spawn a future while holding the inner lock. + /// + /// # Safety + /// + /// If this is an `Executor`, `F` and `T` must be `Send`. + unsafe fn spawn_inner( + &self, + future: impl Future + 'a, + active: &mut Slab, + ) -> Task { // Remove the task from the set of active tasks when the future finishes. let entry = active.vacant_entry(); let index = entry.key(); @@ -159,11 +238,30 @@ impl<'a> Executor<'a> { }; // Create the task and register it in the set of active tasks. 
- let (runnable, task) = unsafe { - Builder::new() - .propagate_panic(true) - .spawn_unchecked(|()| future, self.schedule()) - }; + // + // SAFETY: + // + // If `future` is not `Send`, this must be a `LocalExecutor` as per this + // function's unsafe precondition. Since `LocalExecutor` is `!Sync`, + // `try_tick`, `tick` and `run` can only be called from the origin + // thread of the `LocalExecutor`. Similarly, `spawn` can only be called + // from the origin thread, ensuring that `future` and the executor share + // the same origin thread. The `Runnable` can be scheduled from other + // threads, but because of the above `Runnable` can only be called or + // dropped on the origin thread. + // + // `future` is not `'static`, but we make sure that the `Runnable` does + // not outlive `'a`. When the executor is dropped, the `active` field is + // drained and all of the `Waker`s are woken. Then, the queue inside of + // the `Executor` is drained of all of its runnables. This ensures that + // runnables are dropped and this precondition is satisfied. + // + // `self.schedule()` is `Send`, `Sync` and `'static`, as checked below. + // Therefore we do not need to worry about what is done with the + // `Waker`. + let (runnable, task) = Builder::new() + .propagate_panic(true) + .spawn_unchecked(|()| future, self.schedule()); entry.insert(runnable.waker()); runnable.schedule(); @@ -292,7 +390,7 @@ impl<'a> Executor<'a> { impl Drop for Executor<'_> { fn drop(&mut self) { if let Some(state) = self.state.get() { - let mut active = state.active.lock().unwrap(); + let mut active = state.active.lock().unwrap_or_else(|e| e.into_inner()); for w in active.drain() { w.wake(); } @@ -397,25 +495,70 @@ impl<'a> LocalExecutor<'a> { pub fn spawn(&self, future: impl Future + 'a) -> Task { let mut active = self.inner().state().active.lock().unwrap(); - // Remove the task from the set of active tasks when the future finishes. 
- let entry = active.vacant_entry(); - let index = entry.key(); - let state = self.inner().state().clone(); - let future = async move { - let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().try_remove(index))); - future.await - }; + // SAFETY: This executor is not thread safe, so the future and its result + // cannot be sent to another thread. + unsafe { self.inner().spawn_inner(future, &mut active) } + } - // Create the task and register it in the set of active tasks. - let (runnable, task) = unsafe { - Builder::new() - .propagate_panic(true) - .spawn_unchecked(|()| future, self.schedule()) - }; - entry.insert(runnable.waker()); + /// Spawns many tasks onto the executor. + /// + /// As opposed to the [`spawn`] method, this locks the executor's inner task lock once and + /// spawns all of the tasks in one go. With large amounts of tasks this can improve + /// contention. + /// + /// It is assumed that the iterator provided does not block; blocking iterators can lock up + /// the internal mutex and therefore the entire executor. Unlike [`Executor::spawn`], the + /// mutex is not released, as there are no other threads that can poll this executor. + /// + /// ## Example + /// + /// ``` + /// use async_executor::LocalExecutor; + /// use futures_lite::{stream, prelude::*}; + /// use std::future::ready; + /// + /// # futures_lite::future::block_on(async { + /// let mut ex = LocalExecutor::new(); + /// + /// let futures = [ + /// ready(1), + /// ready(2), + /// ready(3) + /// ]; + /// + /// // Spawn all of the futures onto the executor at once. + /// let mut tasks = vec![]; + /// ex.spawn_many(futures, &mut tasks); + /// + /// // Await all of them. 
+ /// let results = ex.run(async move { + /// stream::iter(tasks).then(|x| x).collect::>().await + /// }).await; + /// assert_eq!(results, [1, 2, 3]); + /// # }); + /// ``` + /// + /// [`spawn`]: LocalExecutor::spawn + /// [`Executor::spawn_many`]: Executor::spawn_many + pub fn spawn_many + Send + 'a>( + &self, + futures: impl IntoIterator, + handles: &mut impl Extend>, + ) { + let mut active = self.inner().state().active.lock().unwrap(); - runnable.schedule(); - task + // Convert all of the futures to tasks. + let tasks = futures.into_iter().map(|future| { + // SAFETY: This executor is not thread safe, so the future and its result + // cannot be sent to another thread. + unsafe { self.inner().spawn_inner(future, &mut active) } + + // As only one thread can spawn or poll tasks at a time, there is no need + // to release lock contention here. + }); + + // Push them to the user's collection. + handles.extend(tasks); } /// Attempts to run a task if at least one is scheduled. @@ -481,16 +624,6 @@ impl<'a> LocalExecutor<'a> { self.inner().run(future).await } - /// Returns a function that schedules a runnable task when it gets woken up. - fn schedule(&self) -> impl Fn(Runnable) + Send + Sync + 'static { - let state = self.inner().state().clone(); - - move |runnable| { - state.queue.push(runnable).unwrap(); - state.notify(); - } - } - /// Returns a reference to the inner executor. 
fn inner(&self) -> &Executor<'a> { &self.inner @@ -953,6 +1086,7 @@ fn _ensure_send_and_sync() { fn is_send(_: T) {} fn is_sync(_: T) {} + fn is_static(_: T) {} is_send::>(Executor::new()); is_sync::>(Executor::new()); @@ -962,6 +1096,9 @@ fn _ensure_send_and_sync() { is_sync(ex.run(pending::<()>())); is_send(ex.tick()); is_sync(ex.tick()); + is_send(ex.schedule()); + is_sync(ex.schedule()); + is_static(ex.schedule()); /// ```compile_fail /// use async_executor::LocalExecutor; diff --git a/tests/drop.rs b/tests/drop.rs index 2b1ce56..54a0741 100644 --- a/tests/drop.rs +++ b/tests/drop.rs @@ -121,6 +121,20 @@ fn drop_finished_task_and_then_drop_executor() { assert_eq!(DROP.load(Ordering::SeqCst), 1); } +#[test] +fn iterator_panics_mid_run() { + let ex = Executor::new(); + + let panic = std::panic::catch_unwind(|| { + let mut handles = vec![]; + ex.spawn_many( + (0..50).map(|i| if i == 25 { panic!() } else { future::ready(i) }), + &mut handles, + ) + }); + assert!(panic.is_err()); +} + struct CallOnDrop(F); impl Drop for CallOnDrop { diff --git a/tests/spawn_many.rs b/tests/spawn_many.rs new file mode 100644 index 0000000..cebe2d3 --- /dev/null +++ b/tests/spawn_many.rs @@ -0,0 +1,45 @@ +use async_executor::{Executor, LocalExecutor}; +use futures_lite::future; + +#[cfg(not(miri))] +const READY_COUNT: usize = 50_000; +#[cfg(miri)] +const READY_COUNT: usize = 505; + +#[test] +fn spawn_many() { + future::block_on(async { + let ex = Executor::new(); + + // Spawn a lot of tasks. + let mut tasks = vec![]; + ex.spawn_many((0..READY_COUNT).map(future::ready), &mut tasks); + + // Run all of the tasks in parallel. + ex.run(async move { + for (i, task) in tasks.into_iter().enumerate() { + assert_eq!(task.await, i); + } + }) + .await; + }); +} + +#[test] +fn spawn_many_local() { + future::block_on(async { + let ex = LocalExecutor::new(); + + // Spawn a lot of tasks. 
+ let mut tasks = vec![]; + ex.spawn_many((0..READY_COUNT).map(future::ready), &mut tasks); + + // Run all of the tasks in parallel. + ex.run(async move { + for (i, task) in tasks.into_iter().enumerate() { + assert_eq!(task.await, i); + } + }) + .await; + }); +} From 00f0b99fadc4d1f4f9f3770d1662fd58c3b57538 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 5 Apr 2024 08:23:43 -0700 Subject: [PATCH 075/110] chore: Silence clippy Signed-off-by: John Nunley --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index a7955bb..7d91a59 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -719,7 +719,7 @@ impl Sleepers { for item in &mut self.wakers { if item.0 == id { if !item.1.will_wake(waker) { - item.1 = waker.clone(); + item.1.clone_from(waker); } return false; } From 4b37c612f6f3df4da548854fdc166684651af6d6 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 7 Apr 2024 07:40:07 -0700 Subject: [PATCH 076/110] v1.10.0 Signed-off-by: John Nunley --- CHANGELOG.md | 4 ++++ Cargo.toml | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 05ad9e0..a460eba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.10.0 + +- Add a function `spawn_batch` that allows users to spawn multiple tasks while only locking the executor once. (#92) + # Version 1.9.1 - Remove the thread-local optimization due to the bugs that it introduces. 
(#106) diff --git a/Cargo.toml b/Cargo.toml index 0b8011d..5d3f7bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,8 +3,8 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.9.1" -authors = ["Stjepan Glavina "] +version = "1.10.0" +authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" rust-version = "1.60" description = "Async executor" From 649bdfda2355ffbd18b1796c18e135310780181c Mon Sep 17 00:00:00 2001 From: James Liu Date: Mon, 8 Apr 2024 19:41:14 -0700 Subject: [PATCH 077/110] Support racy initialization of an Executor's state Fixes #89. Uses @notgull's suggestion of using a `AtomicPtr` with a racy initialization instead of a `OnceCell`. For the addition of more `unsafe`, I added the `clippy::undocumented_unsafe_blocks` lint at a warn, and fixed a few of the remaining open clippy issues (i.e. `Waker::clone_from` already handling the case where they're equal). Removing `async_lock` as a dependency shouldn't be a SemVer breaking change. --- Cargo.toml | 1 - src/lib.rs | 130 ++++++++++++++++++++++++++++++++++++----------------- 2 files changed, 88 insertions(+), 43 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5d3f7bb..6920042 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,6 @@ categories = ["asynchronous", "concurrency"] exclude = ["/.*"] [dependencies] -async-lock = "3.0.0" async-task = "4.4.0" concurrent-queue = "2.0.0" fastrand = "2.0.0" diff --git a/src/lib.rs b/src/lib.rs index 7d91a59..0c51fd5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,7 +25,12 @@ //! future::block_on(ex.run(task)); //! 
``` -#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![warn( + missing_docs, + missing_debug_implementations, + rust_2018_idioms, + clippy::undocumented_unsafe_blocks +)] #![doc( html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" )] @@ -37,11 +42,10 @@ use std::fmt; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; use std::sync::{Arc, Mutex, RwLock, TryLockError}; use std::task::{Poll, Waker}; -use async_lock::OnceCell; use async_task::{Builder, Runnable}; use concurrent_queue::ConcurrentQueue; use futures_lite::{future, prelude::*}; @@ -76,13 +80,15 @@ pub use async_task::Task; /// ``` pub struct Executor<'a> { /// The executor state. - state: OnceCell>, + state: AtomicPtr, /// Makes the `'a` lifetime invariant. _marker: PhantomData>, } +// SAFETY: Executor stores no thread local state that can be accessed via other thread. unsafe impl Send for Executor<'_> {} +// SAFETY: Executor internally synchronizes all of it's operations internally. unsafe impl Sync for Executor<'_> {} impl UnwindSafe for Executor<'_> {} @@ -106,7 +112,7 @@ impl<'a> Executor<'a> { /// ``` pub const fn new() -> Executor<'a> { Executor { - state: OnceCell::new(), + state: AtomicPtr::new(std::ptr::null_mut()), _marker: PhantomData, } } @@ -231,7 +237,7 @@ impl<'a> Executor<'a> { // Remove the task from the set of active tasks when the future finishes. let entry = active.vacant_entry(); let index = entry.key(); - let state = self.state().clone(); + let state = self.state_as_arc(); let future = async move { let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().try_remove(index))); future.await @@ -361,7 +367,7 @@ impl<'a> Executor<'a> { /// Returns a function that schedules a runnable task when it gets woken up. 
fn schedule(&self) -> impl Fn(Runnable) + Send + Sync + 'static { - let state = self.state().clone(); + let state = self.state_as_arc(); // TODO: If possible, push into the current local queue and notify the ticker. move |runnable| { @@ -370,34 +376,73 @@ impl<'a> Executor<'a> { } } - /// Returns a reference to the inner state. - fn state(&self) -> &Arc { - #[cfg(not(target_family = "wasm"))] - { - return self.state.get_or_init_blocking(|| Arc::new(State::new())); + /// Returns a pointer to the inner state. + #[inline] + fn state_ptr(&self) -> *const State { + #[cold] + fn alloc_state(atomic_ptr: &AtomicPtr) -> *mut State { + let state = Arc::new(State::new()); + // TODO: Switch this to use cast_mut once the MSRV can be bumped past 1.65 + let ptr = Arc::into_raw(state) as *mut State; + if let Err(actual) = atomic_ptr.compare_exchange( + std::ptr::null_mut(), + ptr, + Ordering::AcqRel, + Ordering::Acquire, + ) { + // SAFETY: This was just created from Arc::into_raw. + drop(unsafe { Arc::from_raw(ptr) }); + actual + } else { + ptr + } } - // Some projects use this on WASM for some reason. In this case get_or_init_blocking - // doesn't work. Just poll the future once and panic if there is contention. - #[cfg(target_family = "wasm")] - future::block_on(future::poll_once( - self.state.get_or_init(|| async { Arc::new(State::new()) }), - )) - .expect("encountered contention on WASM") + let mut ptr = self.state.load(Ordering::Acquire); + if ptr.is_null() { + ptr = alloc_state(&self.state); + } + ptr + } + + /// Returns a reference to the inner state. + #[inline] + fn state(&self) -> &State { + // SAFETY: So long as an Executor lives, it's state pointer will always be valid + // when accessed through state_ptr. + unsafe { &*self.state_ptr() } + } + + // Clones the inner state Arc + #[inline] + fn state_as_arc(&self) -> Arc { + // SAFETY: So long as an Executor lives, it's state pointer will always be a valid + // Arc when accessed through state_ptr. 
+ let arc = unsafe { Arc::from_raw(self.state_ptr()) }; + let clone = arc.clone(); + std::mem::forget(arc); + clone } } impl Drop for Executor<'_> { fn drop(&mut self) { - if let Some(state) = self.state.get() { - let mut active = state.active.lock().unwrap_or_else(|e| e.into_inner()); - for w in active.drain() { - w.wake(); - } - drop(active); + let ptr = *self.state.get_mut(); + if ptr.is_null() { + return; + } + + // SAFETY: As ptr is not null, it was allocated via Arc::new and converted + // via Arc::into_raw in state_ptr. + let state = unsafe { Arc::from_raw(ptr) }; - while state.queue.pop().is_ok() {} + let mut active = state.active.lock().unwrap_or_else(|e| e.into_inner()); + for w in active.drain() { + w.wake(); } + drop(active); + + while state.queue.pop().is_ok() {} } } @@ -718,9 +763,7 @@ impl Sleepers { fn update(&mut self, id: usize, waker: &Waker) -> bool { for item in &mut self.wakers { if item.0 == id { - if !item.1.will_wake(waker) { - item.1.clone_from(waker); - } + item.1.clone_from(waker); return false; } } @@ -1006,21 +1049,24 @@ fn steal(src: &ConcurrentQueue, dest: &ConcurrentQueue) { /// Debug implementation for `Executor` and `LocalExecutor`. fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Get a reference to the state. - let state = match executor.state.get() { - Some(state) => state, - None => { - // The executor has not been initialized. - struct Uninitialized; - - impl fmt::Debug for Uninitialized { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("") - } + let ptr = executor.state.load(Ordering::Acquire); + if ptr.is_null() { + // The executor has not been initialized. 
+ struct Uninitialized; + + impl fmt::Debug for Uninitialized { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("") } - - return f.debug_tuple(name).field(&Uninitialized).finish(); } - }; + + return f.debug_tuple(name).field(&Uninitialized).finish(); + } + + // SAFETY: If the state pointer is not null, it must have been + // allocated properly by Arc::new and converted via Arc::into_raw + // in state_ptr. + let state = unsafe { &*ptr }; /// Debug wrapper for the number of active tasks. struct ActiveTasks<'a>(&'a Mutex>); From df57d9bc98bff13fc1d4d62e652817590d107649 Mon Sep 17 00:00:00 2001 From: Jacob Rothstein Date: Thu, 11 Apr 2024 16:33:17 -0700 Subject: [PATCH 078/110] feat: reexport async_task::FallibleTask Motivation: FallibleTask is part of the public interface of this crate, in that Task::fallible returns FallibleTask. However, in order to name that type, users need to add a direct dependency on async_task and ensure the crates versions are compatible. Reexporting allows crate users to name the type directly. --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 0c51fd5..a663be8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -52,7 +52,7 @@ use futures_lite::{future, prelude::*}; use slab::Slab; #[doc(no_inline)] -pub use async_task::Task; +pub use async_task::{FallibleTask, Task}; /// An async executor. /// From ef512cb3846815f1a8e40a6cd23e9140289f2696 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 13 Apr 2024 22:52:52 -0700 Subject: [PATCH 079/110] v1.11.0 Signed-off-by: John Nunley --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a460eba..ca8ebc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# Version 1.11.0 + +- Re-export the `async_task::FallibleTask` primitive. (#113) +- Support racy initialization of the executor state. 
This should allow the executor to be + initialized on web targets without any issues. (#108) + # Version 1.10.0 - Add a function `spawn_batch` that allows users to spawn multiple tasks while only locking the executor once. (#92) diff --git a/Cargo.toml b/Cargo.toml index 6920042..15d0cf5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.10.0" +version = "1.11.0" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" rust-version = "1.60" From f1c7ae3340bd04e1504f2d6aca8180f12d14eda9 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Thu, 25 Apr 2024 22:52:40 -0700 Subject: [PATCH 080/110] bench: Add some more filled-out benchmarks This commit aims to add benchmarks that more realistically reflect workloads that might happen in the real world. These benchmarks are as follows: - "channels", which sets up TASKS tasks, where each task uses a channel to wake up the next one. - "server", which tries to simulate a web server-type scenario. Signed-off-by: John Nunley --- benches/executor.rs | 117 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 117 insertions(+) diff --git a/benches/executor.rs b/benches/executor.rs index 791610f..f624513 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -1,3 +1,4 @@ +use std::mem; use std::thread::available_parallelism; use async_executor::Executor; @@ -139,6 +140,122 @@ fn running_benches(c: &mut Criterion) { *multithread, ); }); + + group.bench_function("executor::channels", |b| { + run( + || { + b.iter(move || { + future::block_on(async { + // Create channels. 
+ let mut tasks = Vec::new(); + let (first_send, first_recv) = async_channel::bounded(1); + let mut current_recv = first_recv; + + for _ in 0..TASKS { + let (next_send, next_recv) = async_channel::bounded(1); + let current_recv = mem::replace(&mut current_recv, next_recv); + + tasks.push(EX.spawn(async move { + // Send a notification on to the next task. + for _ in 0..STEPS { + current_recv.recv().await.unwrap(); + next_send.send(()).await.unwrap(); + } + })); + } + + for _ in 0..STEPS { + first_send.send(()).await.unwrap(); + current_recv.recv().await.unwrap(); + } + + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ) + }); + + group.bench_function("executor::web_server", |b| { + run( + || { + b.iter(move || { + future::block_on(async { + let (db_send, db_recv) = + async_channel::bounded::>(TASKS / 5); + let mut db_rng = fastrand::Rng::with_seed(0x12345678); + let mut web_rng = db_rng.fork(); + + // This task simulates a database. + let db_task = EX.spawn(async move { + loop { + // Wait for a new task. + let incoming = match db_recv.recv().await { + Ok(incoming) => incoming, + Err(_) => break, + }; + + // Process the task. Maybe it takes a while. + for _ in 0..db_rng.usize(..10) { + future::yield_now().await; + } + + // Send the data back. + incoming.send(db_rng.usize(..)).await.ok(); + } + }); + + // This task simulates a web server waiting for new tasks. + let server_task = EX.spawn(async move { + for i in 0..TASKS { + // Get a new connection. + if web_rng.usize(..=16) == 16 { + future::yield_now().await; + } + + let mut web_rng = web_rng.fork(); + let db_send = db_send.clone(); + let task = EX.spawn(async move { + // Check if the data is cached... + if web_rng.bool() { + // ...it's in cache! + future::yield_now().await; + return; + } + + // Otherwise we have to make a DB call or two. 
+ for _ in 0..web_rng.usize(STEPS / 2..STEPS) { + let (resp_send, resp_recv) = async_channel::bounded(1); + db_send.send(resp_send).await.unwrap(); + criterion::black_box(resp_recv.recv().await.unwrap()); + } + + // Send the data back... + for _ in 0..web_rng.usize(3..16) { + future::yield_now().await; + } + }); + + task.detach(); + + if i & 16 == 0 { + future::yield_now().await; + } + } + }); + + // Spawn and wait for it to stop. + server_task.await; + db_task.await; + }); + }) + }, + *multithread, + ) + }); } } From 924b4530a72209b417fae5fb53e32d63a9824fcb Mon Sep 17 00:00:00 2001 From: James Liu Date: Sun, 12 May 2024 16:22:32 -0700 Subject: [PATCH 081/110] feat: Implement static executors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Resolves #111. Creates a `StaticExecutor` type under a feature flag and allows constructing it from an `Executor` via `Executor::leak`. Unlike the executor it came from, it's a wrapper around a `State` and omits all changes to `active`. Note, unlike the API proposed in #111, this PR also includes a unsafe `StaticExecutor::spawn_scoped` for spawning non-'static tasks, where the caller is responsible for ensuring that the task doesn't outlive the borrowed state. This would be required for Bevy to migrate to this type, where we're currently using lifetime transmutation on `Executor` to enable `Thread::scope`-like APIs for working with borrowed state. `StaticExecutor` does not have an external lifetime parameter so this approach is infeasible without such an API. 
The performance gains while using the type are substantial: ``` single_thread/executor::spawn_one time: [1.6157 µs 1.6238 µs 1.6362 µs] Found 6 outliers among 100 measurements (6.00%) 3 (3.00%) high mild 3 (3.00%) high severe single_thread/executor::spawn_batch time: [28.169 µs 29.650 µs 32.196 µs] Found 19 outliers among 100 measurements (19.00%) 10 (10.00%) low severe 3 (3.00%) low mild 3 (3.00%) high mild 3 (3.00%) high severe single_thread/executor::spawn_many_local time: [6.1952 ms 6.2230 ms 6.2578 ms] Found 4 outliers among 100 measurements (4.00%) 1 (1.00%) high mild 3 (3.00%) high severe single_thread/executor::spawn_recursively time: [50.202 ms 50.479 ms 50.774 ms] Found 6 outliers among 100 measurements (6.00%) 5 (5.00%) high mild 1 (1.00%) high severe single_thread/executor::yield_now time: [5.8795 ms 5.8883 ms 5.8977 ms] Found 3 outliers among 100 measurements (3.00%) 3 (3.00%) high mild multi_thread/executor::spawn_one time: [1.2565 µs 1.2979 µs 1.3470 µs] Found 8 outliers among 100 measurements (8.00%) 7 (7.00%) high mild 1 (1.00%) high severe multi_thread/executor::spawn_batch time: [38.009 µs 43.693 µs 52.882 µs] Found 22 outliers among 100 measurements (22.00%) 21 (21.00%) high mild 1 (1.00%) high severe Benchmarking multi_thread/executor::spawn_many_local: Warming up for 3.0000 s Warning: Unable to complete 100 samples in 5.0s. You may wish to increase target time to 386.6s, or reduce sample count to 10. multi_thread/executor::spawn_many_local time: [27.492 ms 27.652 ms 27.814 ms] Found 4 outliers among 100 measurements (4.00%) 1 (1.00%) low mild 3 (3.00%) high mild Benchmarking multi_thread/executor::spawn_recursively: Warming up for 3.0000 s Warning: Unable to complete 100 samples in 5.0s. You may wish to increase target time to 16.6s, or reduce sample count to 30. 
multi_thread/executor::spawn_recursively time: [165.82 ms 166.04 ms 166.26 ms] Found 1 outliers among 100 measurements (1.00%) 1 (1.00%) high mild multi_thread/executor::yield_now time: [22.469 ms 22.649 ms 22.798 ms] Found 8 outliers among 100 measurements (8.00%) 5 (5.00%) low severe 3 (3.00%) low mild single_thread/leaked_executor::spawn_one time: [1.4717 µs 1.4778 µs 1.4832 µs] Found 9 outliers among 100 measurements (9.00%) 3 (3.00%) low severe 2 (2.00%) low mild 3 (3.00%) high mild 1 (1.00%) high severe single_thread/leaked_executor::spawn_many_local time: [4.2622 ms 4.3065 ms 4.3489 ms] Found 2 outliers among 100 measurements (2.00%) 2 (2.00%) low mild single_thread/leaked_executor::spawn_recursively time: [26.566 ms 26.899 ms 27.228 ms] single_thread/leaked_executor::yield_now time: [5.7200 ms 5.7270 ms 5.7342 ms] Found 1 outliers among 100 measurements (1.00%) 1 (1.00%) high mild multi_thread/leaked_executor::spawn_one time: [1.3755 µs 1.4321 µs 1.4892 µs] Found 1 outliers among 100 measurements (1.00%) 1 (1.00%) high mild multi_thread/leaked_executor::spawn_many_local time: [4.1838 ms 4.2394 ms 4.2989 ms] Found 7 outliers among 100 measurements (7.00%) 7 (7.00%) high mild multi_thread/leaked_executor::spawn_recursively time: [43.074 ms 43.159 ms 43.241 ms] Found 1 outliers among 100 measurements (1.00%) 1 (1.00%) low mild multi_thread/leaked_executor::yield_now time: [23.210 ms 23.257 ms 23.302 ms] Found 1 outliers among 100 measurements (1.00%) 1 (1.00%) low mild ``` --- .github/workflows/ci.yml | 5 + Cargo.toml | 9 +- benches/executor.rs | 619 +++++++++++++++++++++++++++------------ src/lib.rs | 86 ++++-- src/static_executors.rs | 479 ++++++++++++++++++++++++++++++ 5 files changed, 972 insertions(+), 226 deletions(-) create mode 100644 src/static_executors.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index be05945..db2bd1a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,6 +45,7 @@ jobs: if: 
startsWith(matrix.rust, 'nightly') run: cargo check -Z features=dev_dep - run: cargo test + - run: cargo test --all-features - run: cargo check --all --all-features --target wasm32-unknown-unknown - run: cargo hack build --all --all-features --target wasm32-unknown-unknown --no-dev-deps @@ -82,6 +83,10 @@ jobs: env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout + - run: cargo miri test --all-features + env: + MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation -Zmiri-ignore-leaks + RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout security_audit: permissions: diff --git a/Cargo.toml b/Cargo.toml index 15d0cf5..16d33bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ name = "async-executor" version = "1.11.0" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" -rust-version = "1.60" +rust-version = "1.63" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" @@ -14,9 +14,13 @@ keywords = ["asynchronous", "executor", "single", "multi", "spawn"] categories = ["asynchronous", "concurrency"] exclude = ["/.*"] +[features] +# Adds support for executors optimized for use in static variables. 
+static = [] + [dependencies] async-task = "4.4.0" -concurrent-queue = "2.0.0" +concurrent-queue = "2.5.0" fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } slab = "0.4.4" @@ -37,3 +41,4 @@ once_cell = "1.16.0" [[bench]] name = "executor" harness = false +required-features = ["static"] diff --git a/benches/executor.rs b/benches/executor.rs index f624513..74b2955 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -1,7 +1,7 @@ use std::mem; use std::thread::available_parallelism; -use async_executor::Executor; +use async_executor::{Executor, StaticExecutor}; use criterion::{criterion_group, criterion_main, Criterion}; use futures_lite::{future, prelude::*}; @@ -10,6 +10,7 @@ const STEPS: usize = 300; const LIGHT_TASKS: usize = 25_000; static EX: Executor<'_> = Executor::new(); +static STATIC_EX: StaticExecutor = StaticExecutor::new(); fn run(f: impl FnOnce(), multithread: bool) { let limit = if multithread { @@ -27,6 +28,22 @@ fn run(f: impl FnOnce(), multithread: bool) { }); } +fn run_static(f: impl FnOnce(), multithread: bool) { + let limit = if multithread { + available_parallelism().unwrap().get() + } else { + 1 + }; + + let (s, r) = async_channel::bounded::<()>(1); + easy_parallel::Parallel::new() + .each(0..limit, |_| future::block_on(STATIC_EX.run(r.recv()))) + .finish(move || { + let _s = s; + f() + }); +} + fn create(c: &mut Criterion) { c.bench_function("executor::create", |b| { b.iter(|| { @@ -38,224 +55,442 @@ fn create(c: &mut Criterion) { } fn running_benches(c: &mut Criterion) { - for (group_name, multithread) in [("single_thread", false), ("multi_thread", true)].iter() { - let mut group = c.benchmark_group(group_name.to_string()); - - group.bench_function("executor::spawn_one", |b| { - run( - || { - b.iter(|| { - future::block_on(async { EX.spawn(async {}).await }); - }); - }, - *multithread, - ); - }); - - group.bench_function("executor::spawn_batch", |b| { - run( - || { - let mut handles = vec![]; + for 
(prefix, with_static) in [("executor", false), ("static_executor", true)] { + for (group_name, multithread) in [("single_thread", false), ("multi_thread", true)].iter() { + let mut group = c.benchmark_group(group_name.to_string()); + + group.bench_function(format!("{}::spawn_one", prefix), |b| { + if with_static { + run_static( + || { + b.iter(|| { + future::block_on(async { STATIC_EX.spawn(async {}).await }); + }); + }, + *multithread, + ); + } else { + run( + || { + b.iter(|| { + future::block_on(async { EX.spawn(async {}).await }); + }); + }, + *multithread, + ); + } + }); - b.iter(|| { - EX.spawn_many((0..250).map(|_| future::yield_now()), &mut handles); - }); + if !with_static { + group.bench_function("executor::spawn_batch", |b| { + run( + || { + let mut handles = vec![]; - handles.clear(); - }, - *multithread, - ) - }); + b.iter(|| { + EX.spawn_many((0..250).map(|_| future::yield_now()), &mut handles); + }); - group.bench_function("executor::spawn_many_local", |b| { - run( - || { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..LIGHT_TASKS { - tasks.push(EX.spawn(async {})); - } - for task in tasks { - task.await; - } - }); - }); - }, - *multithread, - ); - }); + handles.clear(); + }, + *multithread, + ) + }); + } - group.bench_function("executor::spawn_recursively", |b| { - #[allow(clippy::manual_async_fn)] - fn go(i: usize) -> impl Future + Send + 'static { - async move { - if i != 0 { - EX.spawn(async move { - let fut = go(i - 1).boxed(); - fut.await; - }) - .await; + group.bench_function(format!("{}::spawn_many_local", prefix), |b| { + if with_static { + run_static( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..LIGHT_TASKS { + tasks.push(STATIC_EX.spawn(async {})); + } + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ); + } else { + run( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 
0..LIGHT_TASKS { + tasks.push(EX.spawn(async {})); + } + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ); + } + }); + + group.bench_function(format!("{}::spawn_recursively", prefix), |b| { + #[allow(clippy::manual_async_fn)] + fn go(i: usize) -> impl Future + Send + 'static { + async move { + if i != 0 { + EX.spawn(async move { + let fut = go(i - 1).boxed(); + fut.await; + }) + .await; + } } } - } - run( - || { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..TASKS { - tasks.push(EX.spawn(go(STEPS))); - } - for task in tasks { - task.await; - } - }); - }); - }, - *multithread, - ); - }); + #[allow(clippy::manual_async_fn)] + fn go_static(i: usize) -> impl Future + Send + 'static { + async move { + if i != 0 { + STATIC_EX + .spawn(async move { + let fut = go_static(i - 1).boxed(); + fut.await; + }) + .await; + } + } + } - group.bench_function("executor::yield_now", |b| { - run( - || { - b.iter(move || { - future::block_on(async { - let mut tasks = Vec::new(); - for _ in 0..TASKS { - tasks.push(EX.spawn(async move { - for _ in 0..STEPS { - future::yield_now().await; + if with_static { + run_static( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..TASKS { + tasks.push(STATIC_EX.spawn(go_static(STEPS))); + } + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ); + } else { + run( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..TASKS { + tasks.push(EX.spawn(go(STEPS))); + } + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ); + } + }); + + group.bench_function(format!("{}::yield_now", prefix), |b| { + if with_static { + run_static( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..TASKS { + tasks.push(STATIC_EX.spawn(async move { + for _ in 0..STEPS { + future::yield_now().await; + } + })); + } + for task in tasks 
{ + task.await; + } + }); + }); + }, + *multithread, + ); + } else { + run( + || { + b.iter(move || { + future::block_on(async { + let mut tasks = Vec::new(); + for _ in 0..TASKS { + tasks.push(EX.spawn(async move { + for _ in 0..STEPS { + future::yield_now().await; + } + })); + } + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ); + } + }); + + group.bench_function(format!("{}::channels", prefix), |b| { + if with_static { + run_static( + || { + b.iter(move || { + future::block_on(async { + // Create channels. + let mut tasks = Vec::new(); + let (first_send, first_recv) = async_channel::bounded(1); + let mut current_recv = first_recv; + + for _ in 0..TASKS { + let (next_send, next_recv) = async_channel::bounded(1); + let current_recv = + mem::replace(&mut current_recv, next_recv); + + tasks.push(STATIC_EX.spawn(async move { + // Send a notification on to the next task. + for _ in 0..STEPS { + current_recv.recv().await.unwrap(); + next_send.send(()).await.unwrap(); + } + })); } - })); - } - for task in tasks { - task.await; - } - }); - }); - }, - *multithread, - ); - }); - group.bench_function("executor::channels", |b| { - run( - || { - b.iter(move || { - future::block_on(async { - // Create channels. - let mut tasks = Vec::new(); - let (first_send, first_recv) = async_channel::bounded(1); - let mut current_recv = first_recv; - - for _ in 0..TASKS { - let (next_send, next_recv) = async_channel::bounded(1); - let current_recv = mem::replace(&mut current_recv, next_recv); - - tasks.push(EX.spawn(async move { - // Send a notification on to the next task. 
for _ in 0..STEPS { + first_send.send(()).await.unwrap(); current_recv.recv().await.unwrap(); - next_send.send(()).await.unwrap(); } - })); - } - - for _ in 0..STEPS { - first_send.send(()).await.unwrap(); - current_recv.recv().await.unwrap(); - } - - for task in tasks { - task.await; - } - }); - }); - }, - *multithread, - ) - }); - group.bench_function("executor::web_server", |b| { - run( - || { - b.iter(move || { - future::block_on(async { - let (db_send, db_recv) = - async_channel::bounded::>(TASKS / 5); - let mut db_rng = fastrand::Rng::with_seed(0x12345678); - let mut web_rng = db_rng.fork(); - - // This task simulates a database. - let db_task = EX.spawn(async move { - loop { - // Wait for a new task. - let incoming = match db_recv.recv().await { - Ok(incoming) => incoming, - Err(_) => break, - }; - - // Process the task. Maybe it takes a while. - for _ in 0..db_rng.usize(..10) { - future::yield_now().await; + for task in tasks { + task.await; } - - // Send the data back. - incoming.send(db_rng.usize(..)).await.ok(); - } + }); }); + }, + *multithread, + ) + } else { + run( + || { + b.iter(move || { + future::block_on(async { + // Create channels. + let mut tasks = Vec::new(); + let (first_send, first_recv) = async_channel::bounded(1); + let mut current_recv = first_recv; + + for _ in 0..TASKS { + let (next_send, next_recv) = async_channel::bounded(1); + let current_recv = + mem::replace(&mut current_recv, next_recv); + + tasks.push(EX.spawn(async move { + // Send a notification on to the next task. + for _ in 0..STEPS { + current_recv.recv().await.unwrap(); + next_send.send(()).await.unwrap(); + } + })); + } - // This task simulates a web server waiting for new tasks. - let server_task = EX.spawn(async move { - for i in 0..TASKS { - // Get a new connection. 
- if web_rng.usize(..=16) == 16 { - future::yield_now().await; + for _ in 0..STEPS { + first_send.send(()).await.unwrap(); + current_recv.recv().await.unwrap(); } - let mut web_rng = web_rng.fork(); - let db_send = db_send.clone(); - let task = EX.spawn(async move { - // Check if the data is cached... - if web_rng.bool() { - // ...it's in cache! - future::yield_now().await; - return; + for task in tasks { + task.await; + } + }); + }); + }, + *multithread, + ) + } + }); + + group.bench_function(format!("{}::web_server", prefix), |b| { + if with_static { + run_static( + || { + b.iter(move || { + future::block_on(async { + let (db_send, db_recv) = + async_channel::bounded::>( + TASKS / 5, + ); + let mut db_rng = fastrand::Rng::with_seed(0x12345678); + let mut web_rng = db_rng.fork(); + + // This task simulates a database. + let db_task = STATIC_EX.spawn(async move { + loop { + // Wait for a new task. + let incoming = match db_recv.recv().await { + Ok(incoming) => incoming, + Err(_) => break, + }; + + // Process the task. Maybe it takes a while. + for _ in 0..db_rng.usize(..10) { + future::yield_now().await; + } + + // Send the data back. + incoming.send(db_rng.usize(..)).await.ok(); } + }); - // Otherwise we have to make a DB call or two. - for _ in 0..web_rng.usize(STEPS / 2..STEPS) { - let (resp_send, resp_recv) = async_channel::bounded(1); - db_send.send(resp_send).await.unwrap(); - criterion::black_box(resp_recv.recv().await.unwrap()); + // This task simulates a web server waiting for new tasks. + let server_task = STATIC_EX.spawn(async move { + for i in 0..TASKS { + // Get a new connection. + if web_rng.usize(..=16) == 16 { + future::yield_now().await; + } + + let mut web_rng = web_rng.fork(); + let db_send = db_send.clone(); + let task = STATIC_EX.spawn(async move { + // Check if the data is cached... + if web_rng.bool() { + // ...it's in cache! + future::yield_now().await; + return; + } + + // Otherwise we have to make a DB call or two. 
+ for _ in 0..web_rng.usize(STEPS / 2..STEPS) { + let (resp_send, resp_recv) = + async_channel::bounded(1); + db_send.send(resp_send).await.unwrap(); + criterion::black_box( + resp_recv.recv().await.unwrap(), + ); + } + + // Send the data back... + for _ in 0..web_rng.usize(3..16) { + future::yield_now().await; + } + }); + + task.detach(); + + if i & 16 == 0 { + future::yield_now().await; + } } + }); - // Send the data back... - for _ in 0..web_rng.usize(3..16) { - future::yield_now().await; + // Spawn and wait for it to stop. + server_task.await; + db_task.await; + }); + }) + }, + *multithread, + ) + } else { + run( + || { + b.iter(move || { + future::block_on(async { + let (db_send, db_recv) = + async_channel::bounded::>( + TASKS / 5, + ); + let mut db_rng = fastrand::Rng::with_seed(0x12345678); + let mut web_rng = db_rng.fork(); + + // This task simulates a database. + let db_task = EX.spawn(async move { + loop { + // Wait for a new task. + let incoming = match db_recv.recv().await { + Ok(incoming) => incoming, + Err(_) => break, + }; + + // Process the task. Maybe it takes a while. + for _ in 0..db_rng.usize(..10) { + future::yield_now().await; + } + + // Send the data back. + incoming.send(db_rng.usize(..)).await.ok(); } }); - task.detach(); - - if i & 16 == 0 { - future::yield_now().await; - } - } - }); + // This task simulates a web server waiting for new tasks. + let server_task = EX.spawn(async move { + for i in 0..TASKS { + // Get a new connection. + if web_rng.usize(..=16) == 16 { + future::yield_now().await; + } + + let mut web_rng = web_rng.fork(); + let db_send = db_send.clone(); + let task = EX.spawn(async move { + // Check if the data is cached... + if web_rng.bool() { + // ...it's in cache! + future::yield_now().await; + return; + } + + // Otherwise we have to make a DB call or two. 
+ for _ in 0..web_rng.usize(STEPS / 2..STEPS) { + let (resp_send, resp_recv) = + async_channel::bounded(1); + db_send.send(resp_send).await.unwrap(); + criterion::black_box( + resp_recv.recv().await.unwrap(), + ); + } + + // Send the data back... + for _ in 0..web_rng.usize(3..16) { + future::yield_now().await; + } + }); + + task.detach(); + + if i & 16 == 0 { + future::yield_now().await; + } + } + }); - // Spawn and wait for it to stop. - server_task.await; - db_task.await; - }); - }) - }, - *multithread, - ) - }); + // Spawn and wait for it to stop. + server_task.await; + db_task.await; + }); + }) + }, + *multithread, + ) + } + }); + } } } diff --git a/src/lib.rs b/src/lib.rs index a663be8..7c5d49d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,6 +37,7 @@ #![doc( html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" )] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] use std::fmt; use std::marker::PhantomData; @@ -51,8 +52,13 @@ use concurrent_queue::ConcurrentQueue; use futures_lite::{future, prelude::*}; use slab::Slab; +#[cfg(feature = "static")] +mod static_executors; + #[doc(no_inline)] pub use async_task::{FallibleTask, Task}; +#[cfg(feature = "static")] +pub use static_executors::*; /// An async executor. /// @@ -292,18 +298,7 @@ impl<'a> Executor<'a> { /// assert!(ex.try_tick()); // a task was found /// ``` pub fn try_tick(&self) -> bool { - match self.state().queue.pop() { - Err(_) => false, - Ok(runnable) => { - // Notify another ticker now to pick up where this ticker left off, just in case - // running the task takes a long time. - self.state().notify(); - - // Run the task. - runnable.run(); - true - } - } + self.state().try_tick() } /// Runs a single task. 
@@ -326,9 +321,7 @@ impl<'a> Executor<'a> { /// future::block_on(ex.tick()); // runs the task /// ``` pub async fn tick(&self) { - let state = self.state(); - let runnable = Ticker::new(state).runnable().await; - runnable.run(); + self.state().tick().await; } /// Runs the executor until the given future completes. @@ -347,22 +340,7 @@ impl<'a> Executor<'a> { /// assert_eq!(res, 6); /// ``` pub async fn run(&self, future: impl Future) -> T { - let mut runner = Runner::new(self.state()); - let mut rng = fastrand::Rng::new(); - - // A future that runs tasks forever. - let run_forever = async { - loop { - for _ in 0..200 { - let runnable = runner.runnable(&mut rng).await; - runnable.run(); - } - future::yield_now().await; - } - }; - - // Run `future` and `run_forever` concurrently until `future` completes. - future.or(run_forever).await + self.state().run(future).await } /// Returns a function that schedules a runnable task when it gets woken up. @@ -701,7 +679,7 @@ struct State { impl State { /// Creates state for a new executor. - fn new() -> State { + const fn new() -> State { State { queue: ConcurrentQueue::unbounded(), local_queues: RwLock::new(Vec::new()), @@ -729,6 +707,45 @@ impl State { } } } + + pub(crate) fn try_tick(&self) -> bool { + match self.queue.pop() { + Err(_) => false, + Ok(runnable) => { + // Notify another ticker now to pick up where this ticker left off, just in case + // running the task takes a long time. + self.notify(); + + // Run the task. + runnable.run(); + true + } + } + } + + pub(crate) async fn tick(&self) { + let runnable = Ticker::new(self).runnable().await; + runnable.run(); + } + + pub async fn run(&self, future: impl Future) -> T { + let mut runner = Runner::new(self); + let mut rng = fastrand::Rng::new(); + + // A future that runs tasks forever. 
+ let run_forever = async { + loop { + for _ in 0..200 { + let runnable = runner.runnable(&mut rng).await; + runnable.run(); + } + future::yield_now().await; + } + }; + + // Run `future` and `run_forever` concurrently until `future` completes. + future.or(run_forever).await + } } /// A list of sleeping tickers. @@ -1068,6 +1085,11 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_ // in state_ptr. let state = unsafe { &*ptr }; + debug_state(state, name, f) +} + +/// Debug implementation for `Executor` and `LocalExecutor`. +fn debug_state(state: &State, name: &str, f: &mut fmt::Formatter<'_>) -> fmt::Result { /// Debug wrapper for the number of active tasks. struct ActiveTasks<'a>(&'a Mutex>); diff --git a/src/static_executors.rs b/src/static_executors.rs new file mode 100644 index 0000000..c1724e9 --- /dev/null +++ b/src/static_executors.rs @@ -0,0 +1,479 @@ +use crate::{debug_state, Executor, LocalExecutor, State}; +use async_task::{Builder, Runnable, Task}; +use slab::Slab; +use std::{ + cell::UnsafeCell, + fmt, + future::Future, + marker::PhantomData, + panic::{RefUnwindSafe, UnwindSafe}, +}; + +impl Executor<'static> { + /// Consumes the [`Executor`] and intentionally leaks it. + /// + /// Largely equivalent to calling `Box::leak(Box::new(executor))`, but the produced + /// [`StaticExecutor`]'s functions are optimized to require fewer synchronizing operations + /// when spawning, running, and finishing tasks. + /// + /// `StaticExecutor` cannot be converted back into a `Executor`, so this operation is + /// irreversible without the use of unsafe. 
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use async_executor::Executor;
+    /// use futures_lite::future;
+    ///
+    /// let ex = Executor::new().leak();
+    ///
+    /// let task = ex.spawn(async {
+    ///     println!("Hello world");
+    /// });
+    ///
+    /// future::block_on(ex.run(task));
+    /// ```
+    pub fn leak(self) -> &'static StaticExecutor {
+        let ptr = self.state_ptr();
+        // SAFETY: So long as an Executor lives, its state pointer will always be valid
+        // when accessed through state_ptr. This executor will live for the full 'static
+        // lifetime so this isn't an arbitrary lifetime extension.
+        let state: &'static State = unsafe { &*ptr };
+
+        std::mem::forget(self);
+
+        let mut active = state.active.lock().unwrap();
+        if !active.is_empty() {
+            // Reschedule all of the active tasks.
+            for waker in active.drain() {
+                waker.wake();
+            }
+            // Overwrite to ensure that the slab is deallocated.
+            *active = Slab::new();
+        }
+
+        // SAFETY: StaticExecutor has the same memory layout as State as it's repr(transparent).
+        // The lifetime is not altered: 'static -> 'static.
+        let static_executor: &'static StaticExecutor = unsafe { std::mem::transmute(state) };
+        static_executor
+    }
+}
+
+impl LocalExecutor<'static> {
+    /// Consumes the [`LocalExecutor`] and intentionally leaks it.
+    ///
+    /// Largely equivalent to calling `Box::leak(Box::new(executor))`, but the produced
+    /// [`StaticLocalExecutor`]'s functions are optimized to require fewer synchronizing operations
+    /// when spawning, running, and finishing tasks.
+    ///
+    /// `StaticLocalExecutor` cannot be converted back into an `Executor`, so this operation is
+    /// irreversible without the use of unsafe.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use async_executor::LocalExecutor;
+    /// use futures_lite::future;
+    ///
+    /// let ex = LocalExecutor::new().leak();
+    ///
+    /// let task = ex.spawn(async {
+    ///     println!("Hello world");
+    /// });
+    ///
+    /// future::block_on(ex.run(task));
+    /// ```
+    pub fn leak(self) -> &'static StaticLocalExecutor {
+        let ptr = self.inner.state_ptr();
+        // SAFETY: So long as a LocalExecutor lives, its state pointer will always be valid
+        // when accessed through state_ptr. This executor will live for the full 'static
+        // lifetime so this isn't an arbitrary lifetime extension.
+        let state: &'static State = unsafe { &*ptr };
+
+        std::mem::forget(self);
+
+        let mut active = state.active.lock().unwrap();
+        if !active.is_empty() {
+            // Reschedule all of the active tasks.
+            for waker in active.drain() {
+                waker.wake();
+            }
+            // Overwrite to ensure that the slab is deallocated.
+            *active = Slab::new();
+        }
+
+        // SAFETY: StaticLocalExecutor has the same memory layout as State as it's repr(transparent).
+        // The lifetime is not altered: 'static -> 'static.
+        let static_executor: &'static StaticLocalExecutor = unsafe { std::mem::transmute(state) };
+        static_executor
+    }
+}
+
+/// A static-lifetimed async [`Executor`].
+///
+/// This is primarily intended to be used in [`static`] variables, and can be created in
+/// non-static contexts via [`Executor::leak`].
+///
+/// Spawning, running, and finishing tasks are optimized with the assumption that the executor will never be `Drop`'ed.
+/// A static executor may require significantly less overhead in both single-threaded and multithreaded use cases.
+///
+/// As this type does not implement `Drop`, losing the handle to the executor or failing
+/// to consistently drive the executor with [`tick`] or [`run`] will cause all spawned
+/// tasks to permanently leak. Any tasks alive at that time will not be cancelled.
+///
+/// [`static`]: https://doc.rust-lang.org/std/keyword.static.html
+#[repr(transparent)]
+pub struct StaticExecutor {
+    state: State,
+}
+
+// SAFETY: Executor stores no thread-local state that can be accessed via another thread.
+unsafe impl Send for StaticExecutor {}
+// SAFETY: Executor synchronizes all of its operations internally.
+unsafe impl Sync for StaticExecutor {}
+
+impl UnwindSafe for StaticExecutor {}
+impl RefUnwindSafe for StaticExecutor {}
+
+impl fmt::Debug for StaticExecutor {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        debug_state(&self.state, "StaticExecutor", f)
+    }
+}
+
+impl StaticExecutor {
+    /// Creates a new StaticExecutor.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_executor::StaticExecutor;
+    ///
+    /// static EXECUTOR: StaticExecutor = StaticExecutor::new();
+    /// ```
+    pub const fn new() -> Self {
+        Self {
+            state: State::new(),
+        }
+    }
+
+    /// Spawns a task onto the executor.
+    ///
+    /// Note: unlike [`Executor::spawn`], this function requires being called with a `'static`
+    /// borrow on the executor.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_executor::StaticExecutor;
+    ///
+    /// static EXECUTOR: StaticExecutor = StaticExecutor::new();
+    ///
+    /// let task = EXECUTOR.spawn(async {
+    ///     println!("Hello world");
+    /// });
+    /// ```
+    pub fn spawn<T: Send + 'static>(
+        &'static self,
+        future: impl Future<Output = T> + Send + 'static,
+    ) -> Task<T> {
+        let (runnable, task) = Builder::new()
+            .propagate_panic(true)
+            .spawn(|()| future, self.schedule());
+        runnable.schedule();
+        task
+    }
+
+    /// Spawns a non-`'static` task onto the executor.
+    ///
+    /// ## Safety
+    ///
+    /// The caller must ensure that the returned task terminates
+    /// or is cancelled before the end of `'a`.
+ pub unsafe fn spawn_scoped<'a, T: Send + 'a>( + &'static self, + future: impl Future + Send + 'a, + ) -> Task { + // SAFETY: + // + // - `future` is `Send` + // - `future` is not `'static`, but the caller guarantees that the + // task, and thus its `Runnable` must not live longer than `'a`. + // - `self.schedule()` is `Send`, `Sync` and `'static`, as checked below. + // Therefore we do not need to worry about what is done with the + // `Waker`. + let (runnable, task) = unsafe { + Builder::new() + .propagate_panic(true) + .spawn_unchecked(|()| future, self.schedule()) + }; + runnable.schedule(); + task + } + + /// Attempts to run a task if at least one is scheduled. + /// + /// Running a scheduled task means simply polling its future once. + /// + /// # Examples + /// + /// ``` + /// use async_executor::StaticExecutor; + /// + /// static EXECUTOR: StaticExecutor = StaticExecutor::new(); + /// + /// assert!(!EXECUTOR.try_tick()); // no tasks to run + /// + /// let task = EXECUTOR.spawn(async { + /// println!("Hello world"); + /// }); + /// + /// assert!(EXECUTOR.try_tick()); // a task was found + /// ``` + pub fn try_tick(&self) -> bool { + self.state.try_tick() + } + + /// Runs a single task. + /// + /// Running a task means simply polling its future once. + /// + /// If no tasks are scheduled when this method is called, it will wait until one is scheduled. + /// + /// # Examples + /// + /// ``` + /// use async_executor::StaticExecutor; + /// use futures_lite::future; + /// + /// static EXECUTOR: StaticExecutor = StaticExecutor::new(); + /// + /// let task = EXECUTOR.spawn(async { + /// println!("Hello world"); + /// }); + /// + /// future::block_on(EXECUTOR.tick()); // runs the task + /// ``` + pub async fn tick(&self) { + self.state.tick().await; + } + + /// Runs the executor until the given future completes. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_executor::StaticExecutor;
+    /// use futures_lite::future;
+    ///
+    /// static EXECUTOR: StaticExecutor = StaticExecutor::new();
+    ///
+    /// let task = EXECUTOR.spawn(async { 1 + 2 });
+    /// let res = future::block_on(EXECUTOR.run(async { task.await * 2 }));
+    ///
+    /// assert_eq!(res, 6);
+    /// ```
+    pub async fn run<T>(&self, future: impl Future<Output = T>) -> T {
+        self.state.run(future).await
+    }
+
+    /// Returns a function that schedules a runnable task when it gets woken up.
+    fn schedule(&'static self) -> impl Fn(Runnable) + Send + Sync + 'static {
+        let state: &'static State = &self.state;
+        // TODO: If possible, push into the current local queue and notify the ticker.
+        move |runnable| {
+            state.queue.push(runnable).unwrap();
+            state.notify();
+        }
+    }
+}
+
+impl Default for StaticExecutor {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// A static async [`LocalExecutor`] created from [`LocalExecutor::leak`].
+///
+/// This is primarily intended to be used in [`thread_local`] variables, or can be created in non-static
+/// contexts via [`LocalExecutor::leak`].
+///
+/// Spawning, running, and finishing tasks are optimized with the assumption that the executor will never be `Drop`'ed.
+/// A static executor may require significantly less overhead in both single-threaded and multithreaded use cases.
+///
+/// As this type does not implement `Drop`, losing the handle to the executor or failing
+/// to consistently drive the executor with [`tick`] or [`run`] will cause all spawned
+/// tasks to permanently leak. Any tasks alive at that time will not be cancelled.
+///
+/// [`thread_local`]: https://doc.rust-lang.org/std/macro.thread_local.html
+#[repr(transparent)]
+pub struct StaticLocalExecutor {
+    state: State,
+    marker_: PhantomData<UnsafeCell<()>>,
+}
+
+impl UnwindSafe for StaticLocalExecutor {}
+impl RefUnwindSafe for StaticLocalExecutor {}
+
+impl fmt::Debug for StaticLocalExecutor {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        debug_state(&self.state, "StaticLocalExecutor", f)
+    }
+}
+
+impl StaticLocalExecutor {
+    /// Creates a new StaticLocalExecutor.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_executor::StaticLocalExecutor;
+    ///
+    /// thread_local! {
+    ///     static EXECUTOR: StaticLocalExecutor = StaticLocalExecutor::new();
+    /// }
+    /// ```
+    pub const fn new() -> Self {
+        Self {
+            state: State::new(),
+            marker_: PhantomData,
+        }
+    }
+
+    /// Spawns a task onto the executor.
+    ///
+    /// Note: unlike [`LocalExecutor::spawn`], this function requires being called with a `'static`
+    /// borrow on the executor.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use async_executor::LocalExecutor;
+    ///
+    /// let ex = LocalExecutor::new().leak();
+    ///
+    /// let task = ex.spawn(async {
+    ///     println!("Hello world");
+    /// });
+    /// ```
+    pub fn spawn<T: 'static>(&'static self, future: impl Future<Output = T> + 'static) -> Task<T> {
+        let (runnable, task) = Builder::new()
+            .propagate_panic(true)
+            .spawn_local(|()| future, self.schedule());
+        runnable.schedule();
+        task
+    }
+
+    /// Spawns a non-`'static` task onto the executor.
+    ///
+    /// ## Safety
+    ///
+    /// The caller must ensure that the returned task terminates
+    /// or is cancelled before the end of `'a`.
+    pub unsafe fn spawn_scoped<'a, T: 'a>(
+        &'static self,
+        future: impl Future<Output = T> + 'a,
+    ) -> Task<T> {
+        // SAFETY:
+        //
+        // - `future` is not `Send` but `StaticLocalExecutor` is `!Sync`,
+        //   `try_tick`, `tick` and `run` can only be called from the origin
+        //   thread of the `StaticLocalExecutor`.
Similarly, `spawn_scoped` can only + // be called from the origin thread, ensuring that `future` and the executor + // share the same origin thread. The `Runnable` can be scheduled from other + // threads, but because of the above `Runnable` can only be called or + // dropped on the origin thread. + // - `future` is not `'static`, but the caller guarantees that the + // task, and thus its `Runnable` must not live longer than `'a`. + // - `self.schedule()` is `Send`, `Sync` and `'static`, as checked below. + // Therefore we do not need to worry about what is done with the + // `Waker`. + let (runnable, task) = unsafe { + Builder::new() + .propagate_panic(true) + .spawn_unchecked(|()| future, self.schedule()) + }; + runnable.schedule(); + task + } + + /// Attempts to run a task if at least one is scheduled. + /// + /// Running a scheduled task means simply polling its future once. + /// + /// # Examples + /// + /// ``` + /// use async_executor::LocalExecutor; + /// + /// let ex = LocalExecutor::new().leak(); + /// assert!(!ex.try_tick()); // no tasks to run + /// + /// let task = ex.spawn(async { + /// println!("Hello world"); + /// }); + /// assert!(ex.try_tick()); // a task was found + /// ``` + pub fn try_tick(&self) -> bool { + self.state.try_tick() + } + + /// Runs a single task. + /// + /// Running a task means simply polling its future once. + /// + /// If no tasks are scheduled when this method is called, it will wait until one is scheduled. + /// + /// # Examples + /// + /// ``` + /// use async_executor::LocalExecutor; + /// use futures_lite::future; + /// + /// let ex = LocalExecutor::new().leak(); + /// + /// let task = ex.spawn(async { + /// println!("Hello world"); + /// }); + /// future::block_on(ex.tick()); // runs the task + /// ``` + pub async fn tick(&self) { + self.state.tick().await; + } + + /// Runs the executor until the given future completes. 
+ /// + /// # Examples + /// + /// ``` + /// use async_executor::LocalExecutor; + /// use futures_lite::future; + /// + /// let ex = LocalExecutor::new().leak(); + /// + /// let task = ex.spawn(async { 1 + 2 }); + /// let res = future::block_on(ex.run(async { task.await * 2 })); + /// + /// assert_eq!(res, 6); + /// ``` + pub async fn run(&self, future: impl Future) -> T { + self.state.run(future).await + } + + /// Returns a function that schedules a runnable task when it gets woken up. + fn schedule(&'static self) -> impl Fn(Runnable) + Send + Sync + 'static { + let state: &'static State = &self.state; + // TODO: If possible, push into the current local queue and notify the ticker. + move |runnable| { + state.queue.push(runnable).unwrap(); + state.notify(); + } + } +} + +impl Default for StaticLocalExecutor { + fn default() -> Self { + Self::new() + } +} From 7577d565f58c368d83af2f51018d1c5c9b95fa9d Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sat, 25 May 2024 12:04:44 -0700 Subject: [PATCH 082/110] v1.12.0 Signed-off-by: John Nunley --- CHANGELOG.md | 7 ++++++- Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca8ebc0..61928a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# Version 1.12.0 + +- Add static executors, which are an optimization over executors that are kept + around forever. (#112) + # Version 1.11.0 - Re-export the `async_task::FallibleTask` primitive. (#113) @@ -58,7 +63,7 @@ # Version 1.5.1 -- Implement a better form of debug output for Executor and LocalExecutor. (#33) +- Implement a better form of debug output for Executor and LocalExecutor. 
(#33) # Version 1.5.0 diff --git a/Cargo.toml b/Cargo.toml index 16d33bc..d93727c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.11.0" +version = "1.12.0" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" rust-version = "1.63" From 054c9d1fc92551b420dbd26ca03c5c3951061914 Mon Sep 17 00:00:00 2001 From: Daria Sukhonina Date: Sun, 9 Jun 2024 18:32:28 +0300 Subject: [PATCH 083/110] feat: relax Send bound on LocalExecutor::spawn_many --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 7c5d49d..574f31d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -563,7 +563,7 @@ impl<'a> LocalExecutor<'a> { /// /// [`spawn`]: LocalExecutor::spawn /// [`Executor::spawn_many`]: Executor::spawn_many - pub fn spawn_many + Send + 'a>( + pub fn spawn_many + 'a>( &self, futures: impl IntoIterator, handles: &mut impl Extend>, From 1dd477b2de877faf3f8afdcb266a6dd0ed8a4263 Mon Sep 17 00:00:00 2001 From: Alyssa Ross Date: Sat, 6 Jul 2024 11:49:46 +0200 Subject: [PATCH 084/110] Fix rustdoc warnings for static executors For example: warning: unresolved link to `tick` --> src/static_executors.rs:306:47 | 306 | /// to consistently drive the executor with [`tick`] or [`run`] will cause the all spawned | ^^^^ no item named `tick` in scope | = help: to escape `[` and `]` characters, add '\' before them like `\[` or `\]` = note: `#[warn(rustdoc::broken_intra_doc_links)]` on by default --- src/static_executors.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/static_executors.rs b/src/static_executors.rs index c1724e9..c43679d 100644 --- a/src/static_executors.rs +++ b/src/static_executors.rs @@ -118,8 +118,9 @@ impl LocalExecutor<'static> { /// A static executor may require signficantly less overhead in both single-threaded and mulitthreaded use cases. 
/// /// As this type does not implement `Drop`, losing the handle to the executor or failing -/// to consistently drive the executor with [`tick`] or [`run`] will cause the all spawned -/// tasks to permanently leak. Any tasks at the time will not be cancelled. +/// to consistently drive the executor with [`StaticExecutor::tick`] or +/// [`StaticExecutor::run`] will cause the all spawned tasks to permanently leak. Any +/// tasks at the time will not be cancelled. /// /// [`static`]: https://doc.rust-lang.org/std/keyword.static.html #[repr(transparent)] @@ -303,8 +304,9 @@ impl Default for StaticExecutor { /// A static executor may require signficantly less overhead in both single-threaded and mulitthreaded use cases. /// /// As this type does not implement `Drop`, losing the handle to the executor or failing -/// to consistently drive the executor with [`tick`] or [`run`] will cause the all spawned -/// tasks to permanently leak. Any tasks at the time will not be cancelled. +/// to consistently drive the executor with [`StaticLocalExecutor::tick`] or +/// [`StaticLocalExecutor::run`] will cause the all spawned tasks to permanently leak. Any +/// tasks at the time will not be cancelled. 
/// /// [`thread_local]: https://doc.rust-lang.org/std/macro.thread_local.html #[repr(transparent)] From 0fc974e8a13ad960165bfc0455c55fa589887a3a Mon Sep 17 00:00:00 2001 From: Alyssa Ross Date: Sat, 6 Jul 2024 11:50:27 +0200 Subject: [PATCH 085/110] Configure docs.rs to document all features --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index d93727c..38c8be7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,3 +42,6 @@ once_cell = "1.16.0" name = "executor" harness = false required-features = ["static"] + +[package.metadata.docs.rs] +all-features = true From 675be0f1b7c674cea033ad3e24b016f4169d7ec3 Mon Sep 17 00:00:00 2001 From: Alyssa Ross Date: Sat, 6 Jul 2024 11:50:38 +0200 Subject: [PATCH 086/110] Show static feature requirement on docs.rs --- src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib.rs b/src/lib.rs index 574f31d..40c6192 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -58,6 +58,7 @@ mod static_executors; #[doc(no_inline)] pub use async_task::{FallibleTask, Task}; #[cfg(feature = "static")] +#[cfg_attr(docsrs, doc(cfg(any(feature = "static"))))] pub use static_executors::*; /// An async executor. From 21d436ddf631284fdb246c2efe7b5cf98988476c Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 14 Jul 2024 19:18:40 -0700 Subject: [PATCH 087/110] v0.13.0 Signed-off-by: John Nunley --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 61928a7..699a204 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# Version 1.13.0 + +- Relax the `Send` bound on `LocalExecutor::spawn_many`. (#120) +- Ensure all features are documented on `docs.rs`. 
(#122) + # Version 1.12.0 - Add static executors, which are an optimization over executors that are kept diff --git a/Cargo.toml b/Cargo.toml index 38c8be7..d1bd975 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.12.0" +version = "1.13.0" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" rust-version = "1.63" From 57ffea098119504283e320f1a636183a13d9a071 Mon Sep 17 00:00:00 2001 From: Lin Yihai Date: Thu, 15 Aug 2024 16:40:48 +0800 Subject: [PATCH 088/110] chore: fix clippy complains ident --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 40c6192..5d14c2c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -832,8 +832,8 @@ struct Ticker<'a> { /// /// States a ticker can be in: /// 1) Woken. - /// 2a) Sleeping and unnotified. - /// 2b) Sleeping and notified. + /// 2a) Sleeping and unnotified. + /// 2b) Sleeping and notified. 
sleeping: usize, } From e399c252ae017886210a05b7e02736a2cc9fd0b0 Mon Sep 17 00:00:00 2001 From: Lin Yihai Date: Fri, 23 Aug 2024 21:02:01 +0800 Subject: [PATCH 089/110] Fix doc build (#125) --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 5d14c2c..2ec014a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,7 +37,7 @@ #![doc( html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" )] -#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use std::fmt; use std::marker::PhantomData; From 62a83b15d76202d7908cd49350e22952b7b3b079 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Thu, 5 Sep 2024 19:50:34 -0700 Subject: [PATCH 090/110] v1.13.1 Signed-off-by: John Nunley --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 699a204..7b421fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.13.1 + +- Fix docs.rs build. (#125) + # Version 1.13.0 - Relax the `Send` bound on `LocalExecutor::spawn_many`. 
(#120) diff --git a/Cargo.toml b/Cargo.toml index d1bd975..4025c43 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.13.0" +version = "1.13.1" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" rust-version = "1.63" From fc33b2b83efbd309888618f912c7108e6994479c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=9E=97=E4=BC=9F?= Date: Wed, 18 Sep 2024 15:14:09 +0800 Subject: [PATCH 091/110] Fix build badge --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a968713..fed40b9 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # async-executor -[![Build](https://github.com/smol-rs/async-executor/workflows/Build%20and%20test/badge.svg)]( +[![Build](https://github.com/smol-rs/async-executor/actions/workflows/ci.yml/badge.svg)]( https://github.com/smol-rs/async-executor/actions) [![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( https://github.com/smol-rs/async-executor) From 9335b7e88d7e512203f44555467b0f20990437b4 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 18 Oct 2024 14:49:53 -0700 Subject: [PATCH 092/110] ci: Use "v2.0.0" branch for security check Signed-off-by: John Nunley --- .github/workflows/ci.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index db2bd1a..eba1057 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -96,7 +96,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + # rustsec/audit-check used to do this automatically + - name: Generate Cargo.lock + run: cargo generate-lockfile # https://github.com/rustsec/audit-check/issues/2 - - uses: rustsec/audit-check@master + - uses: rustsec/audit-check@v2.0.0 with: token: ${{ secrets.GITHUB_TOKEN }} From 0c216e80cdadc4dc5bf8e41a4544dfce47f4a680 Mon Sep 17 
00:00:00 2001 From: Alisa Sireneva Date: Sat, 30 Nov 2024 08:16:11 +0300 Subject: [PATCH 093/110] Ignore poisoning of `active` Closes #135. This enables the executor to be used in presence of panics in user callbacks, such as the iterator and `impl Extend` in `spawn_many`. Mutex poisoning is more of a lint than a safety requirement, as containers (such as `Slab`) and wakers have to be sound in presence of panics anyway. In this particular case, the exact behavior of `active` is not relied upon for soundness. --- src/lib.rs | 25 +++++++++++++++---------- tests/drop.rs | 3 +++ 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2ec014a..710a650 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -44,7 +44,7 @@ use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; -use std::sync::{Arc, Mutex, RwLock, TryLockError}; +use std::sync::{Arc, Mutex, MutexGuard, RwLock, TryLockError}; use std::task::{Poll, Waker}; use async_task::{Builder, Runnable}; @@ -143,7 +143,7 @@ impl<'a> Executor<'a> { /// assert!(ex.is_empty()); /// ``` pub fn is_empty(&self) -> bool { - self.state().active.lock().unwrap().is_empty() + self.state().active().is_empty() } /// Spawns a task onto the executor. @@ -160,7 +160,7 @@ impl<'a> Executor<'a> { /// }); /// ``` pub fn spawn(&self, future: impl Future + Send + 'a) -> Task { - let mut active = self.state().active.lock().unwrap(); + let mut active = self.state().active(); // SAFETY: `T` and the future are `Send`. unsafe { self.spawn_inner(future, &mut active) } @@ -211,7 +211,7 @@ impl<'a> Executor<'a> { futures: impl IntoIterator, handles: &mut impl Extend>, ) { - let mut active = Some(self.state().active.lock().unwrap()); + let mut active = Some(self.state().active()); // Convert the futures into tasks. 
let tasks = futures.into_iter().enumerate().map(move |(i, future)| { @@ -221,7 +221,7 @@ impl<'a> Executor<'a> { // Yield the lock every once in a while to ease contention. if i.wrapping_sub(1) % 500 == 0 { drop(active.take()); - active = Some(self.state().active.lock().unwrap()); + active = Some(self.state().active()); } task @@ -246,7 +246,7 @@ impl<'a> Executor<'a> { let index = entry.key(); let state = self.state_as_arc(); let future = async move { - let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().try_remove(index))); + let _guard = CallOnDrop(move || drop(state.active().try_remove(index))); future.await }; @@ -415,7 +415,7 @@ impl Drop for Executor<'_> { // via Arc::into_raw in state_ptr. let state = unsafe { Arc::from_raw(ptr) }; - let mut active = state.active.lock().unwrap_or_else(|e| e.into_inner()); + let mut active = state.active(); for w in active.drain() { w.wake(); } @@ -517,7 +517,7 @@ impl<'a> LocalExecutor<'a> { /// }); /// ``` pub fn spawn(&self, future: impl Future + 'a) -> Task { - let mut active = self.inner().state().active.lock().unwrap(); + let mut active = self.inner().state().active(); // SAFETY: This executor is not thread safe, so the future and its result // cannot be sent to another thread. @@ -569,7 +569,7 @@ impl<'a> LocalExecutor<'a> { futures: impl IntoIterator, handles: &mut impl Extend>, ) { - let mut active = self.inner().state().active.lock().unwrap(); + let mut active = self.inner().state().active(); // Convert all of the futures to tasks. let tasks = futures.into_iter().map(|future| { @@ -694,6 +694,11 @@ impl State { } } + /// Returns a reference to currently active tasks. + fn active(&self) -> MutexGuard<'_, Slab> { + self.active.lock().unwrap_or_else(|e| e.into_inner()) + } + /// Notifies a sleeping ticker. 
#[inline] fn notify(&self) { @@ -1099,7 +1104,7 @@ fn debug_state(state: &State, name: &str, f: &mut fmt::Formatter<'_>) -> fmt::Re match self.0.try_lock() { Ok(lock) => fmt::Debug::fmt(&lock.len(), f), Err(TryLockError::WouldBlock) => f.write_str(""), - Err(TryLockError::Poisoned(_)) => f.write_str(""), + Err(TryLockError::Poisoned(err)) => fmt::Debug::fmt(&err.into_inner().len(), f), } } } diff --git a/tests/drop.rs b/tests/drop.rs index 54a0741..5d089b5 100644 --- a/tests/drop.rs +++ b/tests/drop.rs @@ -133,6 +133,9 @@ fn iterator_panics_mid_run() { ) }); assert!(panic.is_err()); + + let task = ex.spawn(future::ready(0)); + assert_eq!(future::block_on(ex.run(task)), 0); } struct CallOnDrop(F); From 76bea84eaaa2545490f1f70cdc409bfa913ddc8e Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 6 Oct 2024 10:13:44 -0700 Subject: [PATCH 094/110] deps: Bump slab to v0.4.7 Closes #131 Signed-off-by: John Nunley --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 4025c43..6cda0eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ async-task = "4.4.0" concurrent-queue = "2.5.0" fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } -slab = "0.4.4" +slab = "0.4.7" [target.'cfg(target_family = "wasm")'.dependencies] futures-lite = { version = "2.0.0", default-features = false, features = ["std"] } From 47739f1d03364f0124e5492a8ed2433cd1a506fb Mon Sep 17 00:00:00 2001 From: Alisa Sireneva Date: Sat, 30 Nov 2024 08:44:12 +0300 Subject: [PATCH 095/110] Reduce memory footprint By creating the future manually instead of relying on `async { .. }`, we workaround rustc's inefficient future layouting. On [a simple benchmark](https://github.com/hez2010/async-runtimes-benchmarks-2024) spawning 1M of tasks, this reduces memory use from about 512 bytes per future to about 340 bytes per future. 
More context: https://github.com/hez2010/async-runtimes-benchmarks-2024/pull/1 --- Cargo.toml | 1 + src/lib.rs | 35 ++++++++++++++++++++++++++++++----- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6cda0eb..b501a7d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ async-task = "4.4.0" concurrent-queue = "2.5.0" fastrand = "2.0.0" futures-lite = { version = "2.0.0", default-features = false } +pin-project-lite = "0.2" slab = "0.4.7" [target.'cfg(target_family = "wasm")'.dependencies] diff --git a/src/lib.rs b/src/lib.rs index 710a650..baeda1d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -42,14 +42,16 @@ use std::fmt; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; +use std::pin::Pin; use std::rc::Rc; use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; use std::sync::{Arc, Mutex, MutexGuard, RwLock, TryLockError}; -use std::task::{Poll, Waker}; +use std::task::{Context, Poll, Waker}; use async_task::{Builder, Runnable}; use concurrent_queue::ConcurrentQueue; use futures_lite::{future, prelude::*}; +use pin_project_lite::pin_project; use slab::Slab; #[cfg(feature = "static")] @@ -245,10 +247,7 @@ impl<'a> Executor<'a> { let entry = active.vacant_entry(); let index = entry.key(); let state = self.state_as_arc(); - let future = async move { - let _guard = CallOnDrop(move || drop(state.active().try_remove(index))); - future.await - }; + let future = AsyncCallOnDrop::new(future, move || drop(state.active().try_remove(index))); // Create the task and register it in the set of active tasks. // @@ -1155,6 +1154,32 @@ impl Drop for CallOnDrop { } } +pin_project! { + /// A wrapper around a future, running a closure when dropped. 
+ struct AsyncCallOnDrop { + #[pin] + future: Fut, + cleanup: CallOnDrop, + } +} + +impl AsyncCallOnDrop { + fn new(future: Fut, cleanup: Cleanup) -> Self { + Self { + future, + cleanup: CallOnDrop(cleanup), + } + } +} + +impl Future for AsyncCallOnDrop { + type Output = Fut::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.project().future.poll(cx) + } +} + fn _ensure_send_and_sync() { use futures_lite::future::pending; From 60a218595f148c0ce16478975f23bac17524b820 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 20 Jan 2025 00:40:46 +0900 Subject: [PATCH 096/110] ci: Use reusable workflows for fmt and security_audit --- .github/workflows/ci.yml | 34 ++++++++++------------------------ 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index eba1057..7a0ba04 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,6 +26,16 @@ defaults: shell: bash jobs: + fmt: + uses: smol-rs/.github/.github/workflows/fmt.yml@main + security_audit: + uses: smol-rs/.github/.github/workflows/security_audit.yml@main + permissions: + checks: write + contents: read + issues: write + secrets: inherit + test: runs-on: ${{ matrix.os }} strategy: @@ -65,14 +75,6 @@ jobs: run: rustup update stable - run: cargo clippy --all-features --all-targets - fmt: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install Rust - run: rustup update stable - - run: cargo fmt --all --check - miri: runs-on: ubuntu-latest steps: @@ -87,19 +89,3 @@ jobs: env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation -Zmiri-ignore-leaks RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout - - security_audit: - permissions: - checks: write - contents: read - issues: write - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # rustsec/audit-check used to do this automatically - - name: Generate Cargo.lock - run: cargo 
generate-lockfile - # https://github.com/rustsec/audit-check/issues/2 - - uses: rustsec/audit-check@v2.0.0 - with: - token: ${{ secrets.GITHUB_TOKEN }} From 3d912bba9e39e7ac67ef60d9ad42485d0011ba32 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 29 Apr 2025 02:29:42 +0900 Subject: [PATCH 097/110] Release 1.13.2 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b421fb..957cdba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# Version 1.13.2 + +- Fix build failure with minimal-versions. (#132) +- Prevent executor from becoming unusable by panic of the iterator passed by the user to the `spawn_many`. (#136) +- Reduce memory footprint. (#137) + # Version 1.13.1 - Fix docs.rs build. (#125) diff --git a/Cargo.toml b/Cargo.toml index b501a7d..3f9c94a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.13.1" +version = "1.13.2" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" rust-version = "1.63" From 87a287a73a14108b18aa1c6d6f51fd6d0911cd7a Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 6 Jul 2025 17:23:48 +0900 Subject: [PATCH 098/110] Fix clippy::uninlined_format_args warning ``` warning: variables can be used directly in the `format!` string --> examples/priority.rs:75:13 | 75 | println!("{:?}", priority); | ^^^^^^^^^^^^^^^^^^^^^^^^^^ | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args = note: `#[warn(clippy::uninlined_format_args)]` on by default help: change this to | 75 - println!("{:?}", priority); 75 + println!("{priority:?}"); | warning: variables can be used directly in the `format!` string --> examples/priority.rs:77:13 | 77 | println!("{:?}", priority); | ^^^^^^^^^^^^^^^^^^^^^^^^^^ | = help: for further information visit 
https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args help: change this to | 77 - println!("{:?}", priority); 77 + println!("{priority:?}"); | warning: variables can be used directly in the `format!` string --> benches/executor.rs:62:34 | 62 | group.bench_function(format!("{}::spawn_one", prefix), |b| { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args = note: `#[warn(clippy::uninlined_format_args)]` on by default help: change this to | 62 - group.bench_function(format!("{}::spawn_one", prefix), |b| { 62 + group.bench_function(format!("{prefix}::spawn_one"), |b| { | warning: variables can be used directly in the `format!` string --> benches/executor.rs:101:34 | 101 | group.bench_function(format!("{}::spawn_many_local", prefix), |b| { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args help: change this to | 101 - group.bench_function(format!("{}::spawn_many_local", prefix), |b| { 101 + group.bench_function(format!("{prefix}::spawn_many_local"), |b| { | warning: variables can be used directly in the `format!` string --> benches/executor.rs:139:34 | 139 | group.bench_function(format!("{}::spawn_recursively", prefix), |b| { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args help: change this to | 139 - group.bench_function(format!("{}::spawn_recursively", prefix), |b| { 139 + group.bench_function(format!("{prefix}::spawn_recursively"), |b| { | warning: variables can be used directly in the `format!` string --> benches/executor.rs:204:34 | 204 | group.bench_function(format!("{}::yield_now", prefix), |b| { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = help: for further information visit 
https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args help: change this to | 204 - group.bench_function(format!("{}::yield_now", prefix), |b| { 204 + group.bench_function(format!("{prefix}::yield_now"), |b| { | warning: variables can be used directly in the `format!` string --> benches/executor.rs:250:34 | 250 | group.bench_function(format!("{}::channels", prefix), |b| { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args help: change this to | 250 - group.bench_function(format!("{}::channels", prefix), |b| { 250 + group.bench_function(format!("{prefix}::channels"), |b| { | warning: variables can be used directly in the `format!` string --> benches/executor.rs:328:34 | 328 | group.bench_function(format!("{}::web_server", prefix), |b| { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args help: change this to | 328 - group.bench_function(format!("{}::web_server", prefix), |b| { 328 + group.bench_function(format!("{prefix}::web_server"), |b| { | ``` --- benches/executor.rs | 12 ++++++------ examples/priority.rs | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/benches/executor.rs b/benches/executor.rs index 74b2955..42fedbd 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -59,7 +59,7 @@ fn running_benches(c: &mut Criterion) { for (group_name, multithread) in [("single_thread", false), ("multi_thread", true)].iter() { let mut group = c.benchmark_group(group_name.to_string()); - group.bench_function(format!("{}::spawn_one", prefix), |b| { + group.bench_function(format!("{prefix}::spawn_one"), |b| { if with_static { run_static( || { @@ -98,7 +98,7 @@ fn running_benches(c: &mut Criterion) { }); } - group.bench_function(format!("{}::spawn_many_local", prefix), |b| { + 
group.bench_function(format!("{prefix}::spawn_many_local"), |b| { if with_static { run_static( || { @@ -136,7 +136,7 @@ fn running_benches(c: &mut Criterion) { } }); - group.bench_function(format!("{}::spawn_recursively", prefix), |b| { + group.bench_function(format!("{prefix}::spawn_recursively"), |b| { #[allow(clippy::manual_async_fn)] fn go(i: usize) -> impl Future + Send + 'static { async move { @@ -201,7 +201,7 @@ fn running_benches(c: &mut Criterion) { } }); - group.bench_function(format!("{}::yield_now", prefix), |b| { + group.bench_function(format!("{prefix}::yield_now"), |b| { if with_static { run_static( || { @@ -247,7 +247,7 @@ fn running_benches(c: &mut Criterion) { } }); - group.bench_function(format!("{}::channels", prefix), |b| { + group.bench_function(format!("{prefix}::channels"), |b| { if with_static { run_static( || { @@ -325,7 +325,7 @@ fn running_benches(c: &mut Criterion) { } }); - group.bench_function(format!("{}::web_server", prefix), |b| { + group.bench_function(format!("{prefix}::web_server"), |b| { if with_static { run_static( || { diff --git a/examples/priority.rs b/examples/priority.rs index 60d5c9a..b1dc01e 100644 --- a/examples/priority.rs +++ b/examples/priority.rs @@ -72,9 +72,9 @@ fn main() { // Spawn a task with this priority. 
tasks.push(EX.spawn(priority, async move { - println!("{:?}", priority); + println!("{priority:?}"); future::yield_now().await; - println!("{:?}", priority); + println!("{priority:?}"); })); } From b3269e136320a63aacd00b34ca4e0904b6f1ef2d Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 6 Jul 2025 17:25:23 +0900 Subject: [PATCH 099/110] Ignore clippy::unused_unit lint ``` warning: unneeded unit return type --> src/lib.rs:1159:42 | 1159 | struct AsyncCallOnDrop { | ^^^^^^^ help: remove the `-> ()` | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unused_unit = note: `#[warn(clippy::unused_unit)]` on by default ``` --- src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib.rs b/src/lib.rs index baeda1d..74e02db 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -38,6 +38,7 @@ html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![allow(clippy::unused_unit)] // false positive fixed in Rust 1.89 use std::fmt; use std::marker::PhantomData; From 95ec34dd704548132195c9e4f5b4558d4f5ea4fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 26 Jul 2025 15:27:16 +0900 Subject: [PATCH 100/110] Update criterion requirement from 0.5 to 0.6 (#142) Updates the requirements on [criterion](https://github.com/bheisler/criterion.rs) to permit the latest version. - [Changelog](https://github.com/bheisler/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/bheisler/criterion.rs/compare/0.5.0...0.6.0) --- updated-dependencies: - dependency-name: criterion dependency-version: 0.6.0 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Taiki Endo --- Cargo.toml | 2 +- benches/executor.rs | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 3f9c94a..c96b4d1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ futures-lite = { version = "2.0.0", default-features = false, features = ["std"] async-channel = "2.0.0" async-io = "2.1.0" async-lock = "3.0.0" -criterion = { version = "0.5", default-features = false, features = ["cargo_bench_support"] } +criterion = { version = "0.6", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" fastrand = "2.0.0" futures-lite = "2.0.0" diff --git a/benches/executor.rs b/benches/executor.rs index 42fedbd..5fc140f 100644 --- a/benches/executor.rs +++ b/benches/executor.rs @@ -1,3 +1,6 @@ +#![allow(clippy::incompatible_msrv)] // false positive: https://github.com/rust-lang/rust-clippy/issues/12257#issuecomment-2093667187 + +use std::hint::black_box; use std::mem; use std::thread::available_parallelism; @@ -380,9 +383,7 @@ fn running_benches(c: &mut Criterion) { let (resp_send, resp_recv) = async_channel::bounded(1); db_send.send(resp_send).await.unwrap(); - criterion::black_box( - resp_recv.recv().await.unwrap(), - ); + black_box(resp_recv.recv().await.unwrap()); } // Send the data back... @@ -461,9 +462,7 @@ fn running_benches(c: &mut Criterion) { let (resp_send, resp_recv) = async_channel::bounded(1); db_send.send(resp_send).await.unwrap(); - criterion::black_box( - resp_recv.recv().await.unwrap(), - ); + black_box(resp_recv.recv().await.unwrap()); } // Send the data back... 
From 58411d687671694a768d1eea83fab7d0edf39406 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 05:34:15 +0000 Subject: [PATCH 101/110] Update criterion requirement from 0.6 to 0.7 Updates the requirements on [criterion](https://github.com/bheisler/criterion.rs) to permit the latest version. - [Changelog](https://github.com/bheisler/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/bheisler/criterion.rs/compare/0.6.0...0.7.0) --- updated-dependencies: - dependency-name: criterion dependency-version: 0.7.0 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index c96b4d1..c8feeb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ futures-lite = { version = "2.0.0", default-features = false, features = ["std"] async-channel = "2.0.0" async-io = "2.1.0" async-lock = "3.0.0" -criterion = { version = "0.6", default-features = false, features = ["cargo_bench_support"] } +criterion = { version = "0.7", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" fastrand = "2.0.0" futures-lite = "2.0.0" From 08c3fc4d5dbdcb0aa04feae34cdb6bbc60e7b515 Mon Sep 17 00:00:00 2001 From: James Liu Date: Sun, 24 Aug 2025 10:23:11 +0000 Subject: [PATCH 102/110] Don't explicitly panic or block when avoidable (#147) * Don't explicitly panic when avoidable * Update src/lib.rs Co-authored-by: Taiki Endo * Do the same for static executors --------- Co-authored-by: Taiki Endo --- src/lib.rs | 73 +++++++++++++++++++++++++---------------- src/static_executors.rs | 11 ++++--- 2 files changed, 52 insertions(+), 32 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 74e02db..85c161b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -46,7 +46,7 @@ use std::panic::{RefUnwindSafe, UnwindSafe}; use std::pin::Pin; use std::rc::Rc; use 
std::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; -use std::sync::{Arc, Mutex, MutexGuard, RwLock, TryLockError}; +use std::sync::{Arc, Mutex, MutexGuard, PoisonError, RwLock, TryLockError}; use std::task::{Context, Poll, Waker}; use async_task::{Builder, Runnable}; @@ -350,7 +350,8 @@ impl<'a> Executor<'a> { // TODO: If possible, push into the current local queue and notify the ticker. move |runnable| { - state.queue.push(runnable).unwrap(); + let result = state.queue.push(runnable); + debug_assert!(result.is_ok()); // Since we use unbounded queue, push will never fail. state.notify(); } } @@ -696,7 +697,7 @@ impl State { /// Returns a reference to currently active tasks. fn active(&self) -> MutexGuard<'_, Slab> { - self.active.lock().unwrap_or_else(|e| e.into_inner()) + self.active.lock().unwrap_or_else(PoisonError::into_inner) } /// Notifies a sleeping ticker. @@ -707,7 +708,11 @@ impl State { .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) .is_ok() { - let waker = self.sleepers.lock().unwrap().notify(); + let waker = self + .sleepers + .lock() + .unwrap_or_else(PoisonError::into_inner) + .notify(); if let Some(w) = waker { w.wake(); } @@ -852,7 +857,11 @@ impl Ticker<'_> { /// /// Returns `false` if the ticker was already sleeping and unnotified. fn sleep(&mut self, waker: &Waker) -> bool { - let mut sleepers = self.state.sleepers.lock().unwrap(); + let mut sleepers = self + .state + .sleepers + .lock() + .unwrap_or_else(PoisonError::into_inner); match self.sleeping { // Move to sleeping state. @@ -878,7 +887,11 @@ impl Ticker<'_> { /// Moves the ticker into woken state. 
fn wake(&mut self) { if self.sleeping != 0 { - let mut sleepers = self.state.sleepers.lock().unwrap(); + let mut sleepers = self + .state + .sleepers + .lock() + .unwrap_or_else(PoisonError::into_inner); sleepers.remove(self.sleeping); self.state @@ -926,7 +939,11 @@ impl Drop for Ticker<'_> { fn drop(&mut self) { // If this ticker is in sleeping state, it must be removed from the sleepers list. if self.sleeping != 0 { - let mut sleepers = self.state.sleepers.lock().unwrap(); + let mut sleepers = self + .state + .sleepers + .lock() + .unwrap_or_else(PoisonError::into_inner); let notified = sleepers.remove(self.sleeping); self.state @@ -971,7 +988,7 @@ impl Runner<'_> { state .local_queues .write() - .unwrap() + .unwrap_or_else(PoisonError::into_inner) .push(runner.local.clone()); runner } @@ -993,25 +1010,25 @@ impl Runner<'_> { } // Try stealing from other runners. - let local_queues = self.state.local_queues.read().unwrap(); - - // Pick a random starting point in the iterator list and rotate the list. - let n = local_queues.len(); - let start = rng.usize(..n); - let iter = local_queues - .iter() - .chain(local_queues.iter()) - .skip(start) - .take(n); - - // Remove this runner's local queue. - let iter = iter.filter(|local| !Arc::ptr_eq(local, &self.local)); - - // Try stealing from each local queue in the list. - for local in iter { - steal(local, &self.local); - if let Ok(r) = self.local.pop() { - return Some(r); + if let Ok(local_queues) = self.state.local_queues.try_read() { + // Pick a random starting point in the iterator list and rotate the list. + let n = local_queues.len(); + let start = rng.usize(..n); + let iter = local_queues + .iter() + .chain(local_queues.iter()) + .skip(start) + .take(n); + + // Remove this runner's local queue. + let iter = iter.filter(|local| !Arc::ptr_eq(local, &self.local)); + + // Try stealing from each local queue in the list. 
+ for local in iter { + steal(local, &self.local); + if let Ok(r) = self.local.pop() { + return Some(r); + } } } @@ -1037,7 +1054,7 @@ impl Drop for Runner<'_> { self.state .local_queues .write() - .unwrap() + .unwrap_or_else(PoisonError::into_inner) .retain(|local| !Arc::ptr_eq(local, &self.local)); // Re-schedule remaining tasks in the local queue. diff --git a/src/static_executors.rs b/src/static_executors.rs index c43679d..7b4e667 100644 --- a/src/static_executors.rs +++ b/src/static_executors.rs @@ -7,6 +7,7 @@ use std::{ future::Future, marker::PhantomData, panic::{RefUnwindSafe, UnwindSafe}, + sync::PoisonError, }; impl Executor<'static> { @@ -42,7 +43,7 @@ impl Executor<'static> { std::mem::forget(self); - let mut active = state.active.lock().unwrap(); + let mut active = state.active.lock().unwrap_or_else(PoisonError::into_inner); if !active.is_empty() { // Reschedule all of the active tasks. for waker in active.drain() { @@ -92,7 +93,7 @@ impl LocalExecutor<'static> { std::mem::forget(self); - let mut active = state.active.lock().unwrap(); + let mut active = state.active.lock().unwrap_or_else(PoisonError::into_inner); if !active.is_empty() { // Reschedule all of the active tasks. for waker in active.drain() { @@ -283,7 +284,8 @@ impl StaticExecutor { let state: &'static State = &self.state; // TODO: If possible, push into the current local queue and notify the ticker. move |runnable| { - state.queue.push(runnable).unwrap(); + let result = state.queue.push(runnable); + debug_assert!(result.is_ok()); // Since we use unbounded queue, push will never fail. state.notify(); } } @@ -468,7 +470,8 @@ impl StaticLocalExecutor { let state: &'static State = &self.state; // TODO: If possible, push into the current local queue and notify the ticker. move |runnable| { - state.queue.push(runnable).unwrap(); + let result = state.queue.push(runnable); + debug_assert!(result.is_ok()); // Since we use unbounded queue, push will never fail. 
state.notify(); } } From 1e8d245f64e1d19c4a4dee7cbba9c3d43149f708 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Sun, 24 Aug 2025 16:46:45 -0700 Subject: [PATCH 103/110] v1.13.3 Signed-off-by: John Nunley --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 957cdba..89311fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# Version 1.13.3 + +- Avoid places where the code had a possibility to block or panic. (#147) + # Version 1.13.2 - Fix build failure with minimal-versions. (#132) diff --git a/Cargo.toml b/Cargo.toml index c8feeb5..a497731 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.13.2" +version = "1.13.3" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" rust-version = "1.63" From c10412c6c78884a66993c36a6060393aff2c2b8f Mon Sep 17 00:00:00 2001 From: James Liu Date: Wed, 27 Aug 2025 02:17:25 +0000 Subject: [PATCH 104/110] Pin an Executor's State to minimize atomic operations. (#146) Use `Pin<&'a State>` to point to the state instead of cloning `Arc`s, this should reduce the amount of atomic operations required. This should be sound given that the state are never moved until the Executor is dropped. This still uses `Arc` instead of `Box` as miri will treat a former Box as a uniquely owned pointer and cannot be shared (backed by `Unique` vs `Shared`). 
--- Cargo.toml | 2 +- src/lib.rs | 86 ++++++++++++++++++++--------------------- src/static_executors.rs | 32 +++++++++------ 3 files changed, 63 insertions(+), 57 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a497731..ab39b0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ name = "async-executor" version = "1.13.3" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" -rust-version = "1.63" +rust-version = "1.65" description = "Async executor" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-executor" diff --git a/src/lib.rs b/src/lib.rs index 85c161b..615b5c4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -90,7 +90,7 @@ pub use static_executors::*; /// ``` pub struct Executor<'a> { /// The executor state. - state: AtomicPtr, + pub(crate) state: AtomicPtr, /// Makes the `'a` lifetime invariant. _marker: PhantomData>, @@ -163,10 +163,11 @@ impl<'a> Executor<'a> { /// }); /// ``` pub fn spawn(&self, future: impl Future + Send + 'a) -> Task { - let mut active = self.state().active(); + let state = self.state(); + let mut active = state.active(); // SAFETY: `T` and the future are `Send`. - unsafe { self.spawn_inner(future, &mut active) } + unsafe { Self::spawn_inner(state, future, &mut active) } } /// Spawns many tasks onto the executor. @@ -214,12 +215,13 @@ impl<'a> Executor<'a> { futures: impl IntoIterator, handles: &mut impl Extend>, ) { - let mut active = Some(self.state().active()); + let state = self.state(); + let mut active = Some(state.as_ref().active()); // Convert the futures into tasks. let tasks = futures.into_iter().enumerate().map(move |(i, future)| { // SAFETY: `T` and the future are `Send`. - let task = unsafe { self.spawn_inner(future, active.as_mut().unwrap()) }; + let task = unsafe { Self::spawn_inner(state, future, active.as_mut().unwrap()) }; // Yield the lock every once in a while to ease contention. 
if i.wrapping_sub(1) % 500 == 0 { @@ -240,14 +242,13 @@ impl<'a> Executor<'a> { /// /// If this is an `Executor`, `F` and `T` must be `Send`. unsafe fn spawn_inner( - &self, + state: Pin<&'a State>, future: impl Future + 'a, active: &mut Slab, ) -> Task { // Remove the task from the set of active tasks when the future finishes. let entry = active.vacant_entry(); let index = entry.key(); - let state = self.state_as_arc(); let future = AsyncCallOnDrop::new(future, move || drop(state.active().try_remove(index))); // Create the task and register it in the set of active tasks. @@ -269,12 +270,16 @@ impl<'a> Executor<'a> { // the `Executor` is drained of all of its runnables. This ensures that // runnables are dropped and this precondition is satisfied. // - // `self.schedule()` is `Send`, `Sync` and `'static`, as checked below. - // Therefore we do not need to worry about what is done with the - // `Waker`. + // `Self::schedule` is `Send` and `Sync`, as checked below. + // Therefore we do not need to worry about which thread the `Waker` is used + // and dropped on. + // + // `Self::schedule` may not be `'static`, but we make sure that the `Waker` does + // not outlive `'a`. When the executor is dropped, the `active` field is + // drained and all of the `Waker`s are woken. let (runnable, task) = Builder::new() .propagate_panic(true) - .spawn_unchecked(|()| future, self.schedule()); + .spawn_unchecked(|()| future, Self::schedule(state)); entry.insert(runnable.waker()); runnable.schedule(); @@ -345,9 +350,7 @@ impl<'a> Executor<'a> { } /// Returns a function that schedules a runnable task when it gets woken up. - fn schedule(&self) -> impl Fn(Runnable) + Send + Sync + 'static { - let state = self.state_as_arc(); - + fn schedule(state: Pin<&'a State>) -> impl Fn(Runnable) + Send + Sync + 'a { // TODO: If possible, push into the current local queue and notify the ticker. 
move |runnable| { let result = state.queue.push(runnable); @@ -358,12 +361,11 @@ impl<'a> Executor<'a> { /// Returns a pointer to the inner state. #[inline] - fn state_ptr(&self) -> *const State { + fn state(&self) -> Pin<&'a State> { #[cold] fn alloc_state(atomic_ptr: &AtomicPtr) -> *mut State { let state = Arc::new(State::new()); - // TODO: Switch this to use cast_mut once the MSRV can be bumped past 1.65 - let ptr = Arc::into_raw(state) as *mut State; + let ptr = Arc::into_raw(state).cast_mut(); if let Err(actual) = atomic_ptr.compare_exchange( std::ptr::null_mut(), ptr, @@ -382,26 +384,10 @@ impl<'a> Executor<'a> { if ptr.is_null() { ptr = alloc_state(&self.state); } - ptr - } - /// Returns a reference to the inner state. - #[inline] - fn state(&self) -> &State { // SAFETY: So long as an Executor lives, it's state pointer will always be valid - // when accessed through state_ptr. - unsafe { &*self.state_ptr() } - } - - // Clones the inner state Arc - #[inline] - fn state_as_arc(&self) -> Arc { - // SAFETY: So long as an Executor lives, it's state pointer will always be a valid - // Arc when accessed through state_ptr. - let arc = unsafe { Arc::from_raw(self.state_ptr()) }; - let clone = arc.clone(); - std::mem::forget(arc); - clone + // and will never be moved until it's dropped. + Pin::new(unsafe { &*ptr }) } } @@ -416,7 +402,7 @@ impl Drop for Executor<'_> { // via Arc::into_raw in state_ptr. let state = unsafe { Arc::from_raw(ptr) }; - let mut active = state.active(); + let mut active = state.pin().active(); for w in active.drain() { w.wake(); } @@ -518,11 +504,12 @@ impl<'a> LocalExecutor<'a> { /// }); /// ``` pub fn spawn(&self, future: impl Future + 'a) -> Task { - let mut active = self.inner().state().active(); + let state = self.inner().state(); + let mut active = state.active(); // SAFETY: This executor is not thread safe, so the future and its result // cannot be sent to another thread. 
- unsafe { self.inner().spawn_inner(future, &mut active) } + unsafe { Executor::spawn_inner(state, future, &mut active) } } /// Spawns many tasks onto the executor. @@ -570,13 +557,14 @@ impl<'a> LocalExecutor<'a> { futures: impl IntoIterator, handles: &mut impl Extend>, ) { - let mut active = self.inner().state().active(); + let state = self.inner().state(); + let mut active = state.active(); // Convert all of the futures to tasks. let tasks = futures.into_iter().map(|future| { // SAFETY: This executor is not thread safe, so the future and its result // cannot be sent to another thread. - unsafe { self.inner().spawn_inner(future, &mut active) } + unsafe { Executor::spawn_inner(state, future, &mut active) } // As only one thread can spawn or poll tasks at a time, there is no need // to release lock contention here. @@ -695,9 +683,16 @@ impl State { } } + fn pin(&self) -> Pin<&Self> { + Pin::new(self) + } + /// Returns a reference to currently active tasks. - fn active(&self) -> MutexGuard<'_, Slab> { - self.active.lock().unwrap_or_else(PoisonError::into_inner) + fn active(self: Pin<&Self>) -> MutexGuard<'_, Slab> { + self.get_ref() + .active + .lock() + .unwrap_or_else(PoisonError::into_inner) } /// Notifies a sleeping ticker. 
@@ -1209,13 +1204,14 @@ fn _ensure_send_and_sync() { is_sync::>(Executor::new()); let ex = Executor::new(); + let state = ex.state(); is_send(ex.run(pending::<()>())); is_sync(ex.run(pending::<()>())); is_send(ex.tick()); is_sync(ex.tick()); - is_send(ex.schedule()); - is_sync(ex.schedule()); - is_static(ex.schedule()); + is_send(Executor::schedule(state)); + is_sync(Executor::schedule(state)); + is_static(Executor::schedule(state)); /// ```compile_fail /// use async_executor::LocalExecutor; diff --git a/src/static_executors.rs b/src/static_executors.rs index 7b4e667..2ad0403 100644 --- a/src/static_executors.rs +++ b/src/static_executors.rs @@ -7,7 +7,7 @@ use std::{ future::Future, marker::PhantomData, panic::{RefUnwindSafe, UnwindSafe}, - sync::PoisonError, + sync::{atomic::Ordering, PoisonError}, }; impl Executor<'static> { @@ -35,11 +35,16 @@ impl Executor<'static> { /// future::block_on(ex.run(task)); /// ``` pub fn leak(self) -> &'static StaticExecutor { - let ptr = self.state_ptr(); - // SAFETY: So long as an Executor lives, it's state pointer will always be valid - // when accessed through state_ptr. This executor will live for the full 'static - // lifetime so this isn't an arbitrary lifetime extension. - let state: &'static State = unsafe { &*ptr }; + let ptr = self.state.load(Ordering::Relaxed); + + let state: &'static State = if ptr.is_null() { + Box::leak(Box::new(State::new())) + } else { + // SAFETY: So long as an Executor lives, it's state pointer will always be valid + // when accessed through state_ptr. This executor will live for the full 'static + // lifetime so this isn't an arbitrary lifetime extension. 
+ unsafe { &*ptr } + }; std::mem::forget(self); @@ -85,11 +90,16 @@ impl LocalExecutor<'static> { /// future::block_on(ex.run(task)); /// ``` pub fn leak(self) -> &'static StaticLocalExecutor { - let ptr = self.inner.state_ptr(); - // SAFETY: So long as a LocalExecutor lives, it's state pointer will always be valid - // when accessed through state_ptr. This executor will live for the full 'static - // lifetime so this isn't an arbitrary lifetime extension. - let state: &'static State = unsafe { &*ptr }; + let ptr = self.inner.state.load(Ordering::Relaxed); + + let state: &'static State = if ptr.is_null() { + Box::leak(Box::new(State::new())) + } else { + // SAFETY: So long as an Executor lives, it's state pointer will always be valid + // when accessed through state_ptr. This executor will live for the full 'static + // lifetime so this isn't an arbitrary lifetime extension. + unsafe { &*ptr } + }; std::mem::forget(self); From b0f396c8e5366ed4eecc2d9adb03a1d8e70e5bb4 Mon Sep 17 00:00:00 2001 From: wojciech-graj <71249593+wojciech-graj@users.noreply.github.com> Date: Wed, 29 Oct 2025 10:59:35 +0100 Subject: [PATCH 105/110] Fix failing docs build (#152) The doc_auto_cfg attribute has been renamed to doc_cfg https://github.com/rust-lang/rust/issues/43781 --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 615b5c4..b65c33c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,7 +37,7 @@ #![doc( html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" )] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(docsrs, feature(doc_cfg))] #![allow(clippy::unused_unit)] // false positive fixed in Rust 1.89 use std::fmt; From 86bcbf839b8fb432053eae948fb56fc01c3cf8ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 01:58:41 +0000 Subject: [PATCH 106/110] Update criterion requirement 
from 0.7 to 0.8 Updates the requirements on [criterion](https://github.com/criterion-rs/criterion.rs) to permit the latest version. - [Release notes](https://github.com/criterion-rs/criterion.rs/releases) - [Changelog](https://github.com/criterion-rs/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/criterion-rs/criterion.rs/compare/criterion-plot-v0.7.0...criterion-v0.8.0) --- updated-dependencies: - dependency-name: criterion dependency-version: 0.8.0 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index ab39b0d..4fb566f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ futures-lite = { version = "2.0.0", default-features = false, features = ["std"] async-channel = "2.0.0" async-io = "2.1.0" async-lock = "3.0.0" -criterion = { version = "0.7", default-features = false, features = ["cargo_bench_support"] } +criterion = { version = "0.8", default-features = false, features = ["cargo_bench_support"] } easy-parallel = "3.1.0" fastrand = "2.0.0" futures-lite = "2.0.0" From cc6941ac539e4141674cf20c392f3696e0f0b0a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Kr=C3=B6ning?= Date: Fri, 25 Jul 2025 10:54:50 +0200 Subject: [PATCH 107/110] refactor: return `Self` --- src/lib.rs | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index b65c33c..dbe33d6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -120,8 +120,8 @@ impl<'a> Executor<'a> { /// /// let ex = Executor::new(); /// ``` - pub const fn new() -> Executor<'a> { - Executor { + pub const fn new() -> Self { + Self { state: AtomicPtr::new(std::ptr::null_mut()), _marker: PhantomData, } @@ -413,8 +413,8 @@ impl Drop for Executor<'_> { } impl<'a> Default for Executor<'a> { - fn default() -> Executor<'a> { - Executor::new() + fn default() -> Self { + Self::new() } } @@ -461,8 +461,8 @@ impl<'a> 
LocalExecutor<'a> { /// /// let local_ex = LocalExecutor::new(); /// ``` - pub const fn new() -> LocalExecutor<'a> { - LocalExecutor { + pub const fn new() -> Self { + Self { inner: Executor::new(), _marker: PhantomData, } @@ -551,7 +551,6 @@ impl<'a> LocalExecutor<'a> { /// ``` /// /// [`spawn`]: LocalExecutor::spawn - /// [`Executor::spawn_many`]: Executor::spawn_many pub fn spawn_many + 'a>( &self, futures: impl IntoIterator, @@ -644,8 +643,8 @@ impl<'a> LocalExecutor<'a> { } impl<'a> Default for LocalExecutor<'a> { - fn default() -> LocalExecutor<'a> { - LocalExecutor::new() + fn default() -> Self { + Self::new() } } @@ -669,8 +668,8 @@ struct State { impl State { /// Creates state for a new executor. - const fn new() -> State { - State { + const fn new() -> Self { + Self { queue: ConcurrentQueue::unbounded(), local_queues: RwLock::new(Vec::new()), notified: AtomicBool::new(true), @@ -842,10 +841,10 @@ struct Ticker<'a> { sleeping: usize, } -impl Ticker<'_> { +impl<'a> Ticker<'a> { /// Creates a ticker. - fn new(state: &State) -> Ticker<'_> { - Ticker { state, sleeping: 0 } + fn new(state: &'a State) -> Self { + Self { state, sleeping: 0 } } /// Moves the ticker into sleeping and unnotified state. @@ -971,10 +970,10 @@ struct Runner<'a> { ticks: usize, } -impl Runner<'_> { +impl<'a> Runner<'a> { /// Creates a runner and registers it in the executor state. 
- fn new(state: &State) -> Runner<'_> { - let runner = Runner { + fn new(state: &'a State) -> Self { + let runner = Self { state, ticker: Ticker::new(state), local: Arc::new(ConcurrentQueue::bounded(512)), From 150ca2238b72d3b2461ebbbce3340de4be7cfa4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Kr=C3=B6ning?= Date: Thu, 24 Jul 2025 18:10:24 +0200 Subject: [PATCH 108/110] refactor: import items from core and alloc if possible --- src/lib.rs | 32 ++++++++++++++++++-------------- src/static_executors.rs | 17 +++++++++-------- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index dbe33d6..89f95d3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -40,14 +40,18 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![allow(clippy::unused_unit)] // false positive fixed in Rust 1.89 -use std::fmt; -use std::marker::PhantomData; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::pin::Pin; -use std::rc::Rc; -use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; -use std::sync::{Arc, Mutex, MutexGuard, PoisonError, RwLock, TryLockError}; -use std::task::{Context, Poll, Waker}; +extern crate alloc; + +use alloc::rc::Rc; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::fmt; +use core::marker::PhantomData; +use core::panic::{RefUnwindSafe, UnwindSafe}; +use core::pin::Pin; +use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; +use core::task::{Context, Poll, Waker}; +use std::sync::{Mutex, MutexGuard, PoisonError, RwLock, TryLockError}; use async_task::{Builder, Runnable}; use concurrent_queue::ConcurrentQueue; @@ -90,10 +94,10 @@ pub use static_executors::*; /// ``` pub struct Executor<'a> { /// The executor state. - pub(crate) state: AtomicPtr, + state: AtomicPtr, /// Makes the `'a` lifetime invariant. - _marker: PhantomData>, + _marker: PhantomData>, } // SAFETY: Executor stores no thread local state that can be accessed via other thread. 
@@ -122,7 +126,7 @@ impl<'a> Executor<'a> { /// ``` pub const fn new() -> Self { Self { - state: AtomicPtr::new(std::ptr::null_mut()), + state: AtomicPtr::new(core::ptr::null_mut()), _marker: PhantomData, } } @@ -186,7 +190,7 @@ impl<'a> Executor<'a> { /// ``` /// use async_executor::Executor; /// use futures_lite::{stream, prelude::*}; - /// use std::future::ready; + /// use core::future::ready; /// /// # futures_lite::future::block_on(async { /// let mut ex = Executor::new(); @@ -367,7 +371,7 @@ impl<'a> Executor<'a> { let state = Arc::new(State::new()); let ptr = Arc::into_raw(state).cast_mut(); if let Err(actual) = atomic_ptr.compare_exchange( - std::ptr::null_mut(), + core::ptr::null_mut(), ptr, Ordering::AcqRel, Ordering::Acquire, @@ -527,7 +531,7 @@ impl<'a> LocalExecutor<'a> { /// ``` /// use async_executor::LocalExecutor; /// use futures_lite::{stream, prelude::*}; - /// use std::future::ready; + /// use core::future::ready; /// /// # futures_lite::future::block_on(async { /// let mut ex = LocalExecutor::new(); diff --git a/src/static_executors.rs b/src/static_executors.rs index 2ad0403..18353d9 100644 --- a/src/static_executors.rs +++ b/src/static_executors.rs @@ -1,14 +1,15 @@ use crate::{debug_state, Executor, LocalExecutor, State}; use async_task::{Builder, Runnable, Task}; -use slab::Slab; -use std::{ +use core::{ cell::UnsafeCell, fmt, future::Future, marker::PhantomData, panic::{RefUnwindSafe, UnwindSafe}, - sync::{atomic::Ordering, PoisonError}, + sync::atomic::Ordering, }; +use slab::Slab; +use std::sync::PoisonError; impl Executor<'static> { /// Consumes the [`Executor`] and intentionally leaks it. @@ -46,7 +47,7 @@ impl Executor<'static> { unsafe { &*ptr } }; - std::mem::forget(self); + core::mem::forget(self); let mut active = state.active.lock().unwrap_or_else(PoisonError::into_inner); if !active.is_empty() { @@ -60,7 +61,7 @@ impl Executor<'static> { // SAFETY: StaticExecutor has the same memory layout as State as it's repr(transparent). 
// The lifetime is not altered: 'static -> 'static. - let static_executor: &'static StaticExecutor = unsafe { std::mem::transmute(state) }; + let static_executor: &'static StaticExecutor = unsafe { core::mem::transmute(state) }; static_executor } } @@ -101,7 +102,7 @@ impl LocalExecutor<'static> { unsafe { &*ptr } }; - std::mem::forget(self); + core::mem::forget(self); let mut active = state.active.lock().unwrap_or_else(PoisonError::into_inner); if !active.is_empty() { @@ -115,7 +116,7 @@ impl LocalExecutor<'static> { // SAFETY: StaticLocalExecutor has the same memory layout as State as it's repr(transparent). // The lifetime is not altered: 'static -> 'static. - let static_executor: &'static StaticLocalExecutor = unsafe { std::mem::transmute(state) }; + let static_executor: &'static StaticLocalExecutor = unsafe { core::mem::transmute(state) }; static_executor } } @@ -133,7 +134,7 @@ impl LocalExecutor<'static> { /// [`StaticExecutor::run`] will cause the all spawned tasks to permanently leak. Any /// tasks at the time will not be cancelled. 
/// -/// [`static`]: https://doc.rust-lang.org/std/keyword.static.html +/// [`static`]: https://doc.rust-lang.org/core/keyword.static.html #[repr(transparent)] pub struct StaticExecutor { state: State, From 071235715a5b69dd87bdffded3fcfd4b566915fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=88=CE=BB=CE=BB=CE=B5=CE=BD=20=CE=95=CE=BC=CE=AF=CE=BB?= =?UTF-8?q?=CE=B9=CE=B1=20=CE=86=CE=BD=CE=BD=CE=B1=20Zscheile?= Date: Wed, 21 Jan 2026 18:27:44 +0100 Subject: [PATCH 109/110] chore: add `use Box` (for no-std compatibility) --- src/static_executors.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/static_executors.rs b/src/static_executors.rs index 18353d9..3f9ec89 100644 --- a/src/static_executors.rs +++ b/src/static_executors.rs @@ -1,4 +1,5 @@ use crate::{debug_state, Executor, LocalExecutor, State}; +use alloc::boxed::Box; use async_task::{Builder, Runnable, Task}; use core::{ cell::UnsafeCell, From 543403e773ec4941a6272e29c2ff649bfd01a4f8 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 20 Jan 2026 21:33:21 +0900 Subject: [PATCH 110/110] Release 1.14.0 --- CHANGELOG.md | 8 ++++++++ Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 89311fb..94cb58d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +# Version 1.14.0 + +- Bump MSRV to 1.65. (#146) +- Fix docs.rs build. (#152) +- Upstreaming parts of the Hermit `no_std` patchset: + - Use `Self` where possible (#155) + - Import items from `core` and `alloc` if possible (#160) + # Version 1.13.3 - Avoid places where the code had a possibility to block or panic. (#147) diff --git a/Cargo.toml b/Cargo.toml index 4fb566f..827438e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "async-executor" # When publishing a new version: # - Update CHANGELOG.md # - Create "v1.x.y" git tag -version = "1.13.3" +version = "1.14.0" authors = ["Stjepan Glavina ", "John Nunley "] edition = "2021" rust-version = "1.65"