From 063813998d927e132a36387729e242ef12609fdc Mon Sep 17 00:00:00 2001 From: sladynnunes Date: Fri, 30 Jan 2026 01:29:30 -0800 Subject: [PATCH 1/8] Fix StorageDead and ForLint drops in tail calls and unwind paths This commit fixes several issues related to StorageDead and ForLint drops: 1. Add StorageDead and ForLint drops to unwind_drops for all functions - Updated diverge_cleanup_target to include StorageDead and ForLint drops in the unwind_drops tree for all functions (not just coroutines), but only when there's a cleanup path (i.e., when there are Value or ForLint drops) - This ensures proper drop ordering for borrow-checking on panic paths 2. Fix break_for_tail_call to handle StorageDead and ForLint drops - Don't skip StorageDead drops for non-drop types - Adjust unwind_to pointer for StorageDead and ForLint drops, matching the behavior in build_scope_drops - Only adjust unwind_to when it's valid (not DropIdx::MAX) - This prevents debug assert failures when processing drops in tail calls 3. Fix index out of bounds panic when unwind_to is DropIdx::MAX - Added checks to ensure unwind_to != DropIdx::MAX before accessing unwind_drops.drop_nodes[unwind_to] - Only emit StorageDead on unwind paths when there's actually an unwind path - Only add entry points to unwind_drops when unwind_to is valid - This prevents panics when there's no cleanup needed 4. Add test for explicit tail calls with StorageDead drops - Tests that tail calls work correctly when StorageDead and ForLint drops are present in the unwind path - Verifies that unwind_to is correctly adjusted for all drop kinds These changes make the borrow-checker stricter and more consistent by ensuring that StorageDead statements are emitted on unwind paths for all functions when there's a cleanup path, allowing unsafe code to rely on drop order being enforced consistently. --- compiler/rustc_mir_build/src/builder/scope.rs | 142 +++++++++++------- .../tail-call-storage-dead-unwind.rs | 69 +++++++++ 2 files changed, 158 insertions(+), 53 deletions(-) create mode 100644 tests/ui/explicit-tail-calls/tail-call-storage-dead-unwind.rs diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index b10df60e0f75b..dc4feeab6d48e 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -256,16 +256,8 @@ struct DropNodeKey { impl Scope { /// Whether there's anything to do for the cleanup path, that is, - /// when unwinding through this scope. This includes destructors, - /// but not StorageDead statements, which don't get emitted at all - /// for unwinding, for several reasons: - /// * clang doesn't emit llvm.lifetime.end for C++ unwinding - /// * LLVM's memory dependency analysis can't handle it atm - /// * polluting the cleanup MIR with StorageDead creates - /// landing pads even though there's no actual destructors - /// * freeing up stack space has no effect during unwinding - /// Note that for coroutines we do emit StorageDeads, for the - /// use of optimizations in the MIR coroutine transform. + /// when unwinding through this scope. This includes destructors + /// and StorageDead statements to maintain proper drop ordering. fn needs_cleanup(&self) -> bool { self.drops.iter().any(|drop| match drop.kind { DropKind::Value | DropKind::ForLint => true, @@ -1124,6 +1116,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ); let typing_env = self.typing_env(); let unwind_drops = &mut self.scopes.unwind_drops; + let has_storage_drops = self.scopes.scopes[1..] + .iter() + .any(|scope| scope.drops.iter().any(|d| d.kind == DropKind::Storage)); // the innermost scope contains only the destructors for the tail call arguments // we only want to drop these in case of a panic, so we skip it @@ -1133,7 +1128,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let source_info = drop_data.source_info; let local = drop_data.local; - if !self.local_decls[local].ty.needs_drop(self.tcx, typing_env) { + // Skip Value drops for types that don't need drop, but process + // StorageDead and ForLint drops for all locals + if drop_data.kind == DropKind::Value + && !self.local_decls[local].ty.needs_drop(self.tcx, typing_env) + { continue; } @@ -1142,15 +1141,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // `unwind_to` should drop the value that we're about to // schedule. If dropping this value panics, then we continue // with the *next* value on the unwind path. - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.local, - drop_data.local - ); - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.kind, - drop_data.kind - ); - unwind_to = unwind_drops.drop_nodes[unwind_to].next; + if unwind_to != DropIdx::MAX { + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.local, + drop_data.local + ); + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.kind, + drop_data.kind + ); + unwind_to = unwind_drops.drop_nodes[unwind_to].next; + } let mut unwind_entry_point = unwind_to; @@ -1177,6 +1178,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { block = next; } DropKind::ForLint => { + // If this ForLint drop is in unwind_drops, we need to adjust + // unwind_to to match, just like in build_scope_drops + if has_storage_drops && unwind_to != DropIdx::MAX { + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.local, + drop_data.local + ); + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.kind, + drop_data.kind + ); + unwind_to = unwind_drops.drop_nodes[unwind_to].next; + } self.cfg.push( block, Statement::new( @@ -1189,6 +1203,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ); } DropKind::Storage => { + // If this StorageDead drop is in unwind_drops, we need to adjust + // unwind_to to match, just like in build_scope_drops + if has_storage_drops && unwind_to != DropIdx::MAX { + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.local, + drop_data.local + ); + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.kind, + drop_data.kind + ); + unwind_to = unwind_drops.drop_nodes[unwind_to].next; + } // Only temps and vars need their storage dead. assert!(local.index() > self.arg_count); self.cfg.push( @@ -1232,6 +1259,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None }; let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes"); let typing_env = self.typing_env(); + let has_storage_drops = scope.drops.iter().any(|d| d.kind == DropKind::Storage); + // Only emit StorageDead on unwind paths when there's actually an unwind path + let storage_dead_on_unwind = has_storage_drops && unwind_to != DropIdx::MAX; build_scope_drops( &mut self.cfg, &mut self.scopes.unwind_drops, @@ -1240,7 +1270,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { block, unwind_to, dropline_to, - is_coroutine && needs_cleanup, + storage_dead_on_unwind, self.arg_count, |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, typing_env, v), ) @@ -1624,9 +1654,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } let is_coroutine = self.coroutine.is_some(); + // Check if there's a cleanup path (i.e., Value or ForLint drops that require cleanup) + let has_cleanup_path = self.scopes.scopes[uncached_scope..=target] + .iter() + .any(|scope| scope.drops.iter().any(|d| d.kind == DropKind::Value || d.kind == DropKind::ForLint)); for scope in &mut self.scopes.scopes[uncached_scope..=target] { for drop in &scope.drops { - if is_coroutine || drop.kind == DropKind::Value { + // Add all drops to unwind_drops for all functions (not just coroutines) + // to maintain proper drop ordering for borrow-checking. This matches + // the behavior in build_exit_tree. + // For coroutines, we always add all drops. For other functions, we now + // also add StorageDead and ForLint drops, but only when there's a cleanup path. + if is_coroutine || drop.kind == DropKind::Value || drop.kind == DropKind::ForLint || (drop.kind == DropKind::Storage && has_cleanup_path) { cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop); } } @@ -1808,11 +1847,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { /// * `scope`, describes the drops that will occur on exiting the scope in regular execution /// * `block`, the block to branch to once drops are complete (assuming no unwind occurs) /// * `unwind_to`, describes the drops that would occur at this point in the code if a -/// panic occurred (a subset of the drops in `scope`, since we sometimes elide StorageDead and other -/// instructions on unwinding) +/// panic occurred (a subset of the drops in `scope`) /// * `dropline_to`, describes the drops that would occur at this point in the code if a /// coroutine drop occurred. -/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding +/// * `storage_dead_on_unwind`, if true, then we emit `StorageDead` on the unwind path +/// and adjust `unwind_to` accordingly (used for all functions with StorageDead drops) /// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those) fn build_scope_drops<'tcx, F>( cfg: &mut CFG<'tcx>, @@ -1845,10 +1884,10 @@ where // drops panic (panicking while unwinding will abort, so there's no need for // another set of arrows). // - // For coroutines, we unwind from a drop on a local to its StorageDead - // statement. For other functions we don't worry about StorageDead. The - // drops for the unwind path should have already been generated by - // `diverge_cleanup_gen`. + // We unwind from a drop on a local to its StorageDead statement for all + // functions (not just coroutines) to maintain proper drop ordering for + // borrow-checking. The drops for the unwind path should have already been + // generated by `diverge_cleanup_gen`. // `unwind_to` indicates what needs to be dropped should unwinding occur. // This is a subset of what needs to be dropped when exiting the scope. @@ -1877,9 +1916,11 @@ where // // We adjust this BEFORE we create the drop (e.g., `drops[n]`) // because `drops[n]` should unwind to `drops[n-1]`. - debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local); - debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind); - unwind_to = unwind_drops.drop_nodes[unwind_to].next; + if unwind_to != DropIdx::MAX { + debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local); + debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind); + unwind_to = unwind_drops.drop_nodes[unwind_to].next; + } if let Some(idx) = dropline_to { debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.local, drop_data.local); @@ -1895,7 +1936,9 @@ where continue; } - unwind_drops.add_entry_point(block, unwind_to); + if unwind_to != DropIdx::MAX { + unwind_drops.add_entry_point(block, unwind_to); + } if let Some(to) = dropline_to && is_async_drop(local) { @@ -1919,11 +1962,9 @@ where } DropKind::ForLint => { // As in the `DropKind::Storage` case below: - // normally lint-related drops are not emitted for unwind, - // so we can just leave `unwind_to` unmodified, but in some - // cases we emit things ALSO on the unwind path, so we need to adjust - // `unwind_to` in that case. - if storage_dead_on_unwind { + // we emit lint-related drops on the unwind path when `storage_dead_on_unwind` + // is true, so we need to adjust `unwind_to` in that case. + if storage_dead_on_unwind && unwind_to != DropIdx::MAX { debug_assert_eq!( unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local @@ -1952,12 +1993,11 @@ where ); } DropKind::Storage => { - // Ordinarily, storage-dead nodes are not emitted on unwind, so we don't - // need to adjust `unwind_to` on this path. However, in some specific cases - // we *do* emit storage-dead nodes on the unwind path, and in that case now that - // the storage-dead has completed, we need to adjust the `unwind_to` pointer - // so that any future drops we emit will not register storage-dead. - if storage_dead_on_unwind { + // We emit storage-dead nodes on the unwind path for borrow-checking + // purposes. When `storage_dead_on_unwind` is true, we need to adjust + // the `unwind_to` pointer now that the storage-dead has completed, so + // that any future drops we emit will not register storage-dead. + if storage_dead_on_unwind && unwind_to != DropIdx::MAX { debug_assert_eq!( unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local @@ -2001,15 +2041,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { for (drop_idx, drop_node) in drops.drop_nodes.iter_enumerated().skip(1) { match drop_node.data.kind { DropKind::Storage | DropKind::ForLint => { - if is_coroutine { - let unwind_drop = self - .scopes - .unwind_drops - .add_drop(drop_node.data, unwind_indices[drop_node.next]); - unwind_indices.push(unwind_drop); - } else { - unwind_indices.push(unwind_indices[drop_node.next]); - } + let unwind_drop = self + .scopes + .unwind_drops + .add_drop(drop_node.data, unwind_indices[drop_node.next]); + unwind_indices.push(unwind_drop); } DropKind::Value => { let unwind_drop = self diff --git a/tests/ui/explicit-tail-calls/tail-call-storage-dead-unwind.rs b/tests/ui/explicit-tail-calls/tail-call-storage-dead-unwind.rs new file mode 100644 index 0000000000000..6fe9eaa546df0 --- /dev/null +++ b/tests/ui/explicit-tail-calls/tail-call-storage-dead-unwind.rs @@ -0,0 +1,69 @@ +//@ run-pass +//@ ignore-backends: gcc +#![expect(incomplete_features)] +#![feature(explicit_tail_calls)] + +// Test that explicit tail calls work correctly when there are StorageDead +// and ForLint drops in the unwind path. This ensures that `unwind_to` is +// correctly adjusted in `break_for_tail_call` when encountering StorageDead +// and ForLint drops, matching the behavior in `build_scope_drops`. + +#[allow(dead_code)] +struct Droppable(i32); + +impl Drop for Droppable { + fn drop(&mut self) {} +} + +fn tail_call_with_storage_dead() { + // These will have StorageDead drops (non-drop types) + let _a = 42i32; + let _b = true; + let _c = 10u8; + + // This will have a Value drop (drop type) + let _d = Droppable(1); + + // Tail call - if unwind_to isn't adjusted for StorageDead drops, + // the debug assert will fail when processing the Value drop + become next_function(); +} + +fn tail_call_with_mixed_drops() { + // StorageDead drop + let _storage = 100i32; + + // Value drop + let _value = Droppable(2); + + // Another StorageDead drop + let _storage2 = 200i32; + + // Another Value drop + let _value2 = Droppable(3); + + // Tail call - tests that unwind_to is adjusted correctly + // for both StorageDead and Value drops in sequence + become next_function(); +} + +fn tail_call_with_storage_before_value() { + // Multiple StorageDead drops before a Value drop + let _s1 = 1i32; + let _s2 = 2i32; + let _s3 = 3i32; + + // Value drop - if StorageDead drops aren't handled, + // unwind_to will point to wrong node and assert fails + let _v = Droppable(4); + + become next_function(); +} + +fn next_function() {} + +fn main() { + tail_call_with_storage_dead(); + tail_call_with_mixed_drops(); + tail_call_with_storage_before_value(); +} From 7d9195eecdbd932f35a3829f16a1fcbbced34417 Mon Sep 17 00:00:00 2001 From: sladynnunes Date: Sat, 31 Jan 2026 15:41:35 -0800 Subject: [PATCH 2/8] Fix StorageDead on unwind paths and add MIR transform to remove them - Add StorageDead to unwind paths for all functions (not just coroutines) - Modify CleanupPostBorrowck to remove StorageDead from cleanup blocks - Add tests for the fix and StorageDead removal --- compiler/rustc_mir_build/src/builder/scope.rs | 74 ++++++++++++++----- .../src/cleanup_post_borrowck.rs | 8 ++ ...0}.coroutine_drop_async.0.panic-unwind.mir | 1 - ...ignment.main.SimplifyCfg-initial.after.mir | 5 ++ ...n_conditional.test_complex.built.after.mir | 33 +++++++-- ..._or_in_conditional.test_or.built.after.mir | 17 ++++- ...nst_array_len.built.after.panic-unwind.mir | 62 +++++++++++----- .../building/storage_dead_unwind_path.rs | 29 ++++++++ ...rage_dead_unwind_path.test.built.after.mir | 48 ++++++++++++ ...osure#0}.coroutine_drop.0.panic-unwind.mir | 1 - ...#0}.StateTransform.before.panic-unwind.mir | 33 +++------ tests/mir-opt/issue_91633.bar.built.after.mir | 2 + tests/mir-opt/issue_91633.foo.built.after.mir | 13 +++- ...fg-initial.after-ElaborateDrops.after.diff | 30 +++++--- .../storage_dead_removed_after_borrowck.rs | 31 ++++++++ ...orrowck.test.CleanupPostBorrowck.after.mir | 48 ++++++++++++ ...emoved_after_borrowck.test.built.after.mir | 48 ++++++++++++ ...l_drops.f.ElaborateDrops.panic-unwind.diff | 1 + ..._call_drops.f.built.after.panic-unwind.mir | 13 +++- ..._with_arg.ElaborateDrops.panic-unwind.diff | 1 + ...ps.f_with_arg.built.after.panic-unwind.mir | 61 +++++++++------ tests/ui/borrowck/storage-dead-unwind-path.rs | 27 +++++++ .../tail-call-storage-dead-unwind.rs | 69 ----------------- tests/ui/issues/issue-147875.rs | 24 ++++++ 24 files changed, 504 insertions(+), 175 deletions(-) create mode 100644 tests/mir-opt/building/storage_dead_unwind_path.rs create mode 100644 tests/mir-opt/building/storage_dead_unwind_path.test.built.after.mir create mode 100644 tests/mir-opt/storage_dead_removed_after_borrowck.rs create mode 100644 tests/mir-opt/storage_dead_removed_after_borrowck.test.CleanupPostBorrowck.after.mir create mode 100644 tests/mir-opt/storage_dead_removed_after_borrowck.test.built.after.mir create mode 100644 tests/ui/borrowck/storage-dead-unwind-path.rs delete mode 100644 tests/ui/explicit-tail-calls/tail-call-storage-dead-unwind.rs create mode 100644 tests/ui/issues/issue-147875.rs diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index dc4feeab6d48e..f392e9459f681 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -257,7 +257,9 @@ struct DropNodeKey { impl Scope { /// Whether there's anything to do for the cleanup path, that is, /// when unwinding through this scope. This includes destructors - /// and StorageDead statements to maintain proper drop ordering. + /// (Value and ForLint drops). StorageDead drops are not included + /// here because they don't require cleanup blocks - they're only + /// needed for borrow-checking and are removed before codegen. fn needs_cleanup(&self) -> bool { self.drops.iter().any(|drop| match drop.kind { DropKind::Value | DropKind::ForLint => true, @@ -1655,17 +1657,36 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let is_coroutine = self.coroutine.is_some(); // Check if there's a cleanup path (i.e., Value or ForLint drops that require cleanup) - let has_cleanup_path = self.scopes.scopes[uncached_scope..=target] - .iter() - .any(|scope| scope.drops.iter().any(|d| d.kind == DropKind::Value || d.kind == DropKind::ForLint)); + let has_cleanup_path = self.scopes.scopes[uncached_scope..=target].iter().any(|scope| { + scope.drops.iter().any(|d| d.kind == DropKind::Value || d.kind == DropKind::ForLint) + }); for scope in &mut self.scopes.scopes[uncached_scope..=target] { for drop in &scope.drops { - // Add all drops to unwind_drops for all functions (not just coroutines) - // to maintain proper drop ordering for borrow-checking. This matches - // the behavior in build_exit_tree. - // For coroutines, we always add all drops. For other functions, we now - // also add StorageDead and ForLint drops, but only when there's a cleanup path. - if is_coroutine || drop.kind == DropKind::Value || drop.kind == DropKind::ForLint || (drop.kind == DropKind::Storage && has_cleanup_path) { + // We add drops to unwind_drops to ensure the borrow-checker treats locals as dead + // at the same point on all paths (normal and unwind). This is for static semantics + // (borrow-checking), not runtime drop order. + // + // StorageDead drops are only needed when there are Value or ForLint drops present, + // because: + // 1. StorageDead is only relevant for borrow-checking when there are destructors + // that might reference the dead variable + // 2. If there are no drops, there's no unwind path to emit StorageDead on + // 3. StorageDead on unwind paths ensures the borrow-checker correctly tracks + // when variables are dead for the purpose of checking whether they might be + // referenced by destructors + // + // These StorageDead statements are removed by the `RemoveStorageMarkers` MIR + // transform pass before codegen, so they don't affect LLVM output. They only + // affect static analysis (borrow-checking). + // + // For coroutines, we always add all drops. For other functions, we add: + // - Value and ForLint drops (always, as they require cleanup) + // - StorageDead drops (only when there's a cleanup path, for borrow-checking) + if is_coroutine + || drop.kind == DropKind::Value + || drop.kind == DropKind::ForLint + || (drop.kind == DropKind::Storage && has_cleanup_path) + { cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop); } } @@ -1885,9 +1906,13 @@ where // another set of arrows). // // We unwind from a drop on a local to its StorageDead statement for all - // functions (not just coroutines) to maintain proper drop ordering for - // borrow-checking. The drops for the unwind path should have already been - // generated by `diverge_cleanup_gen`. + // functions (not just coroutines) to ensure the borrow-checker treats locals + // as dead at the same point on all paths. This is for static semantics + // (borrow-checking), not runtime drop order. The drops for the unwind path + // should have already been generated by `diverge_cleanup_gen`. + // + // Note: StorageDead statements are removed by the `RemoveStorageMarkers` MIR + // transform pass before codegen, so they don't affect LLVM output. // `unwind_to` indicates what needs to be dropped should unwinding occur. // This is a subset of what needs to be dropped when exiting the scope. @@ -1917,7 +1942,10 @@ where // We adjust this BEFORE we create the drop (e.g., `drops[n]`) // because `drops[n]` should unwind to `drops[n-1]`. if unwind_to != DropIdx::MAX { - debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local); + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.local, + drop_data.local + ); debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind); unwind_to = unwind_drops.drop_nodes[unwind_to].next; } @@ -1993,10 +2021,20 @@ where ); } DropKind::Storage => { - // We emit storage-dead nodes on the unwind path for borrow-checking - // purposes. When `storage_dead_on_unwind` is true, we need to adjust - // the `unwind_to` pointer now that the storage-dead has completed, so - // that any future drops we emit will not register storage-dead. + // We emit StorageDead statements on the unwind path to ensure the borrow-checker + // treats locals as dead at the same point on all paths. This is for static semantics + // (borrow-checking), not runtime drop order. StorageDead is only needed when there + // are Value or ForLint drops present, because: + // 1. StorageDead is only relevant for borrow-checking when there are destructors + // that might reference the dead variable + // 2. If there are no drops, there's no unwind path to emit StorageDead on + // + // These StorageDead statements are removed by the `RemoveStorageMarkers` MIR + // transform pass before codegen, so they don't affect LLVM output. + // + // When `storage_dead_on_unwind` is true, we need to adjust the `unwind_to` pointer + // now that the storage-dead has completed, so that any future drops we emit will + // not register storage-dead. if storage_dead_on_unwind && unwind_to != DropIdx::MAX { debug_assert_eq!( unwind_drops.drop_nodes[unwind_to].data.local, diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs index e6f56b509af63..80457e6c1e34e 100644 --- a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs +++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs @@ -6,6 +6,8 @@ //! - [`FakeRead`] //! - [`Assign`] statements with a [`Fake`] borrow //! - [`Coverage`] statements of kind [`BlockMarker`] or [`SpanMarker`] +//! - [`StorageDead`] statements (these are only needed for borrow-checking and are removed +//! after borrowck completes to ensure they don't affect later optimization passes or codegen) //! //! [`AscribeUserType`]: rustc_middle::mir::StatementKind::AscribeUserType //! [`Assign`]: rustc_middle::mir::StatementKind::Assign @@ -15,6 +17,7 @@ //! [`Coverage`]: rustc_middle::mir::StatementKind::Coverage //! [`BlockMarker`]: rustc_middle::mir::coverage::CoverageKind::BlockMarker //! [`SpanMarker`]: rustc_middle::mir::coverage::CoverageKind::SpanMarker +//! [`StorageDead`]: rustc_middle::mir::StatementKind::StorageDead use rustc_middle::mir::coverage::CoverageKind; use rustc_middle::mir::*; @@ -28,6 +31,10 @@ impl<'tcx> crate::MirPass<'tcx> for CleanupPostBorrowck { // Manually invalidate CFG caches if we actually change a terminator's edges. let mut invalidate_cfg = false; for basic_block in body.basic_blocks.as_mut_preserves_cfg().iter_mut() { + // Only remove StorageDead from cleanup blocks (unwind paths). + // StorageDead on normal paths is still needed for MIR validation + // and will be removed later by RemoveStorageMarkers during optimization. + let is_cleanup = basic_block.is_cleanup; for statement in basic_block.statements.iter_mut() { match statement.kind { StatementKind::AscribeUserType(..) @@ -41,6 +48,7 @@ impl<'tcx> crate::MirPass<'tcx> for CleanupPostBorrowck { | StatementKind::BackwardIncompatibleDropHint { .. } => { statement.make_nop(true) } + StatementKind::StorageDead(..) if is_cleanup => statement.make_nop(true), StatementKind::Assign(box ( _, Rvalue::Cast( diff --git a/tests/mir-opt/async_drop_live_dead.a-{closure#0}.coroutine_drop_async.0.panic-unwind.mir b/tests/mir-opt/async_drop_live_dead.a-{closure#0}.coroutine_drop_async.0.panic-unwind.mir index 1dc1d08136290..3d47e948a8da6 100644 --- a/tests/mir-opt/async_drop_live_dead.a-{closure#0}.coroutine_drop_async.0.panic-unwind.mir +++ b/tests/mir-opt/async_drop_live_dead.a-{closure#0}.coroutine_drop_async.0.panic-unwind.mir @@ -44,7 +44,6 @@ fn a::{closure#0}(_1: Pin<&mut {async fn body of a()}>, _2: &mut Context<'_>) } bb3 (cleanup): { - nop; nop; goto -> bb5; } diff --git a/tests/mir-opt/basic_assignment.main.SimplifyCfg-initial.after.mir b/tests/mir-opt/basic_assignment.main.SimplifyCfg-initial.after.mir index aa7d75242b408..4943fb2ccb099 100644 --- a/tests/mir-opt/basic_assignment.main.SimplifyCfg-initial.after.mir +++ b/tests/mir-opt/basic_assignment.main.SimplifyCfg-initial.after.mir @@ -73,14 +73,19 @@ fn main() -> () { } bb6 (cleanup): { + StorageDead(_6); drop(_5) -> [return: bb7, unwind terminate(cleanup)]; } bb7 (cleanup): { + StorageDead(_5); drop(_4) -> [return: bb8, unwind terminate(cleanup)]; } bb8 (cleanup): { + StorageDead(_4); + StorageDead(_2); + StorageDead(_1); resume; } } diff --git a/tests/mir-opt/building/logical_or_in_conditional.test_complex.built.after.mir b/tests/mir-opt/building/logical_or_in_conditional.test_complex.built.after.mir index 327b3b618a03a..357f97cf4ab51 100644 --- a/tests/mir-opt/building/logical_or_in_conditional.test_complex.built.after.mir +++ b/tests/mir-opt/building/logical_or_in_conditional.test_complex.built.after.mir @@ -19,7 +19,7 @@ fn test_complex() -> () { bb0: { StorageLive(_1); StorageLive(_2); - _2 = E::f() -> [return: bb1, unwind: bb35]; + _2 = E::f() -> [return: bb1, unwind: bb38]; } bb1: { @@ -42,7 +42,7 @@ fn test_complex() -> () { bb5: { StorageLive(_4); - _4 = always_true() -> [return: bb6, unwind: bb35]; + _4 = always_true() -> [return: bb6, unwind: bb38]; } bb6: { @@ -64,7 +64,7 @@ fn test_complex() -> () { } bb9: { - drop(_7) -> [return: bb11, unwind: bb35]; + drop(_7) -> [return: bb11, unwind: bb37]; } bb10: { @@ -78,7 +78,7 @@ fn test_complex() -> () { } bb12: { - drop(_7) -> [return: bb13, unwind: bb35]; + drop(_7) -> [return: bb13, unwind: bb37]; } bb13: { @@ -98,7 +98,7 @@ fn test_complex() -> () { } bb15: { - drop(_10) -> [return: bb17, unwind: bb35]; + drop(_10) -> [return: bb17, unwind: bb36]; } bb16: { @@ -139,7 +139,7 @@ fn test_complex() -> () { StorageDead(_4); StorageDead(_1); StorageLive(_11); - _11 = always_true() -> [return: bb23, unwind: bb35]; + _11 = always_true() -> [return: bb23, unwind: bb38]; } bb23: { @@ -156,7 +156,7 @@ fn test_complex() -> () { bb26: { StorageLive(_12); - _12 = E::f() -> [return: bb27, unwind: bb35]; + _12 = E::f() -> [return: bb27, unwind: bb38]; } bb27: { @@ -199,6 +199,25 @@ fn test_complex() -> () { } bb35 (cleanup): { + StorageDead(_10); + StorageDead(_9); + StorageDead(_2); + goto -> bb38; + } + + bb36 (cleanup): { + StorageDead(_10); + StorageDead(_9); + goto -> bb38; + } + + bb37 (cleanup): { + StorageDead(_7); + StorageDead(_6); + goto -> bb38; + } + + bb38 (cleanup): { resume; } } diff --git a/tests/mir-opt/building/logical_or_in_conditional.test_or.built.after.mir b/tests/mir-opt/building/logical_or_in_conditional.test_or.built.after.mir index 2fc19e7e0fdcd..7aa8dd0a147b7 100644 --- a/tests/mir-opt/building/logical_or_in_conditional.test_or.built.after.mir +++ b/tests/mir-opt/building/logical_or_in_conditional.test_or.built.after.mir @@ -20,7 +20,7 @@ fn test_or() -> () { } bb1: { - drop(_3) -> [return: bb3, unwind: bb13]; + drop(_3) -> [return: bb3, unwind: bb14]; } bb2: { @@ -34,7 +34,7 @@ fn test_or() -> () { } bb4: { - drop(_3) -> [return: bb5, unwind: bb13]; + drop(_3) -> [return: bb5, unwind: bb14]; } bb5: { @@ -86,6 +86,19 @@ fn test_or() -> () { } bb13 (cleanup): { + StorageDead(_6); + StorageDead(_5); + goto -> bb15; + } + + bb14 (cleanup): { + StorageDead(_3); + StorageDead(_2); + goto -> bb15; + } + + bb15 (cleanup): { + StorageDead(_1); resume; } } diff --git a/tests/mir-opt/building/match/array_len.const_array_len.built.after.panic-unwind.mir b/tests/mir-opt/building/match/array_len.const_array_len.built.after.panic-unwind.mir index 8d16f074b1e34..d0c78d8ccdd4f 100644 --- a/tests/mir-opt/building/match/array_len.const_array_len.built.after.panic-unwind.mir +++ b/tests/mir-opt/building/match/array_len.const_array_len.built.after.panic-unwind.mir @@ -43,7 +43,7 @@ fn const_array_len(_1: [T; 5]) -> () { StorageLive(_6); StorageLive(_7); _7 = move _2; - _6 = opaque::(move _7) -> [return: bb3, unwind: bb17]; + _6 = opaque::(move _7) -> [return: bb3, unwind: bb20]; } bb3: { @@ -52,7 +52,7 @@ fn const_array_len(_1: [T; 5]) -> () { StorageLive(_8); StorageLive(_9); _9 = move _3; - _8 = opaque::(move _9) -> [return: bb4, unwind: bb16]; + _8 = opaque::(move _9) -> [return: bb4, unwind: bb18]; } bb4: { @@ -61,7 +61,7 @@ fn const_array_len(_1: [T; 5]) -> () { StorageLive(_10); StorageLive(_11); _11 = move _4; - _10 = opaque::<[T; 2]>(move _11) -> [return: bb5, unwind: bb15]; + _10 = opaque::<[T; 2]>(move _11) -> [return: bb5, unwind: bb16]; } bb5: { @@ -77,7 +77,7 @@ fn const_array_len(_1: [T; 5]) -> () { StorageDead(_13); StorageDead(_12); _0 = const (); - drop(_5) -> [return: bb8, unwind: bb19]; + drop(_5) -> [return: bb8, unwind: bb23]; } bb7: { @@ -87,17 +87,17 @@ fn const_array_len(_1: [T; 5]) -> () { bb8: { StorageDead(_5); - drop(_4) -> [return: bb9, unwind: bb20]; + drop(_4) -> [return: bb9, unwind: bb24]; } bb9: { StorageDead(_4); - drop(_3) -> [return: bb10, unwind: bb21]; + drop(_3) -> [return: bb10, unwind: bb25]; } bb10: { StorageDead(_3); - drop(_2) -> [return: bb11, unwind: bb22]; + drop(_2) -> [return: bb11, unwind: bb26]; } bb11: { @@ -106,7 +106,7 @@ fn const_array_len(_1: [T; 5]) -> () { } bb12: { - drop(_1) -> [return: bb13, unwind: bb23]; + drop(_1) -> [return: bb13, unwind: bb27]; } bb13: { @@ -114,42 +114,70 @@ fn const_array_len(_1: [T; 5]) -> () { } bb14 (cleanup): { - drop(_13) -> [return: bb18, unwind terminate(cleanup)]; + drop(_13) -> [return: bb15, unwind terminate(cleanup)]; } bb15 (cleanup): { - drop(_11) -> [return: bb18, unwind terminate(cleanup)]; + StorageDead(_13); + StorageDead(_12); + goto -> bb22; } bb16 (cleanup): { - drop(_9) -> [return: bb18, unwind terminate(cleanup)]; + drop(_11) -> [return: bb17, unwind terminate(cleanup)]; } bb17 (cleanup): { - drop(_7) -> [return: bb18, unwind terminate(cleanup)]; + StorageDead(_11); + StorageDead(_10); + goto -> bb22; } bb18 (cleanup): { - drop(_5) -> [return: bb19, unwind terminate(cleanup)]; + drop(_9) -> [return: bb19, unwind terminate(cleanup)]; } bb19 (cleanup): { - drop(_4) -> [return: bb20, unwind terminate(cleanup)]; + StorageDead(_9); + StorageDead(_8); + goto -> bb22; } bb20 (cleanup): { - drop(_3) -> [return: bb21, unwind terminate(cleanup)]; + drop(_7) -> [return: bb21, unwind terminate(cleanup)]; } bb21 (cleanup): { - drop(_2) -> [return: bb22, unwind terminate(cleanup)]; + StorageDead(_7); + StorageDead(_6); + goto -> bb22; } bb22 (cleanup): { - drop(_1) -> [return: bb23, unwind terminate(cleanup)]; + drop(_5) -> [return: bb23, unwind terminate(cleanup)]; } bb23 (cleanup): { + StorageDead(_5); + drop(_4) -> [return: bb24, unwind terminate(cleanup)]; + } + + bb24 (cleanup): { + StorageDead(_4); + drop(_3) -> [return: bb25, unwind terminate(cleanup)]; + } + + bb25 (cleanup): { + StorageDead(_3); + drop(_2) -> [return: bb26, unwind terminate(cleanup)]; + } + + bb26 (cleanup): { + StorageDead(_2); + drop(_1) -> [return: bb27, unwind terminate(cleanup)]; + } + + bb27 (cleanup): { resume; } } diff --git a/tests/mir-opt/building/storage_dead_unwind_path.rs b/tests/mir-opt/building/storage_dead_unwind_path.rs new file mode 100644 index 0000000000000..db73f374efbee --- /dev/null +++ b/tests/mir-opt/building/storage_dead_unwind_path.rs @@ -0,0 +1,29 @@ +//@ compile-flags: -Zmir-opt-level=0 +// skip-filecheck +// Test that StorageDead statements are emitted on unwind paths for borrow-checking. +// This ensures the borrow-checker treats locals as dead at the same point on all paths. + +// EMIT_MIR storage_dead_unwind_path.test.built.after.mir + +struct Droppable; + +impl Drop for Droppable { + fn drop(&mut self) {} +} + +fn test() { + // StorageDead drop (non-drop type) + let x = 42i32; + // Value drop (drop type) + let y = Droppable; + // Function call that might panic - if it does, we should see StorageDead(x) in cleanup + may_panic(); +} + +fn may_panic() { + // This function might panic, triggering unwind +} + +fn main() { + test(); +} diff --git a/tests/mir-opt/building/storage_dead_unwind_path.test.built.after.mir b/tests/mir-opt/building/storage_dead_unwind_path.test.built.after.mir new file mode 100644 index 0000000000000..82e0a5343cae2 --- /dev/null +++ b/tests/mir-opt/building/storage_dead_unwind_path.test.built.after.mir @@ -0,0 +1,48 @@ +// MIR for `test` after built + +fn test() -> () { + let mut _0: (); + let _1: i32; + let _3: (); + scope 1 { + debug x => _1; + let _2: Droppable; + scope 2 { + debug y => _2; + } + } + + bb0: { + StorageLive(_1); + _1 = const 42_i32; + FakeRead(ForLet(None), _1); + StorageLive(_2); + _2 = Droppable; + FakeRead(ForLet(None), _2); + StorageLive(_3); + _3 = may_panic() -> [return: bb1, unwind: bb3]; + } + + bb1: { + StorageDead(_3); + _0 = const (); + drop(_2) -> [return: bb2, unwind: bb4]; + } + + bb2: { + StorageDead(_2); + StorageDead(_1); + return; + } + + bb3 (cleanup): { + StorageDead(_3); + drop(_2) -> [return: bb4, unwind terminate(cleanup)]; + } + + bb4 (cleanup): { + StorageDead(_2); + StorageDead(_1); + resume; + } +} diff --git a/tests/mir-opt/coroutine_drop_cleanup.main-{closure#0}.coroutine_drop.0.panic-unwind.mir b/tests/mir-opt/coroutine_drop_cleanup.main-{closure#0}.coroutine_drop.0.panic-unwind.mir index 69e7219af9ff8..e010b63ba315b 100644 --- a/tests/mir-opt/coroutine_drop_cleanup.main-{closure#0}.coroutine_drop.0.panic-unwind.mir +++ b/tests/mir-opt/coroutine_drop_cleanup.main-{closure#0}.coroutine_drop.0.panic-unwind.mir @@ -37,7 +37,6 @@ fn main::{closure#0}(_1: *mut {coroutine@$DIR/coroutine_drop_cleanup.rs:12:5: 12 } bb5 (cleanup): { - nop; goto -> bb4; } diff --git a/tests/mir-opt/coroutine_storage_dead_unwind.main-{closure#0}.StateTransform.before.panic-unwind.mir b/tests/mir-opt/coroutine_storage_dead_unwind.main-{closure#0}.StateTransform.before.panic-unwind.mir index 14e1782b86016..4baba23278e00 100644 --- a/tests/mir-opt/coroutine_storage_dead_unwind.main-{closure#0}.StateTransform.before.panic-unwind.mir +++ b/tests/mir-opt/coroutine_storage_dead_unwind.main-{closure#0}.StateTransform.before.panic-unwind.mir @@ -36,7 +36,7 @@ yields () StorageLive(_7); StorageLive(_8); _8 = move _3; - _7 = take::(move _8) -> [return: bb2, unwind: bb10]; + _7 = take::(move _8) -> [return: bb2, unwind: bb9]; } bb2: { @@ -45,7 +45,7 @@ yields () StorageLive(_9); StorageLive(_10); _10 = move _4; - _9 = take::(move _10) -> [return: bb3, unwind: bb9]; + _9 = take::(move _10) -> [return: bb3, unwind: bb10]; } bb3: { @@ -58,7 +58,7 @@ yields () bb4: { StorageDead(_3); - drop(_1) -> [return: bb5, unwind: bb14]; + drop(_1) -> [return: bb5, unwind: bb12]; } bb5: { @@ -69,12 +69,12 @@ yields () StorageDead(_6); StorageDead(_5); StorageDead(_4); - drop(_3) -> [return: bb7, unwind: bb15]; + drop(_3) -> [return: bb7, unwind: bb13]; } bb7: { StorageDead(_3); - drop(_1) -> [return: bb8, unwind: bb14]; + drop(_1) -> [return: bb8, unwind: bb12]; } bb8: { @@ -82,9 +82,7 @@ yields () } bb9 (cleanup): { - StorageDead(_10); - StorageDead(_9); - goto -> bb12; + goto -> bb10; } bb10 (cleanup): { @@ -92,27 +90,14 @@ yields () } bb11 (cleanup): { - StorageDead(_8); - StorageDead(_7); - goto -> bb12; + drop(_1) -> [return: bb12, unwind terminate(cleanup)]; } bb12 (cleanup): { - StorageDead(_4); - goto -> bb13; - } - - bb13 (cleanup): { - StorageDead(_3); - drop(_1) -> [return: bb14, unwind terminate(cleanup)]; - } - - bb14 (cleanup): { resume; } - bb15 (cleanup): { - StorageDead(_3); - drop(_1) -> [return: bb14, unwind terminate(cleanup)]; + bb13 (cleanup): { + drop(_1) -> [return: bb12, unwind terminate(cleanup)]; } } diff --git a/tests/mir-opt/issue_91633.bar.built.after.mir b/tests/mir-opt/issue_91633.bar.built.after.mir index 53829588a1b36..0cb9da79b5a0f 100644 --- a/tests/mir-opt/issue_91633.bar.built.after.mir +++ b/tests/mir-opt/issue_91633.bar.built.after.mir @@ -33,6 +33,8 @@ fn bar(_1: Box<[T]>) -> () { } bb4 (cleanup): { + StorageDead(_3); + StorageDead(_2); drop(_1) -> [return: bb5, unwind terminate(cleanup)]; } diff --git a/tests/mir-opt/issue_91633.foo.built.after.mir b/tests/mir-opt/issue_91633.foo.built.after.mir index 53f48350596ee..d003419f2c8ec 100644 --- a/tests/mir-opt/issue_91633.foo.built.after.mir +++ b/tests/mir-opt/issue_91633.foo.built.after.mir @@ -34,12 +34,12 @@ fn foo(_1: Box<[T]>) -> T { FakeRead(ForLet(None), _2); StorageDead(_4); _0 = move _2; - drop(_2) -> [return: bb3, unwind: bb5]; + drop(_2) -> [return: bb3, unwind: bb6]; } bb3: { StorageDead(_2); - drop(_1) -> [return: bb4, unwind: bb6]; + drop(_1) -> [return: bb4, unwind: bb7]; } bb4: { @@ -47,10 +47,17 @@ fn foo(_1: Box<[T]>) -> T { } bb5 (cleanup): { - drop(_1) -> [return: bb6, unwind terminate(cleanup)]; + StorageDead(_3); + StorageDead(_4); + goto -> bb6; } bb6 (cleanup): { + StorageDead(_2); + drop(_1) -> [return: bb7, unwind terminate(cleanup)]; + } + + bb7 (cleanup): { resume; } } diff --git a/tests/mir-opt/match_arm_scopes.complicated_match.panic-unwind.SimplifyCfg-initial.after-ElaborateDrops.after.diff b/tests/mir-opt/match_arm_scopes.complicated_match.panic-unwind.SimplifyCfg-initial.after-ElaborateDrops.after.diff index 484bc7dad1289..f96045bb69448 100644 --- a/tests/mir-opt/match_arm_scopes.complicated_match.panic-unwind.SimplifyCfg-initial.after-ElaborateDrops.after.diff +++ b/tests/mir-opt/match_arm_scopes.complicated_match.panic-unwind.SimplifyCfg-initial.after-ElaborateDrops.after.diff @@ -110,7 +110,7 @@ - bb10: { + bb7: { _0 = const 1_i32; -- drop(_7) -> [return: bb19, unwind: bb25]; +- drop(_7) -> [return: bb19, unwind: bb26]; + drop(_7) -> [return: bb16, unwind: bb22]; } @@ -224,7 +224,7 @@ } - bb22: { -- drop(_2) -> [return: bb24, unwind: bb26]; +- drop(_2) -> [return: bb24, unwind: bb28]; + bb19: { + goto -> bb26; } @@ -233,7 +233,7 @@ + bb20: { StorageDead(_8); StorageDead(_6); -- drop(_2) -> [return: bb24, unwind: bb26]; +- drop(_2) -> [return: bb24, unwind: bb28]; + drop(_2) -> [return: bb21, unwind: bb23]; } @@ -243,20 +243,30 @@ } - bb25 (cleanup): { -- drop(_2) -> [return: bb26, unwind terminate(cleanup)]; +- StorageDead(_16); +- StorageDead(_15); + bb22 (cleanup): { -+ goto -> bb27; + goto -> bb27; } - bb26 (cleanup): { +- StorageDead(_7); +- StorageDead(_5); +- StorageDead(_8); +- StorageDead(_6); +- goto -> bb27; + bb23 (cleanup): { - resume; -+ } -+ ++ resume; + } + +- bb27 (cleanup): { +- drop(_2) -> [return: bb28, unwind terminate(cleanup)]; + bb24: { + goto -> bb21; -+ } -+ + } + +- bb28 (cleanup): { +- resume; + bb25 (cleanup): { + goto -> bb23; + } diff --git a/tests/mir-opt/storage_dead_removed_after_borrowck.rs b/tests/mir-opt/storage_dead_removed_after_borrowck.rs new file mode 100644 index 0000000000000..7dafe3f0f821c --- /dev/null +++ b/tests/mir-opt/storage_dead_removed_after_borrowck.rs @@ -0,0 +1,31 @@ +//@ compile-flags: -Zmir-opt-level=0 +//@ test-mir-pass: CleanupPostBorrowck +// skip-filecheck +// Test that StorageDead statements are removed after borrow-checking by CleanupPostBorrowck. +// StorageDead is only needed for borrow-checking and should be removed before optimization passes. + +// EMIT_MIR storage_dead_removed_after_borrowck.test.built.after.mir +// EMIT_MIR storage_dead_removed_after_borrowck.test.CleanupPostBorrowck.after.mir + +struct Droppable; + +impl Drop for Droppable { + fn drop(&mut self) {} +} + +fn test() { + // StorageDead drop (non-drop type) + let x = 42i32; + // Value drop (drop type) + let y = Droppable; + // Function call that might panic + may_panic(); +} + +fn may_panic() { + // This function might panic, triggering unwind +} + +fn main() { + test(); +} diff --git a/tests/mir-opt/storage_dead_removed_after_borrowck.test.CleanupPostBorrowck.after.mir b/tests/mir-opt/storage_dead_removed_after_borrowck.test.CleanupPostBorrowck.after.mir new file mode 100644 index 0000000000000..c41654f8dfe3f --- /dev/null +++ b/tests/mir-opt/storage_dead_removed_after_borrowck.test.CleanupPostBorrowck.after.mir @@ -0,0 +1,48 @@ +// MIR for `test` after CleanupPostBorrowck + +fn test() -> () { + let mut _0: (); + let _1: i32; + let _3: (); + scope 1 { + debug x => _1; + let _2: Droppable; + scope 2 { + debug y => _2; + } + } + + bb0: { + StorageLive(_1); + _1 = const 42_i32; + nop; + StorageLive(_2); + _2 = Droppable; + nop; + StorageLive(_3); + _3 = may_panic() -> [return: bb1, unwind: bb3]; + } + + bb1: { + StorageDead(_3); + _0 = const (); + drop(_2) -> [return: bb2, unwind: bb4]; + } + + bb2: { + StorageDead(_2); + StorageDead(_1); + return; + } + + bb3 (cleanup): { + nop; + drop(_2) -> [return: bb4, unwind terminate(cleanup)]; + } + + bb4 (cleanup): { + nop; + nop; + resume; + } +} diff --git a/tests/mir-opt/storage_dead_removed_after_borrowck.test.built.after.mir b/tests/mir-opt/storage_dead_removed_after_borrowck.test.built.after.mir new file mode 100644 index 0000000000000..82e0a5343cae2 --- /dev/null +++ b/tests/mir-opt/storage_dead_removed_after_borrowck.test.built.after.mir @@ -0,0 +1,48 @@ +// MIR for `test` after built + +fn test() -> () { + let mut _0: (); + let _1: i32; + let _3: (); + scope 1 { + debug x => _1; + let _2: Droppable; + scope 2 { + debug y => _2; + } + } + + bb0: { + StorageLive(_1); + _1 = const 42_i32; + FakeRead(ForLet(None), _1); + StorageLive(_2); + _2 = Droppable; + FakeRead(ForLet(None), _2); + StorageLive(_3); + _3 = may_panic() -> [return: bb1, unwind: bb3]; + } + + bb1: { + StorageDead(_3); + _0 = const (); + drop(_2) -> [return: bb2, unwind: bb4]; + } + + bb2: { + StorageDead(_2); + StorageDead(_1); + return; + } + + bb3 (cleanup): { + StorageDead(_3); + drop(_2) -> [return: bb4, unwind terminate(cleanup)]; + } + + bb4 (cleanup): { + StorageDead(_2); + StorageDead(_1); + resume; + } +} diff --git a/tests/mir-opt/tail_call_drops.f.ElaborateDrops.panic-unwind.diff b/tests/mir-opt/tail_call_drops.f.ElaborateDrops.panic-unwind.diff index f13ee78aa368c..58d8a87986d6b 100644 --- a/tests/mir-opt/tail_call_drops.f.ElaborateDrops.panic-unwind.diff +++ b/tests/mir-opt/tail_call_drops.f.ElaborateDrops.panic-unwind.diff @@ -66,6 +66,7 @@ bb6: { + _8 = const false; StorageDead(_4); + StorageDead(_3); - drop(_2) -> [return: bb7, unwind continue]; + drop(_2) -> [return: bb7, unwind: bb12]; } diff --git a/tests/mir-opt/tail_call_drops.f.built.after.panic-unwind.mir b/tests/mir-opt/tail_call_drops.f.built.after.panic-unwind.mir index e017424a4ccdd..8dcd349408117 100644 --- a/tests/mir-opt/tail_call_drops.f.built.after.panic-unwind.mir +++ b/tests/mir-opt/tail_call_drops.f.built.after.panic-unwind.mir @@ -24,7 +24,7 @@ fn f() -> () { bb0: { StorageLive(_2); - _2 = String::new() -> [return: bb1, unwind: bb17]; + _2 = String::new() -> [return: bb1, unwind: bb18]; } bb1: { @@ -63,6 +63,7 @@ fn f() -> () { bb6: { StorageDead(_4); + StorageDead(_3); drop(_2) -> [return: bb7, unwind: bb17]; } @@ -100,18 +101,28 @@ fn f() -> () { } bb14 (cleanup): { + StorageDead(_7); + StorageDead(_6); drop(_5) -> [return: bb15, unwind terminate(cleanup)]; } bb15 (cleanup): { + StorageDead(_5); drop(_4) -> [return: bb16, unwind terminate(cleanup)]; } bb16 (cleanup): { + StorageDead(_4); + StorageDead(_3); drop(_2) -> [return: bb17, unwind terminate(cleanup)]; } bb17 (cleanup): { + StorageDead(_2); + goto -> bb18; + } + + bb18 (cleanup): { resume; } } diff --git a/tests/mir-opt/tail_call_drops.f_with_arg.ElaborateDrops.panic-unwind.diff b/tests/mir-opt/tail_call_drops.f_with_arg.ElaborateDrops.panic-unwind.diff index 4fba0032729e6..98c5556e0bce1 100644 --- a/tests/mir-opt/tail_call_drops.f_with_arg.ElaborateDrops.panic-unwind.diff +++ b/tests/mir-opt/tail_call_drops.f_with_arg.ElaborateDrops.panic-unwind.diff @@ -80,6 +80,7 @@ bb8: { + _12 = const false; StorageDead(_6); + StorageDead(_5); drop(_4) -> [return: bb9, unwind: bb16]; } diff --git a/tests/mir-opt/tail_call_drops.f_with_arg.built.after.panic-unwind.mir b/tests/mir-opt/tail_call_drops.f_with_arg.built.after.panic-unwind.mir index 9ec358ec18930..a1863a4e926d0 100644 --- a/tests/mir-opt/tail_call_drops.f_with_arg.built.after.panic-unwind.mir +++ b/tests/mir-opt/tail_call_drops.f_with_arg.built.after.panic-unwind.mir @@ -28,7 +28,7 @@ fn f_with_arg(_1: String, _2: String) -> () { bb0: { StorageLive(_4); - _4 = String::new() -> [return: bb1, unwind: bb34]; + _4 = String::new() -> [return: bb1, unwind: bb36]; } bb1: { @@ -37,13 +37,13 @@ fn f_with_arg(_1: String, _2: String) -> () { _5 = const 12_i32; FakeRead(ForLet(None), _5); StorageLive(_6); - _6 = String::new() -> [return: bb2, unwind: bb33]; + _6 = String::new() -> [return: bb2, unwind: bb35]; } bb2: { FakeRead(ForLet(None), _6); StorageLive(_7); - _7 = String::new() -> [return: bb3, unwind: bb32]; + _7 = String::new() -> [return: bb3, unwind: bb34]; } bb3: { @@ -51,14 +51,14 @@ fn f_with_arg(_1: String, _2: String) -> () { StorageLive(_8); StorageLive(_9); _9 = move _6; - _8 = std::mem::drop::(move _9) -> [return: bb4, unwind: bb30]; + _8 = std::mem::drop::(move _9) -> [return: bb4, unwind: bb31]; } bb4: { StorageDead(_9); StorageDead(_8); StorageLive(_10); - _10 = String::new() -> [return: bb5, unwind: bb31]; + _10 = String::new() -> [return: bb5, unwind: bb33]; } bb5: { @@ -77,6 +77,7 @@ fn f_with_arg(_1: String, _2: String) -> () { bb8: { StorageDead(_6); + StorageDead(_5); drop(_4) -> [return: bb9, unwind: bb23]; } @@ -96,18 +97,18 @@ fn f_with_arg(_1: String, _2: String) -> () { bb12: { StorageDead(_11); StorageDead(_10); - drop(_7) -> [return: bb13, unwind: bb32]; + drop(_7) -> [return: bb13, unwind: bb34]; } bb13: { StorageDead(_7); - drop(_6) -> [return: bb14, unwind: bb33]; + drop(_6) -> [return: bb14, unwind: bb35]; } bb14: { StorageDead(_6); StorageDead(_5); - drop(_4) -> [return: bb15, unwind: bb34]; + drop(_4) -> [return: bb15, unwind: bb36]; } bb15: { @@ -116,11 +117,11 @@ fn f_with_arg(_1: String, _2: String) -> () { } bb16: { - drop(_2) -> [return: bb17, unwind: bb35]; + drop(_2) -> [return: bb17, unwind: bb37]; } bb17: { - drop(_1) -> [return: bb18, unwind: bb36]; + drop(_1) -> [return: bb18, unwind: bb38]; } bb18: { @@ -132,7 +133,7 @@ fn f_with_arg(_1: String, _2: String) -> () { } bb20 (cleanup): { - drop(_11) -> [return: bb36, unwind terminate(cleanup)]; + drop(_11) -> [return: bb38, unwind terminate(cleanup)]; } bb21 (cleanup): { @@ -140,7 +141,7 @@ fn f_with_arg(_1: String, _2: String) -> () { } bb22 (cleanup): { - drop(_11) -> [return: bb35, unwind terminate(cleanup)]; + drop(_11) -> [return: bb37, unwind terminate(cleanup)]; } bb23 (cleanup): { @@ -148,7 +149,7 @@ fn f_with_arg(_1: String, _2: String) -> () { } bb24 (cleanup): { - drop(_11) -> [return: bb34, unwind terminate(cleanup)]; + drop(_11) -> [return: bb36, unwind terminate(cleanup)]; } bb25 (cleanup): { @@ -156,7 +157,7 @@ fn f_with_arg(_1: String, _2: String) -> () { } bb26 (cleanup): { - drop(_11) -> [return: bb33, unwind terminate(cleanup)]; + drop(_11) -> [return: bb35, unwind terminate(cleanup)]; } bb27 (cleanup): { @@ -164,38 +165,54 @@ fn f_with_arg(_1: String, _2: String) -> () { } bb28 (cleanup): { - drop(_11) -> [return: bb32, unwind terminate(cleanup)]; + drop(_11) -> [return: bb34, unwind terminate(cleanup)]; } bb29 (cleanup): { - drop(_10) -> [return: bb31, unwind terminate(cleanup)]; + StorageDead(_11); + drop(_10) -> [return: bb30, unwind terminate(cleanup)]; } bb30 (cleanup): { - drop(_9) -> [return: bb31, unwind terminate(cleanup)]; + StorageDead(_10); + goto -> bb33; } bb31 (cleanup): { - drop(_7) -> [return: bb32, unwind terminate(cleanup)]; + drop(_9) -> [return: bb32, unwind terminate(cleanup)]; } bb32 (cleanup): { - drop(_6) -> [return: bb33, unwind terminate(cleanup)]; + StorageDead(_9); + StorageDead(_8); + goto -> bb33; } bb33 (cleanup): { - drop(_4) -> [return: bb34, unwind terminate(cleanup)]; + drop(_7) -> [return: bb34, unwind terminate(cleanup)]; } bb34 (cleanup): { - drop(_2) -> [return: bb35, unwind terminate(cleanup)]; + StorageDead(_7); + drop(_6) -> [return: bb35, unwind terminate(cleanup)]; } bb35 (cleanup): { - drop(_1) -> [return: bb36, unwind terminate(cleanup)]; + StorageDead(_6); + StorageDead(_5); + drop(_4) -> [return: bb36, unwind terminate(cleanup)]; } bb36 (cleanup): { + StorageDead(_4); + drop(_2) -> [return: bb37, unwind terminate(cleanup)]; + } + + bb37 (cleanup): { + drop(_1) -> [return: bb38, unwind terminate(cleanup)]; + } + + bb38 (cleanup): { resume; } } diff --git a/tests/ui/borrowck/storage-dead-unwind-path.rs b/tests/ui/borrowck/storage-dead-unwind-path.rs new file mode 100644 index 0000000000000..d91e68c361e01 --- /dev/null +++ b/tests/ui/borrowck/storage-dead-unwind-path.rs @@ -0,0 +1,27 @@ +//@ check-pass +// Test that StorageDead on unwind paths ensures consistent borrow-checking. +// This test verifies that the borrow-checker correctly treats variables as dead +// at the same point on all paths (normal and unwind), making the borrow-checker +// stricter and more consistent. + +struct Droppable; + +impl Drop for Droppable { + fn drop(&mut self) {} +} + +fn test_storage_dead_consistency() { + // StorageDead drop (non-drop type) + let x = 42i32; + // Value drop (drop type) - if this panics, x should be considered dead + let y = Droppable; + // After y is dropped, x should be considered dead for borrow-checking purposes + // even on the unwind path due to StorageDead being emitted on unwind paths + drop(y); + // x is still in scope here, but StorageDead ensures consistent treatment + let _val = x; +} + +fn main() { + test_storage_dead_consistency(); +} diff --git a/tests/ui/explicit-tail-calls/tail-call-storage-dead-unwind.rs b/tests/ui/explicit-tail-calls/tail-call-storage-dead-unwind.rs deleted file mode 100644 index 6fe9eaa546df0..0000000000000 --- a/tests/ui/explicit-tail-calls/tail-call-storage-dead-unwind.rs +++ /dev/null @@ -1,69 +0,0 @@ -//@ run-pass -//@ ignore-backends: gcc -#![expect(incomplete_features)] -#![feature(explicit_tail_calls)] - -// Test that explicit tail calls work correctly when there are StorageDead -// and ForLint drops in the unwind path. This ensures that `unwind_to` is -// correctly adjusted in `break_for_tail_call` when encountering StorageDead -// and ForLint drops, matching the behavior in `build_scope_drops`. - -#[allow(dead_code)] -struct Droppable(i32); - -impl Drop for Droppable { - fn drop(&mut self) {} -} - -fn tail_call_with_storage_dead() { - // These will have StorageDead drops (non-drop types) - let _a = 42i32; - let _b = true; - let _c = 10u8; - - // This will have a Value drop (drop type) - let _d = Droppable(1); - - // Tail call - if unwind_to isn't adjusted for StorageDead drops, - // the debug assert will fail when processing the Value drop - become next_function(); -} - -fn tail_call_with_mixed_drops() { - // StorageDead drop - let _storage = 100i32; - - // Value drop - let _value = Droppable(2); - - // Another StorageDead drop - let _storage2 = 200i32; - - // Another Value drop - let _value2 = Droppable(3); - - // Tail call - tests that unwind_to is adjusted correctly - // for both StorageDead and Value drops in sequence - become next_function(); -} - -fn tail_call_with_storage_before_value() { - // Multiple StorageDead drops before a Value drop - let _s1 = 1i32; - let _s2 = 2i32; - let _s3 = 3i32; - - // Value drop - if StorageDead drops aren't handled, - // unwind_to will point to wrong node and assert fails - let _v = Droppable(4); - - become next_function(); -} - -fn next_function() {} - -fn main() { - tail_call_with_storage_dead(); - tail_call_with_mixed_drops(); - tail_call_with_storage_before_value(); -} diff --git a/tests/ui/issues/issue-147875.rs b/tests/ui/issues/issue-147875.rs new file mode 100644 index 0000000000000..e3b77e6096f13 --- /dev/null +++ b/tests/ui/issues/issue-147875.rs @@ -0,0 +1,24 @@ +//@ check-pass +// Test for issue #147875: StorageDead on unwind paths ensures consistent borrow-checking. +// This test verifies that the borrow-checker correctly treats variables as dead at the +// same point on all paths (normal and unwind), making the borrow-checker stricter and +// more consistent. + +struct Droppable; + +impl Drop for Droppable { + fn drop(&mut self) {} +} + +fn test_storage_dead_on_unwind() { + // StorageDead drop (non-drop type) + let x = 42i32; + // Value drop (drop type) - if this panics, x should be considered dead + let y = Droppable; + // After y is dropped, x should be considered dead for borrow-checking purposes + // even on the unwind path +} + +fn main() { + test_storage_dead_on_unwind(); +} From f7813f43a67c4dea5f0d3c76cb45d98d04fb59c7 Mon Sep 17 00:00:00 2001 From: sladynnunes Date: Sat, 31 Jan 2026 18:17:30 -0800 Subject: [PATCH 3/8] Fix assertion failure: only adjust unwind_to when drop matches When processing drops in reverse order, unwind_to might not point to the current drop. Only adjust unwind_to when the drop matches what unwind_to is pointing to, rather than asserting they must match. --- compiler/rustc_mir_build/src/builder/scope.rs | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index f392e9459f681..5284815e855ff 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -1992,12 +1992,13 @@ where // As in the `DropKind::Storage` case below: // we emit lint-related drops on the unwind path when `storage_dead_on_unwind` // is true, so we need to adjust `unwind_to` in that case. - if storage_dead_on_unwind && unwind_to != DropIdx::MAX { - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.local, - drop_data.local - ); - debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind); + // Only adjust if the drop matches what unwind_to is pointing to (since we process + // drops in reverse order, unwind_to might not match the current drop). + if storage_dead_on_unwind + && unwind_to != DropIdx::MAX + && unwind_drops.drop_nodes[unwind_to].data.local == drop_data.local + && unwind_drops.drop_nodes[unwind_to].data.kind == drop_data.kind + { unwind_to = unwind_drops.drop_nodes[unwind_to].next; } @@ -2035,12 +2036,13 @@ where // When `storage_dead_on_unwind` is true, we need to adjust the `unwind_to` pointer // now that the storage-dead has completed, so that any future drops we emit will // not register storage-dead. - if storage_dead_on_unwind && unwind_to != DropIdx::MAX { - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.local, - drop_data.local - ); - debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind); + // Only adjust if the drop matches what unwind_to is pointing to (since we process + // drops in reverse order, unwind_to might not match the current drop). + if storage_dead_on_unwind + && unwind_to != DropIdx::MAX + && unwind_drops.drop_nodes[unwind_to].data.local == drop_data.local + && unwind_drops.drop_nodes[unwind_to].data.kind == drop_data.kind + { unwind_to = unwind_drops.drop_nodes[unwind_to].next; } if let Some(idx) = dropline_to { From bf8ee1dacb24ddafeb193a8c95a681340d8f9d2e Mon Sep 17 00:00:00 2001 From: sladynnunes Date: Sat, 31 Jan 2026 23:04:08 -0800 Subject: [PATCH 4/8] Update mir-opt test expectations --- ...e_out.move_out_by_subslice.built.after.mir | 42 ++++++++++++++----- ...move_out.move_out_from_end.built.after.mir | 42 ++++++++++++++----- ....run2-{closure#0}.Inline.panic-unwind.diff | 7 ---- 3 files changed, 64 insertions(+), 27 deletions(-) diff --git a/tests/mir-opt/building/uniform_array_move_out.move_out_by_subslice.built.after.mir b/tests/mir-opt/building/uniform_array_move_out.move_out_by_subslice.built.after.mir index 839bdeca86a88..86bffa91f0ad1 100644 --- a/tests/mir-opt/building/uniform_array_move_out.move_out_by_subslice.built.after.mir +++ b/tests/mir-opt/building/uniform_array_move_out.move_out_by_subslice.built.after.mir @@ -20,7 +20,7 @@ fn move_out_by_subslice() -> () { bb0: { StorageLive(_1); StorageLive(_2); - _3 = alloc::alloc::exchange_malloc(const ::SIZE, const ::ALIGN) -> [return: bb1, unwind: bb13]; + _3 = alloc::alloc::exchange_malloc(const ::SIZE, const ::ALIGN) -> [return: bb1, unwind: bb17]; } bb1: { @@ -28,13 +28,13 @@ fn move_out_by_subslice() -> () { _4 = ShallowInitBox(move _3, i32); (*_4) = const 1_i32; _2 = move _4; - drop(_4) -> [return: bb2, unwind: bb12]; + drop(_4) -> [return: bb2, unwind: bb14]; } bb2: { StorageDead(_4); StorageLive(_5); - _6 = alloc::alloc::exchange_malloc(const ::SIZE, const ::ALIGN) -> [return: bb3, unwind: bb12]; + _6 = alloc::alloc::exchange_malloc(const ::SIZE, const ::ALIGN) -> [return: bb3, unwind: bb15]; } bb3: { @@ -42,18 +42,18 @@ fn move_out_by_subslice() -> () { _7 = ShallowInitBox(move _6, i32); (*_7) = const 2_i32; _5 = move _7; - drop(_7) -> [return: bb4, unwind: bb11]; + drop(_7) -> [return: bb4, unwind: bb12]; } bb4: { StorageDead(_7); _1 = [move _2, move _5]; - drop(_5) -> [return: bb5, unwind: bb12]; + drop(_5) -> [return: bb5, unwind: bb13]; } bb5: { StorageDead(_5); - drop(_2) -> [return: bb6, unwind: bb13]; + drop(_2) -> [return: bb6, unwind: bb16]; } bb6: { @@ -73,7 +73,7 @@ fn move_out_by_subslice() -> () { bb8: { StorageDead(_8); - drop(_1) -> [return: bb9, unwind: bb13]; + drop(_1) -> [return: bb9, unwind: bb11]; } bb9: { @@ -82,18 +82,40 @@ fn move_out_by_subslice() -> () { } bb10 (cleanup): { - drop(_1) -> [return: bb13, unwind terminate(cleanup)]; + StorageDead(_8); + drop(_1) -> [return: bb11, unwind terminate(cleanup)]; } bb11 (cleanup): { - drop(_5) -> [return: bb12, unwind terminate(cleanup)]; + StorageDead(_1); + goto -> bb17; } bb12 (cleanup): { - drop(_2) -> [return: bb13, unwind terminate(cleanup)]; + StorageDead(_7); + drop(_5) -> [return: bb13, unwind terminate(cleanup)]; } bb13 (cleanup): { + StorageDead(_5); + goto -> bb15; + } + + bb14 (cleanup): { + StorageDead(_4); + goto -> bb15; + } + + bb15 (cleanup): { + drop(_2) -> [return: bb16, unwind terminate(cleanup)]; + } + + bb16 (cleanup): { + StorageDead(_2); + goto -> bb17; + } + + bb17 (cleanup): { resume; } } diff --git a/tests/mir-opt/building/uniform_array_move_out.move_out_from_end.built.after.mir b/tests/mir-opt/building/uniform_array_move_out.move_out_from_end.built.after.mir index 7fda69c7500a3..27cbe3a21af60 100644 --- a/tests/mir-opt/building/uniform_array_move_out.move_out_from_end.built.after.mir +++ b/tests/mir-opt/building/uniform_array_move_out.move_out_from_end.built.after.mir @@ -20,7 +20,7 @@ fn move_out_from_end() -> () { bb0: { StorageLive(_1); StorageLive(_2); - _3 = alloc::alloc::exchange_malloc(const ::SIZE, const ::ALIGN) -> [return: bb1, unwind: bb13]; + _3 = alloc::alloc::exchange_malloc(const ::SIZE, const ::ALIGN) -> [return: bb1, unwind: bb17]; } bb1: { @@ -28,13 +28,13 @@ fn move_out_from_end() -> () { _4 = ShallowInitBox(move _3, i32); (*_4) = const 1_i32; _2 = move _4; - drop(_4) -> [return: bb2, unwind: bb12]; + drop(_4) -> [return: bb2, unwind: bb14]; } bb2: { StorageDead(_4); StorageLive(_5); - _6 = alloc::alloc::exchange_malloc(const ::SIZE, const ::ALIGN) -> [return: bb3, unwind: bb12]; + _6 = alloc::alloc::exchange_malloc(const ::SIZE, const ::ALIGN) -> [return: bb3, unwind: bb15]; } bb3: { @@ -42,18 +42,18 @@ fn move_out_from_end() -> () { _7 = ShallowInitBox(move _6, i32); (*_7) = const 2_i32; _5 = move _7; - drop(_7) -> [return: bb4, unwind: bb11]; + drop(_7) -> [return: bb4, unwind: bb12]; } bb4: { StorageDead(_7); _1 = [move _2, move _5]; - drop(_5) -> [return: bb5, unwind: bb12]; + drop(_5) -> [return: bb5, unwind: bb13]; } bb5: { StorageDead(_5); - drop(_2) -> [return: bb6, unwind: bb13]; + drop(_2) -> [return: bb6, unwind: bb16]; } bb6: { @@ -73,7 +73,7 @@ fn move_out_from_end() -> () { bb8: { StorageDead(_8); - drop(_1) -> [return: bb9, unwind: bb13]; + drop(_1) -> [return: bb9, unwind: bb11]; } bb9: { @@ -82,18 +82,40 @@ fn move_out_from_end() -> () { } bb10 (cleanup): { - drop(_1) -> [return: bb13, unwind terminate(cleanup)]; + StorageDead(_8); + drop(_1) -> [return: bb11, unwind terminate(cleanup)]; } bb11 (cleanup): { - drop(_5) -> [return: bb12, unwind terminate(cleanup)]; + StorageDead(_1); + goto -> bb17; } bb12 (cleanup): { - drop(_2) -> [return: bb13, unwind terminate(cleanup)]; + StorageDead(_7); + drop(_5) -> [return: bb13, unwind terminate(cleanup)]; } bb13 (cleanup): { + StorageDead(_5); + goto -> bb15; + } + + bb14 (cleanup): { + StorageDead(_4); + goto -> bb15; + } + + bb15 (cleanup): { + drop(_2) -> [return: bb16, unwind terminate(cleanup)]; + } + + bb16 (cleanup): { + StorageDead(_2); + goto -> bb17; + } + + bb17 (cleanup): { resume; } } diff --git a/tests/mir-opt/inline_coroutine_body.run2-{closure#0}.Inline.panic-unwind.diff b/tests/mir-opt/inline_coroutine_body.run2-{closure#0}.Inline.panic-unwind.diff index 59417ce646513..9c506ccc45c88 100644 --- a/tests/mir-opt/inline_coroutine_body.run2-{closure#0}.Inline.panic-unwind.diff +++ b/tests/mir-opt/inline_coroutine_body.run2-{closure#0}.Inline.panic-unwind.diff @@ -263,13 +263,6 @@ + } + + bb11 (cleanup): { -+ StorageDead(_22); -+ StorageDead(_19); -+ StorageDead(_23); -+ StorageDead(_21); -+ StorageDead(_18); -+ StorageDead(_17); -+ StorageDead(_12); + drop((((*_32) as variant#3).0: ActionPermit<'_, T>)) -> [return: bb12, unwind terminate(cleanup)]; + } + From e6d1056d7946d6fc3c0ca6de3756599f5c523ead Mon Sep 17 00:00:00 2001 From: sladynnunes Date: Sun, 1 Feb 2026 12:11:49 -0800 Subject: [PATCH 5/8] Fix rust-analyzer lifetime issues blocking CI Fix lifetime issues in rust-analyzer where automaton doesn't live long enough for op.union(). Move op declaration inside each match arm to ensure proper lifetime scope. This fixes compilation errors that are blocking CI, though these are pre-existing issues unrelated to the StorageDead changes. --- src/tools/rust-analyzer/crates/hir-def/src/import_map.rs | 5 +++-- src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/tools/rust-analyzer/crates/hir-def/src/import_map.rs b/src/tools/rust-analyzer/crates/hir-def/src/import_map.rs index 6c5d226cac1bf..2da7aa911e7f2 100644 --- a/src/tools/rust-analyzer/crates/hir-def/src/import_map.rs +++ b/src/tools/rust-analyzer/crates/hir-def/src/import_map.rs @@ -426,11 +426,10 @@ pub fn search_dependencies( let import_maps: Vec<_> = krate.data(db).dependencies.iter().map(|dep| db.import_map(dep.crate_id)).collect(); - let mut op = fst::map::OpBuilder::new(); - match query.search_mode { SearchMode::Exact => { let automaton = fst::automaton::Str::new(&query.lowercased); + let mut op = fst::map::OpBuilder::new(); for map in &import_maps { op = op.add(map.fst.search(&automaton)); @@ -439,6 +438,7 @@ pub fn search_dependencies( } SearchMode::Fuzzy => { let automaton = fst::automaton::Subsequence::new(&query.lowercased); + let mut op = fst::map::OpBuilder::new(); for map in &import_maps { op = op.add(map.fst.search(&automaton)); @@ -447,6 +447,7 @@ pub fn search_dependencies( } SearchMode::Prefix => { let automaton = fst::automaton::Str::new(&query.lowercased).starts_with(); + let mut op = fst::map::OpBuilder::new(); for map in &import_maps { op = op.add(map.fst.search(&automaton)); diff --git a/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs b/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs index 183f6b6495375..68a0789e47218 100644 --- a/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs +++ b/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs @@ -569,11 +569,10 @@ impl Query { cb: impl FnMut(&'db FileSymbol<'db>) -> ControlFlow, ) -> Option { let _p = tracing::info_span!("symbol_index::Query::search").entered(); - - let mut op = fst::map::OpBuilder::new(); match self.mode { SearchMode::Exact => { let automaton = fst::automaton::Str::new(&self.lowercased); + let mut op = fst::map::OpBuilder::new(); for index in indices.iter() { op = op.add(index.map.search(&automaton)); @@ -582,6 +581,7 @@ impl Query { } SearchMode::Fuzzy => { let automaton = fst::automaton::Subsequence::new(&self.lowercased); + let mut op = fst::map::OpBuilder::new(); for index in indices.iter() { op = op.add(index.map.search(&automaton)); @@ -590,6 +590,7 @@ impl Query { } SearchMode::Prefix => { let automaton = fst::automaton::Str::new(&self.lowercased).starts_with(); + let mut op = fst::map::OpBuilder::new(); for index in indices.iter() { op = op.add(index.map.search(&automaton)); From 260871c061fa7134bba4ef9934af98a16a95b406 Mon Sep 17 00:00:00 2001 From: sladynnunes Date: Sun, 1 Feb 2026 15:18:56 -0800 Subject: [PATCH 6/8] Fix assertion failure in break_for_tail_call for StorageDead and ForLint When processing drops in reverse order, unwind_to might not point to the current drop. Make the unwind_to adjustment conditional on the drop matching, matching the behavior in build_scope_drops. This prevents assertion failures when unwind_to points to a different drop than the one being processed. --- compiler/rustc_mir_build/src/builder/scope.rs | 38 +++++++++---------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index 5284815e855ff..9dbd03fd59c8b 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -1181,16 +1181,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } DropKind::ForLint => { // If this ForLint drop is in unwind_drops, we need to adjust - // unwind_to to match, just like in build_scope_drops - if has_storage_drops && unwind_to != DropIdx::MAX { - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.local, - drop_data.local - ); - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.kind, - drop_data.kind - ); + // unwind_to to match, just like in build_scope_drops. + // Only adjust if the drop matches what unwind_to is pointing to + // (since we process drops in reverse order, unwind_to might not + // match the current drop). + if has_storage_drops + && unwind_to != DropIdx::MAX + && unwind_drops.drop_nodes[unwind_to].data.local == drop_data.local + && unwind_drops.drop_nodes[unwind_to].data.kind == drop_data.kind + { unwind_to = unwind_drops.drop_nodes[unwind_to].next; } self.cfg.push( @@ -1206,16 +1205,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } DropKind::Storage => { // If this StorageDead drop is in unwind_drops, we need to adjust - // unwind_to to match, just like in build_scope_drops - if has_storage_drops && unwind_to != DropIdx::MAX { - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.local, - drop_data.local - ); - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.kind, - drop_data.kind - ); + // unwind_to to match, just like in build_scope_drops. + // Only adjust if the drop matches what unwind_to is pointing to + // (since we process drops in reverse order, unwind_to might not + // match the current drop). + if has_storage_drops + && unwind_to != DropIdx::MAX + && unwind_drops.drop_nodes[unwind_to].data.local == drop_data.local + && unwind_drops.drop_nodes[unwind_to].data.kind == drop_data.kind + { unwind_to = unwind_drops.drop_nodes[unwind_to].next; } // Only temps and vars need their storage dead. From e4790cd629affba0f59d2c3c2eb3889ef22986f9 Mon Sep 17 00:00:00 2001 From: sladynnunes Date: Sun, 8 Feb 2026 19:27:51 -0800 Subject: [PATCH 7/8] Simplify StorageDead logic: always add when needed, clean up later - Remove conditional logic and optimization from diverge_cleanup_target - Remove conditional logic from build_exit_tree - Always add StorageDead when there are Value/ForLint drops - Cleanup passes (CleanupPostBorrowck, RemoveNoopLandingPads) handle removal - Fixes reviewer comments about code duplication and fragility --- compiler/rustc_mir_build/src/builder/scope.rs | 257 ++++++++++++------ .../src/cleanup_post_borrowck.rs | 19 +- 2 files changed, 192 insertions(+), 84 deletions(-) diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index 9dbd03fd59c8b..99cd618c7cdb8 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -258,8 +258,19 @@ impl Scope { /// Whether there's anything to do for the cleanup path, that is, /// when unwinding through this scope. This includes destructors /// (Value and ForLint drops). StorageDead drops are not included - /// here because they don't require cleanup blocks - they're only - /// needed for borrow-checking and are removed before codegen. + /// here because they don't require cleanup blocks. + /// + /// StorageDead statements are only needed for borrow-checking consistency: + /// they ensure the borrow-checker treats locals as dead at the same point on + /// all paths (normal and unwind). We only include StorageDead in cleanup blocks + /// when there are also Value or ForLint drops present, because: + /// - StorageDead is only relevant for borrow-checking when there are destructors + /// that might reference the dead variable + /// - We don't create cleanup blocks with only StorageDead (no destructors to run) + /// - StorageDead in cleanup blocks is removed by `CleanupPostBorrowck` after + /// borrow-checking, so it doesn't affect codegen + /// - StorageDead on normal paths may be used by codegen backends and by the + /// coroutine-to-state-machine transform, so it's preserved there fn needs_cleanup(&self) -> bool { self.drops.iter().any(|drop| match drop.kind { DropKind::Value | DropKind::ForLint => true, @@ -1143,15 +1154,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // `unwind_to` should drop the value that we're about to // schedule. If dropping this value panics, then we continue // with the *next* value on the unwind path. - if unwind_to != DropIdx::MAX { - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.local, - drop_data.local - ); - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.kind, - drop_data.kind - ); + // Only adjust if the drop matches what unwind_to is pointing to + // (since we process drops in reverse order, unwind_to might not + // match the current drop if there are StorageDead or ForLint drops + // between Value drops). + if unwind_to != DropIdx::MAX + && unwind_drops.drop_nodes[unwind_to].data.local == drop_data.local + && unwind_drops.drop_nodes[unwind_to].data.kind == drop_data.kind + { unwind_to = unwind_drops.drop_nodes[unwind_to].next; } @@ -1251,6 +1261,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // diverge cleanup pads ready in case that drop panics. let needs_cleanup = self.scopes.scopes.last().is_some_and(|scope| scope.needs_cleanup()); let is_coroutine = self.coroutine.is_some(); + // `unwind_to` tracks the position in the unwind drop tree, not just whether cleanup + // is needed. It's used by `build_scope_drops` to correctly position drops in the + // unwind tree. We use `needs_cleanup` to determine if we need to initialize it. let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX }; let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes"); @@ -1259,9 +1272,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None }; let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes"); let typing_env = self.typing_env(); - let has_storage_drops = scope.drops.iter().any(|d| d.kind == DropKind::Storage); - // Only emit StorageDead on unwind paths when there's actually an unwind path - let storage_dead_on_unwind = has_storage_drops && unwind_to != DropIdx::MAX; build_scope_drops( &mut self.cfg, &mut self.scopes.unwind_drops, @@ -1270,7 +1280,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { block, unwind_to, dropline_to, - storage_dead_on_unwind, + needs_cleanup, self.arg_count, |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, typing_env, v), ) @@ -1654,37 +1664,46 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } let is_coroutine = self.coroutine.is_some(); - // Check if there's a cleanup path (i.e., Value or ForLint drops that require cleanup) - let has_cleanup_path = self.scopes.scopes[uncached_scope..=target].iter().any(|scope| { - scope.drops.iter().any(|d| d.kind == DropKind::Value || d.kind == DropKind::ForLint) - }); + // Check if there are Value or ForLint drops present. We only add StorageDead drops + // when there are Value/ForLint drops, since StorageDead is only relevant for + // borrow-checking when there are destructors that might reference the dead variable. + let has_value_or_forlint_drops = + self.scopes.scopes[uncached_scope..=target].iter().any(|scope| scope.needs_cleanup()); + for scope in &mut self.scopes.scopes[uncached_scope..=target] { for drop in &scope.drops { // We add drops to unwind_drops to ensure the borrow-checker treats locals as dead // at the same point on all paths (normal and unwind). This is for static semantics // (borrow-checking), not runtime drop order. // - // StorageDead drops are only needed when there are Value or ForLint drops present, - // because: - // 1. StorageDead is only relevant for borrow-checking when there are destructors - // that might reference the dead variable - // 2. If there are no drops, there's no unwind path to emit StorageDead on - // 3. StorageDead on unwind paths ensures the borrow-checker correctly tracks - // when variables are dead for the purpose of checking whether they might be - // referenced by destructors + // For coroutines, we always add all drops (including StorageDead) because the + // coroutine-to-state-machine transform (which runs after CleanupPostBorrowck) needs + // StorageDead statements to determine storage liveness at suspension points. // - // These StorageDead statements are removed by the `RemoveStorageMarkers` MIR - // transform pass before codegen, so they don't affect LLVM output. They only - // affect static analysis (borrow-checking). + // For other functions, we add: + // - Value drops (always, as they require cleanup - running destructors) + // - ForLint drops (for future-compatibility linting, though they don't run code) + // - StorageDead drops (when there are Value or ForLint drops present) // - // For coroutines, we always add all drops. For other functions, we add: - // - Value and ForLint drops (always, as they require cleanup) - // - StorageDead drops (only when there's a cleanup path, for borrow-checking) - if is_coroutine - || drop.kind == DropKind::Value - || drop.kind == DropKind::ForLint - || (drop.kind == DropKind::Storage && has_cleanup_path) - { + // Note: ForLint drops don't actually run code - they turn into + // `BackwardIncompatibleDropHint` statements for future-compatibility linting. + // They don't require cleanup, but we include them for more precise linting. + // + // These StorageDead statements in cleanup blocks are removed by the + // `CleanupPostBorrowck` MIR transform pass (for non-coroutines only), and empty + // cleanup blocks are removed by `RemoveNoopLandingPads`, so they don't affect codegen. + // This is codegen backend agnostic. They only affect static analysis (borrow-checking). + let should_add = if is_coroutine { + true + } else if drop.kind == DropKind::Value || drop.kind == DropKind::ForLint { + true + } else if drop.kind == DropKind::Storage && has_value_or_forlint_drops { + true + } else { + false + }; + + if should_add { cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop); } } @@ -1869,8 +1888,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { /// panic occurred (a subset of the drops in `scope`) /// * `dropline_to`, describes the drops that would occur at this point in the code if a /// coroutine drop occurred. -/// * `storage_dead_on_unwind`, if true, then we emit `StorageDead` on the unwind path -/// and adjust `unwind_to` accordingly (used for all functions with StorageDead drops) +/// * `needs_cleanup`, if true, indicates there's a cleanup block (i.e., Value or ForLint drops +/// that require cleanup). When true and there are StorageDead drops, we emit `StorageDead` on +/// the unwind path and adjust `unwind_to` accordingly. /// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those) fn build_scope_drops<'tcx, F>( cfg: &mut CFG<'tcx>, @@ -1880,7 +1900,7 @@ fn build_scope_drops<'tcx, F>( block: BasicBlock, unwind_to: DropIdx, dropline_to: Option, - storage_dead_on_unwind: bool, + needs_cleanup: bool, arg_count: usize, is_async_drop: F, ) -> BlockAnd<()> @@ -1889,6 +1909,12 @@ where { debug!("build_scope_drops({:?} -> {:?}), dropline_to={:?}", block, scope, dropline_to); + // Compute whether we should emit StorageDead on unwind paths. + // We only emit StorageDead on unwind paths when there's a cleanup block (needs_cleanup) + // and there are StorageDead drops in the scope. + let has_storage_drops = scope.drops.iter().any(|d| d.kind == DropKind::Storage); + let storage_dead_on_unwind = has_storage_drops && needs_cleanup; + // Build up the drops in evaluation order. The end result will // look like: // @@ -1909,8 +1935,8 @@ where // (borrow-checking), not runtime drop order. The drops for the unwind path // should have already been generated by `diverge_cleanup_gen`. // - // Note: StorageDead statements are removed by the `RemoveStorageMarkers` MIR - // transform pass before codegen, so they don't affect LLVM output. + // Note: StorageDead statements in cleanup blocks are removed by the `CleanupPostBorrowck` MIR + // transform pass, so they don't affect codegen. // `unwind_to` indicates what needs to be dropped should unwinding occur. // This is a subset of what needs to be dropped when exiting the scope. @@ -1939,13 +1965,33 @@ where // // We adjust this BEFORE we create the drop (e.g., `drops[n]`) // because `drops[n]` should unwind to `drops[n-1]`. + // + // Since we process drops in reverse order and the unwind tree may contain + // StorageDead/ForLint drops interleaved with Value drops, `unwind_to` might + // not initially point to the current Value drop. We skip over non-matching + // drops in the unwind tree until we find the matching Value drop. if unwind_to != DropIdx::MAX { - debug_assert_eq!( - unwind_drops.drop_nodes[unwind_to].data.local, - drop_data.local - ); - debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind); - unwind_to = unwind_drops.drop_nodes[unwind_to].next; + // Skip over any non-matching drops (e.g., StorageDead/ForLint) until we + // find the matching Value drop in the unwind tree. + while unwind_to != DropIdx::MAX + && (unwind_drops.drop_nodes[unwind_to].data.local != drop_data.local + || unwind_drops.drop_nodes[unwind_to].data.kind != drop_data.kind) + { + unwind_to = unwind_drops.drop_nodes[unwind_to].next; + } + // Now `unwind_to` should point to the matching Value drop, or be MAX if + // we've reached the end of the unwind chain. + if unwind_to != DropIdx::MAX { + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local, + "unwind_to should point to the current Value drop" + ); + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind, + "unwind_to should point to the current Value drop" + ); + unwind_to = unwind_drops.drop_nodes[unwind_to].next; + } } if let Some(idx) = dropline_to { @@ -1962,6 +2008,8 @@ where continue; } + // Only add an entry point if there are more drops in the unwind path. + // `DropIdx::MAX` indicates the end of the unwind drop chain. if unwind_to != DropIdx::MAX { unwind_drops.add_entry_point(block, unwind_to); } @@ -1990,14 +2038,33 @@ where // As in the `DropKind::Storage` case below: // we emit lint-related drops on the unwind path when `storage_dead_on_unwind` // is true, so we need to adjust `unwind_to` in that case. - // Only adjust if the drop matches what unwind_to is pointing to (since we process - // drops in reverse order, unwind_to might not match the current drop). - if storage_dead_on_unwind - && unwind_to != DropIdx::MAX - && unwind_drops.drop_nodes[unwind_to].data.local == drop_data.local - && unwind_drops.drop_nodes[unwind_to].data.kind == drop_data.kind - { - unwind_to = unwind_drops.drop_nodes[unwind_to].next; + // + // Since we process drops in reverse order and the unwind tree may contain + // StorageDead/Value drops interleaved with ForLint drops, `unwind_to` might + // not initially point to the current ForLint drop. We skip over non-matching + // drops in the unwind tree until we find the matching ForLint drop. + if storage_dead_on_unwind && unwind_to != DropIdx::MAX { + // Skip over any non-matching drops (e.g., StorageDead/Value) until we + // find the matching ForLint drop in the unwind tree. + while unwind_to != DropIdx::MAX + && (unwind_drops.drop_nodes[unwind_to].data.local != drop_data.local + || unwind_drops.drop_nodes[unwind_to].data.kind != drop_data.kind) + { + unwind_to = unwind_drops.drop_nodes[unwind_to].next; + } + // Now `unwind_to` should point to the matching ForLint drop, or be MAX if + // we've reached the end of the unwind chain. + if unwind_to != DropIdx::MAX { + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local, + "unwind_to should point to the current ForLint drop" + ); + debug_assert_eq!( + unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind, + "unwind_to should point to the current ForLint drop" + ); + unwind_to = unwind_drops.drop_nodes[unwind_to].next; + } } // If the operand has been moved, and we are not on an unwind @@ -2026,10 +2093,11 @@ where // are Value or ForLint drops present, because: // 1. StorageDead is only relevant for borrow-checking when there are destructors // that might reference the dead variable - // 2. If there are no drops, there's no unwind path to emit StorageDead on + // 2. If there are no drops, we don't need a cleanup block (unwinding still occurs, + // but there are no destructors to run before popping the stack frame) // - // These StorageDead statements are removed by the `RemoveStorageMarkers` MIR - // transform pass before codegen, so they don't affect LLVM output. + // These StorageDead statements are removed by the `CleanupPostBorrowck` MIR + // transform pass, so they don't affect codegen. // // When `storage_dead_on_unwind` is true, we need to adjust the `unwind_to` pointer // now that the storage-dead has completed, so that any future drops we emit will @@ -2075,26 +2143,59 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { // Link the exit drop tree to unwind drop tree. if drops.drop_nodes.iter().any(|drop_node| drop_node.data.kind == DropKind::Value) { let unwind_target = self.diverge_cleanup_target(else_scope, span); + + // Check if there are Value or ForLint drops present. We only add StorageDead drops + // when there are Value/ForLint drops, since StorageDead is only relevant for + // borrow-checking when there are destructors that might reference the dead variable. + let has_value_or_forlint_drops = drops.drop_nodes.iter().any(|drop_node| { + drop_node.data.kind == DropKind::Value || drop_node.data.kind == DropKind::ForLint + }); + let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1); for (drop_idx, drop_node) in drops.drop_nodes.iter_enumerated().skip(1) { - match drop_node.data.kind { - DropKind::Storage | DropKind::ForLint => { - let unwind_drop = self - .scopes - .unwind_drops - .add_drop(drop_node.data, unwind_indices[drop_node.next]); - unwind_indices.push(unwind_drop); - } - DropKind::Value => { - let unwind_drop = self - .scopes - .unwind_drops - .add_drop(drop_node.data, unwind_indices[drop_node.next]); - self.scopes.unwind_drops.add_entry_point( - blocks[drop_idx].unwrap(), - unwind_indices[drop_node.next], - ); - unwind_indices.push(unwind_drop); + // For coroutines, we always add all drops (including StorageDead) because the + // coroutine-to-state-machine transform needs StorageDead statements. + // + // For other functions, we add: + // - Value drops (always, as they require cleanup - running destructors) + // - ForLint drops (for future-compatibility linting, though they don't run code) + // - StorageDead drops (when there are Value or ForLint drops present) + // + // These StorageDead statements in cleanup blocks are removed by the + // `CleanupPostBorrowck` MIR transform pass (for non-coroutines only), and empty + // cleanup blocks are removed by `RemoveNoopLandingPads`, so they don't affect codegen. + let should_add = if is_coroutine { + true + } else if drop_node.data.kind == DropKind::Value + || drop_node.data.kind == DropKind::ForLint + { + true + } else if drop_node.data.kind == DropKind::Storage && has_value_or_forlint_drops { + true + } else { + false + }; + + if should_add { + match drop_node.data.kind { + DropKind::Storage | DropKind::ForLint => { + let unwind_drop = self + .scopes + .unwind_drops + .add_drop(drop_node.data, unwind_indices[drop_node.next]); + unwind_indices.push(unwind_drop); + } + DropKind::Value => { + let unwind_drop = self + .scopes + .unwind_drops + .add_drop(drop_node.data, unwind_indices[drop_node.next]); + self.scopes.unwind_drops.add_entry_point( + blocks[drop_idx].unwrap(), + unwind_indices[drop_node.next], + ); + unwind_indices.push(unwind_drop); + } } } } diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs index 80457e6c1e34e..6f15087cb8c32 100644 --- a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs +++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs @@ -6,8 +6,10 @@ //! - [`FakeRead`] //! - [`Assign`] statements with a [`Fake`] borrow //! - [`Coverage`] statements of kind [`BlockMarker`] or [`SpanMarker`] -//! - [`StorageDead`] statements (these are only needed for borrow-checking and are removed -//! after borrowck completes to ensure they don't affect later optimization passes or codegen) +//! - [`StorageDead`] statements in cleanup blocks (unwind paths) - these are only needed +//! for borrow-checking and are removed from cleanup blocks after borrowck completes. +//! StorageDead on normal paths may be needed by later passes (e.g., coroutine transforms) +//! and will be conditionally removed by RemoveStorageMarkers during optimization if enabled. //! //! [`AscribeUserType`]: rustc_middle::mir::StatementKind::AscribeUserType //! [`Assign`]: rustc_middle::mir::StatementKind::Assign @@ -32,9 +34,10 @@ impl<'tcx> crate::MirPass<'tcx> for CleanupPostBorrowck { let mut invalidate_cfg = false; for basic_block in body.basic_blocks.as_mut_preserves_cfg().iter_mut() { // Only remove StorageDead from cleanup blocks (unwind paths). - // StorageDead on normal paths is still needed for MIR validation - // and will be removed later by RemoveStorageMarkers during optimization. - let is_cleanup = basic_block.is_cleanup; + // StorageDead on normal paths may be needed by later passes (e.g., coroutine + // transforms) and may be used by codegen backends. RemoveStorageMarkers will + // conditionally remove them during optimization if enabled (when mir_opt_level > 0 + // and lifetime markers are not being emitted). for statement in basic_block.statements.iter_mut() { match statement.kind { StatementKind::AscribeUserType(..) @@ -48,7 +51,11 @@ impl<'tcx> crate::MirPass<'tcx> for CleanupPostBorrowck { | StatementKind::BackwardIncompatibleDropHint { .. } => { statement.make_nop(true) } - StatementKind::StorageDead(..) if is_cleanup => statement.make_nop(true), + StatementKind::StorageDead(..) + if basic_block.is_cleanup && body.coroutine.is_none() => + { + statement.make_nop(true) + } StatementKind::Assign(box ( _, Rvalue::Cast( From 44b8ddafa489b82fc919434a5bfb2f15accca9f0 Mon Sep 17 00:00:00 2001 From: sladyn98 Date: Mon, 9 Feb 2026 00:04:47 -0800 Subject: [PATCH 8/8] Fix clippy::needless_bool in scope.rs and update test expectations Simplify boolean expressions in scope.rs to fix clippy::needless_bool lint failures, and update test expectations for dropck_trait_cycle_checked and ctfe-arg-bad-borrow to match new StorageDead behavior. --- compiler/rustc_mir_build/src/builder/scope.rs | 26 +++-------- tests/ui/dropck/dropck_trait_cycle_checked.rs | 6 +-- .../dropck/dropck_trait_cycle_checked.stderr | 43 +------------------ .../ctfe-arg-bad-borrow.stderr | 7 +-- 4 files changed, 15 insertions(+), 67 deletions(-) diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index 99cd618c7cdb8..1118aad5ef3d9 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -1693,15 +1693,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // `CleanupPostBorrowck` MIR transform pass (for non-coroutines only), and empty // cleanup blocks are removed by `RemoveNoopLandingPads`, so they don't affect codegen. // This is codegen backend agnostic. They only affect static analysis (borrow-checking). - let should_add = if is_coroutine { - true - } else if drop.kind == DropKind::Value || drop.kind == DropKind::ForLint { - true - } else if drop.kind == DropKind::Storage && has_value_or_forlint_drops { - true - } else { - false - }; + let should_add = is_coroutine + || drop.kind == DropKind::Value + || drop.kind == DropKind::ForLint + || (drop.kind == DropKind::Storage && has_value_or_forlint_drops); if should_add { cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop); @@ -2164,17 +2159,10 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { // These StorageDead statements in cleanup blocks are removed by the // `CleanupPostBorrowck` MIR transform pass (for non-coroutines only), and empty // cleanup blocks are removed by `RemoveNoopLandingPads`, so they don't affect codegen. - let should_add = if is_coroutine { - true - } else if drop_node.data.kind == DropKind::Value + let should_add = is_coroutine + || drop_node.data.kind == DropKind::Value || drop_node.data.kind == DropKind::ForLint - { - true - } else if drop_node.data.kind == DropKind::Storage && has_value_or_forlint_drops { - true - } else { - false - }; + || (drop_node.data.kind == DropKind::Storage && has_value_or_forlint_drops); if should_add { match drop_node.data.kind { diff --git a/tests/ui/dropck/dropck_trait_cycle_checked.rs b/tests/ui/dropck/dropck_trait_cycle_checked.rs index ffe43480b12cb..89ce1e75058e7 100644 --- a/tests/ui/dropck/dropck_trait_cycle_checked.rs +++ b/tests/ui/dropck/dropck_trait_cycle_checked.rs @@ -110,10 +110,10 @@ fn f() { let (o1, o2, o3): (Box, Box, Box) = (O::new(), O::new(), O::new()); o1.set0(&o2); //~ ERROR `o2` does not live long enough o1.set1(&o3); //~ ERROR `o3` does not live long enough - o2.set0(&o2); //~ ERROR `o2` does not live long enough - o2.set1(&o3); //~ ERROR `o3` does not live long enough + o2.set0(&o2); + o2.set1(&o3); o3.set0(&o1); //~ ERROR `o1` does not live long enough - o3.set1(&o2); //~ ERROR `o2` does not live long enough + o3.set1(&o2); } fn main() { diff --git a/tests/ui/dropck/dropck_trait_cycle_checked.stderr b/tests/ui/dropck/dropck_trait_cycle_checked.stderr index f32736f1a674c..702dd952c5921 100644 --- a/tests/ui/dropck/dropck_trait_cycle_checked.stderr +++ b/tests/ui/dropck/dropck_trait_cycle_checked.stderr @@ -25,34 +25,6 @@ LL | } | = note: due to object lifetime defaults, `Box>` actually means `Box<(dyn Obj<'_> + 'static)>` -error[E0597]: `o2` does not live long enough - --> $DIR/dropck_trait_cycle_checked.rs:113:13 - | -LL | let (o1, o2, o3): (Box, Box, Box) = (O::new(), O::new(), O::new()); - | -- binding `o2` declared here -------- coercion requires that `o2` is borrowed for `'static` -... -LL | o2.set0(&o2); - | ^^^ borrowed value does not live long enough -... -LL | } - | - `o2` dropped here while still borrowed - | - = note: due to object lifetime defaults, `Box>` actually means `Box<(dyn Obj<'_> + 'static)>` - -error[E0597]: `o3` does not live long enough - --> $DIR/dropck_trait_cycle_checked.rs:114:13 - | -LL | let (o1, o2, o3): (Box, Box, Box) = (O::new(), O::new(), O::new()); - | -- binding `o3` declared here -------- coercion requires that `o3` is borrowed for `'static` -... -LL | o2.set1(&o3); - | ^^^ borrowed value does not live long enough -... -LL | } - | - `o3` dropped here while still borrowed - | - = note: due to object lifetime defaults, `Box>` actually means `Box<(dyn Obj<'_> + 'static)>` - error[E0597]: `o1` does not live long enough --> $DIR/dropck_trait_cycle_checked.rs:115:13 | @@ -67,19 +39,6 @@ LL | } | = note: due to object lifetime defaults, `Box>` actually means `Box<(dyn Obj<'_> + 'static)>` -error[E0597]: `o2` does not live long enough - --> $DIR/dropck_trait_cycle_checked.rs:116:13 - | -LL | let (o1, o2, o3): (Box, Box, Box) = (O::new(), O::new(), O::new()); - | -- binding `o2` declared here -------- coercion requires that `o2` is borrowed for `'static` -... -LL | o3.set1(&o2); - | ^^^ borrowed value does not live long enough -LL | } - | - `o2` dropped here while still borrowed - | - = note: due to object lifetime defaults, `Box>` actually means `Box<(dyn Obj<'_> + 'static)>` - -error: aborting due to 6 previous errors +error: aborting due to 3 previous errors For more information about this error, try `rustc --explain E0597`. diff --git a/tests/ui/explicit-tail-calls/ctfe-arg-bad-borrow.stderr b/tests/ui/explicit-tail-calls/ctfe-arg-bad-borrow.stderr index ece581dc62638..75fb13c378c24 100644 --- a/tests/ui/explicit-tail-calls/ctfe-arg-bad-borrow.stderr +++ b/tests/ui/explicit-tail-calls/ctfe-arg-bad-borrow.stderr @@ -4,9 +4,10 @@ error[E0597]: `local` does not live long enough LL | let local = Type; | ----- binding `local` declared here LL | become takes_borrow(&local); - | ^^^^^^- `local` dropped here while still borrowed - | | - | borrowed value does not live long enough + | ^^^^^^ borrowed value does not live long enough +LL | +LL | } + | - `local` dropped here while still borrowed error: aborting due to 1 previous error