Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 19 additions & 1 deletion wgpu-core/src/device/resource.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use core::{
fmt,
mem::{self, ManuallyDrop},
num::NonZeroU32,
sync::atomic::{AtomicBool, Ordering},
sync::atomic::{AtomicBool, AtomicU32, Ordering},
};
use hal::ShouldBeNonZeroExt;

Expand Down Expand Up @@ -278,6 +278,8 @@ pub struct Device {
/// Cleanup work queued on the device to be performed later, in a context
/// where no locks are held (see the device's deferred-destroy processing).
pub(crate) enum DeferredDestroy {
    TextureViews(WeakVec<TextureView>),
    BindGroups(WeakVec<BindGroup>),
    /// A buffer whose `destroy()` was deferred because command-buffer
    /// trackers still held it (`in_flight_count > 0`); `destroy()` is
    /// re-run when the queue is drained. Held weakly so the queue never
    /// extends the buffer's lifetime — a failed upgrade is simply skipped.
    Buffer(Weak<Buffer>),
    /// Same deferred-`destroy()` scheme as `Buffer`, for textures.
    Texture(Weak<Texture>),
}

impl fmt::Debug for Device {
Expand Down Expand Up @@ -705,6 +707,18 @@ impl Device {
}
}
}
DeferredDestroy::Buffer(buffer) => {
// Call destroy() now that we're in a safe context (no locks held).
if let Some(buffer) = buffer.upgrade() {
buffer.destroy();
}
}
DeferredDestroy::Texture(texture) => {
// Call destroy() now that we're in a safe context (no locks held).
if let Some(texture) = texture.upgrade() {
texture.destroy();
}
}
}
}
}
Expand Down Expand Up @@ -1051,6 +1065,8 @@ impl Device {
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
timestamp_normalization_bind_group,
indirect_validation_bind_groups,
destroyed: AtomicBool::new(false),
in_flight_count: AtomicU32::new(0),
};

let buffer = Arc::new(buffer);
Expand Down Expand Up @@ -1234,6 +1250,8 @@ impl Device {
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
timestamp_normalization_bind_group,
indirect_validation_bind_groups,
destroyed: AtomicBool::new(false),
in_flight_count: AtomicU32::new(0),
};

let buffer = Arc::new(buffer);
Expand Down
77 changes: 77 additions & 0 deletions wgpu-core/src/resource.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use core::{
num::NonZeroU64,
ops::Range,
ptr::NonNull,
sync::atomic::{AtomicBool, AtomicU32, Ordering},
};
use smallvec::SmallVec;
use thiserror::Error;
Expand Down Expand Up @@ -438,6 +439,14 @@ pub struct Buffer {
pub(crate) bind_groups: Mutex<WeakVec<BindGroup>>,
pub(crate) timestamp_normalization_bind_group: Snatchable<TimestampNormalizationBindGroup>,
pub(crate) indirect_validation_bind_groups: Snatchable<crate::indirect_validation::BindGroups>,
/// Set to true when destroy() is called. This makes check_destroyed() fail immediately,
/// but the actual raw resource cleanup may be deferred if in_flight_count > 0.
pub(crate) destroyed: AtomicBool,
/// Count of command buffer trackers that currently hold a reference to this buffer.
/// Incremented when inserted into a command buffer's tracker, decremented when the
/// tracker is cleared (on command buffer submission or drop).
/// When destroy() is called and this is > 0, raw resource cleanup is deferred to drop().
pub(crate) in_flight_count: AtomicU32,
}

impl Drop for Buffer {
Expand Down Expand Up @@ -472,6 +481,11 @@ impl Buffer {
&self,
guard: &SnatchGuard,
) -> Result<(), DestroyedResourceError> {
// Check the destroyed flag first - this is set by destroy() even if
// the raw resource hasn't been snatched yet (deferred cleanup case).
if self.destroyed.load(Ordering::Acquire) {
return Err(DestroyedResourceError(self.error_ident()));
}
self.raw
.get(guard)
.map(|_| ())
Expand Down Expand Up @@ -903,8 +917,23 @@ impl Buffer {
}

pub fn destroy(self: &Arc<Self>) {
// Mark as destroyed. This makes check_destroyed() fail,
// so any future use of this buffer will return an error.
self.destroyed.store(true, Ordering::Release);

let device = &self.device;

// Check if this buffer is currently in any command buffer's tracker.
// If so, we should NOT snatch the raw resource yet - the tracker's Drop
// will call destroy() again when the count reaches 0.
if self.in_flight_count.load(Ordering::Acquire) > 0 {
// Buffer is in a recording command buffer.
// The raw resource will be cleaned up when the command buffer is
// submitted/dropped and the tracker calls destroy() again.
return;
}

// Not in any command buffer tracker, safe to snatch and cleanup now.
let temp = {
let mut snatch_guard = device.snatchable_lock.write();

Expand Down Expand Up @@ -1255,6 +1284,14 @@ pub struct Texture {
pub(crate) clear_mode: RwLock<TextureClearMode>,
pub(crate) views: Mutex<WeakVec<TextureView>>,
pub(crate) bind_groups: Mutex<WeakVec<BindGroup>>,
/// Set to true when destroy() is called. This makes check_destroyed() fail immediately,
/// but the actual raw resource cleanup may be deferred if in_flight_count > 0.
pub(crate) destroyed: AtomicBool,
/// Count of command buffer trackers that currently hold a reference to this texture.
/// Incremented when inserted into a command buffer's tracker, decremented when the
/// tracker is cleared (on command buffer submission or drop).
/// When destroy() is called and this is > 0, raw resource cleanup is deferred to drop().
pub(crate) in_flight_count: AtomicU32,
}

impl Texture {
Expand Down Expand Up @@ -1290,6 +1327,8 @@ impl Texture {
clear_mode: RwLock::new(rank::TEXTURE_CLEAR_MODE, clear_mode),
views: Mutex::new(rank::TEXTURE_VIEWS, WeakVec::new()),
bind_groups: Mutex::new(rank::TEXTURE_BIND_GROUPS, WeakVec::new()),
destroyed: AtomicBool::new(false),
in_flight_count: AtomicU32::new(0),
}
}

Expand Down Expand Up @@ -1353,13 +1392,31 @@ impl RawResourceAccess for Texture {
fn raw<'a>(&'a self, guard: &'a SnatchGuard) -> Option<&'a Self::DynResource> {
    // Resolve the snatchable inner texture first; `None` means the raw
    // resource has already been snatched away.
    let inner = self.inner.get(guard)?;
    Some(inner.raw())
}

fn try_raw<'a>(
    &'a self,
    guard: &'a SnatchGuard,
) -> Result<&'a Self::DynResource, DestroyedResourceError> {
    // `destroy()` raises this flag before any raw-resource cleanup, so a
    // destroyed texture is rejected here even while the actual teardown
    // is still deferred (raw handle not yet snatched).
    if self.destroyed.load(Ordering::Acquire) {
        Err(DestroyedResourceError(self.error_ident()))
    } else {
        self.raw(guard)
            .ok_or_else(|| DestroyedResourceError(self.error_ident()))
    }
}
}

impl Texture {
pub(crate) fn try_inner<'a>(
&'a self,
guard: &'a SnatchGuard,
) -> Result<&'a TextureInner, DestroyedResourceError> {
// Check the destroyed flag first - this is set by destroy() even if
// the raw resource hasn't been snatched yet (deferred cleanup case).
if self.destroyed.load(Ordering::Acquire) {
return Err(DestroyedResourceError(self.error_ident()));
}
self.inner
.get(guard)
.ok_or_else(|| DestroyedResourceError(self.error_ident()))
Expand All @@ -1369,6 +1426,11 @@ impl Texture {
&self,
guard: &SnatchGuard,
) -> Result<(), DestroyedResourceError> {
// Check the destroyed flag first - this is set by destroy() even if
// the raw resource hasn't been snatched yet (deferred cleanup case).
if self.destroyed.load(Ordering::Acquire) {
return Err(DestroyedResourceError(self.error_ident()));
}
self.inner
.get(guard)
.map(|_| ())
Expand Down Expand Up @@ -1405,8 +1467,23 @@ impl Texture {
}

pub fn destroy(self: &Arc<Self>) {
// Mark as destroyed. This makes check_destroyed() fail,
// so any future use of this texture will return an error.
self.destroyed.store(true, Ordering::Release);

let device = &self.device;

// Check if this texture is currently in any command buffer's tracker.
// If so, we should NOT snatch the raw resource yet - the tracker's Drop
// will call destroy() again when the count reaches 0.
if self.in_flight_count.load(Ordering::Acquire) > 0 {
// Texture is in a recording command buffer.
// The raw resource will be cleaned up when the command buffer is
// submitted/dropped and the tracker calls destroy() again.
return;
}

// Not in any command buffer tracker, safe to snatch and cleanup now.
let temp = {
let raw = match self.inner.snatch(&mut device.snatchable_lock.write()) {
Some(TextureInner::Native { raw }) => raw,
Expand Down
25 changes: 25 additions & 0 deletions wgpu-core/src/track/buffer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ use alloc::{
sync::{Arc, Weak},
vec::Vec,
};
use core::sync::atomic::Ordering;

use hal::BufferBarrier;
use wgt::{strict_assert, strict_assert_eq, BufferUses};
Expand Down Expand Up @@ -530,6 +531,11 @@ impl BufferTracker {
let currently_owned = unsafe { self.metadata.contains_unchecked(index) };

if !currently_owned {
// Increment the in_flight_count for the buffer being inserted.
// This is decremented when the tracker is dropped.
let buffer = unsafe { metadata_provider.get(index) };
buffer.in_flight_count.fetch_add(1, Ordering::Release);

unsafe {
insert(
Some(&mut self.start),
Expand All @@ -552,6 +558,25 @@ impl BufferTracker {
}
}

impl Drop for BufferTracker {
    // Runs when a command buffer's tracker is torn down (on submission or
    // drop): releases this tracker's hold on every tracked buffer.
    fn drop(&mut self) {
        // Decrement in_flight_count for all buffers in this tracker.
        // If this was the last command buffer using a destroyed buffer,
        // queue it for deferred destruction (we can't call destroy() directly
        // here as it might cause lock recursion).
        for buffer in self.metadata.owned_resources() {
            // NOTE(review): the Acquire load of `destroyed` synchronizes with
            // destroy()'s Release store of the flag; confirm that AcqRel is
            // not needed on the fetch_sub to order against concurrent
            // decrements by other trackers dropping at the same time.
            let prev_count = buffer.in_flight_count.fetch_sub(1, Ordering::Release);
            if prev_count == 1 && buffer.destroyed.load(Ordering::Acquire) {
                // This was the last command buffer using this destroyed buffer.
                // Queue it for deferred destruction.
                // Downgraded to Weak: if the buffer is fully dropped before
                // the device drains the queue, upgrade() fails and the device
                // skips the entry.
                buffer.device.deferred_destroy.lock().push(
                    crate::device::resource::DeferredDestroy::Buffer(Arc::downgrade(buffer)),
                );
            }
        }
    }
}

/// Stores all buffer state within a device.
pub(crate) struct DeviceBufferTracker {
current_states: Vec<BufferUses>,
Expand Down
26 changes: 25 additions & 1 deletion wgpu-core/src/track/texture.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ use alloc::{
sync::{Arc, Weak},
vec::{Drain, Vec},
};
use core::iter;
use core::{iter, sync::atomic::Ordering};

impl ResourceUses for TextureUses {
const EXCLUSIVE: Self = Self::EXCLUSIVE;
Expand Down Expand Up @@ -667,6 +667,25 @@ impl TextureTrackerSetSingle for TextureTracker {
}
}

impl Drop for TextureTracker {
    // Runs when a command buffer's tracker is torn down (on submission or
    // drop): releases this tracker's hold on every tracked texture.
    fn drop(&mut self) {
        // Decrement in_flight_count for all textures in this tracker.
        // If this was the last command buffer using a destroyed texture,
        // queue it for deferred destruction (we can't call destroy() directly
        // here as it might cause lock recursion).
        for texture in self.metadata.owned_resources() {
            // NOTE(review): the Acquire load of `destroyed` synchronizes with
            // destroy()'s Release store of the flag; confirm that AcqRel is
            // not needed on the fetch_sub to order against concurrent
            // decrements by other trackers dropping at the same time.
            let prev_count = texture.in_flight_count.fetch_sub(1, Ordering::Release);
            if prev_count == 1 && texture.destroyed.load(Ordering::Acquire) {
                // This was the last command buffer using this destroyed texture.
                // Queue it for deferred destruction.
                // Downgraded to Weak: if the texture is fully dropped before
                // the device drains the queue, upgrade() fails and the device
                // skips the entry.
                texture.device.deferred_destroy.lock().push(
                    crate::device::resource::DeferredDestroy::Texture(Arc::downgrade(texture)),
                );
            }
        }
    }
}

/// Stores all texture state within a device.
pub(crate) struct DeviceTextureTracker {
current_state_set: TextureStateSet,
Expand Down Expand Up @@ -1038,6 +1057,11 @@ unsafe fn insert_or_barrier_update(
let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };

if !currently_owned {
// Increment the in_flight_count for the texture being inserted.
// This is decremented when the tracker is dropped.
let texture = unsafe { metadata_provider.get(index) };
texture.in_flight_count.fetch_add(1, Ordering::Release);

unsafe {
insert(
Some(texture_selector),
Expand Down
10 changes: 1 addition & 9 deletions wgpu-hal/src/metal/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -323,19 +323,11 @@ struct PrivateDisabilities {
broken_layered_clear_image: bool,
}

#[derive(Debug)]
#[derive(Debug, Default)]
struct Settings {
    // Derived Default yields `false` here, whereas the removed manual
    // `impl Default` set this to `true`. NOTE(review): confirm that flipping
    // the default (no longer retaining Metal command buffer references) is
    // the intended behavior change of this patch.
    retain_command_buffer_references: bool,
}

impl Default for Settings {
fn default() -> Self {
Self {
retain_command_buffer_references: true,
}
}
}

struct AdapterShared {
device: Mutex<metal::Device>,
disabilities: PrivateDisabilities,
Expand Down
Loading