Merge pull request #2630 from RossBrunton/ross/v2devirtualevent
Don't use inheritance for L0 V2 event handles
RossBrunton authored Jan 29, 2025

Commit 0fd6433 (2 parents: 8e6ea72 + a7cd756)
Showing 4 changed files with 62 additions and 64 deletions.
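
The change replaces the ur_pooled_event_t and ur_native_event_t subclasses with a single ur_event_handle_t_ that stores its Level Zero event in a std::variant and uses a nullable event_pool pointer to tell pooled and native events apart. Below is a minimal standalone sketch of that pattern; Pool, BorrowedEvent and OwnedEvent are placeholders, not the real v2::raii or adapter types.

#include <iostream>
#include <utility>
#include <variant>

struct BorrowedEvent { int id; }; // stand-in for v2::raii::cache_borrowed_event
struct OwnedEvent { int id; };    // stand-in for v2::raii::ze_event_handle_t
struct Pool;                      // stand-in for v2::event_pool

struct Event {
  using Storage = std::variant<BorrowedEvent, OwnedEvent>;

  // Pooled events carry a non-null pool pointer, native events carry nullptr.
  Event(Storage storage, Pool *pool) : pool(pool), storage(std::move(storage)) {}

  int nativeId() const {
    // Dispatch on the discriminator instead of a virtual call.
    return pool ? std::get<BorrowedEvent>(storage).id
                : std::get<OwnedEvent>(storage).id;
  }

  Pool *pool = nullptr;
  Storage storage;
};

int main() {
  Event native{OwnedEvent{42}, nullptr};
  std::cout << native.nativeId() << "\n"; // prints 42
}
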
source/adapters/level_zero/v2/event.cpp (65 changes: 36 additions & 29 deletions)
@@ -87,17 +87,17 @@ uint64_t *event_profiling_data_t::eventEndTimestampAddr() {
   return &recordEventEndTimestamp;
 }
 
-ur_event_handle_t_::ur_event_handle_t_(ur_context_handle_t hContext,
-                                       ze_event_handle_t hZeEvent,
-                                       v2::event_flags_t flags)
-    : hContext(hContext), hZeEvent(hZeEvent), flags(flags),
-      profilingData(hZeEvent) {}
+ur_event_handle_t_::ur_event_handle_t_(
+    ur_context_handle_t hContext, ur_event_handle_t_::event_variant hZeEvent,
+    v2::event_flags_t flags, v2::event_pool *pool)
+    : hContext(hContext), event_pool(pool), hZeEvent(std::move(hZeEvent)),
+      flags(flags), profilingData(getZeEvent()) {}
 
 void ur_event_handle_t_::resetQueueAndCommand(ur_queue_handle_t hQueue,
                                               ur_command_t commandType) {
   this->hQueue = hQueue;
   this->commandType = commandType;
-  profilingData = event_profiling_data_t(hZeEvent);
+  profilingData = event_profiling_data_t(getZeEvent());
 }
 
 void ur_event_handle_t_::recordStartTimestamp() {
@@ -123,13 +123,16 @@ void ur_event_handle_t_::reset() {
   // consider make an abstraction for regular/counter based
   // events if there's more of this type of conditions
   if (!(flags & v2::EVENT_FLAGS_COUNTER)) {
-    zeEventHostReset(hZeEvent);
+    zeEventHostReset(getZeEvent());
   }
 }
 
 ze_event_handle_t ur_event_handle_t_::getZeEvent() const {
-  assert(hZeEvent);
-  return hZeEvent;
+  if (event_pool) {
+    return std::get<v2::raii::cache_borrowed_event>(hZeEvent).get();
+  } else {
+    return std::get<v2::raii::ze_event_handle_t>(hZeEvent).get();
+  }
 }
 
 ur_result_t ur_event_handle_t_::retain() {
@@ -138,7 +141,7 @@ ur_result_t ur_event_handle_t_::retain() {
 }
 
 ur_result_t ur_event_handle_t_::releaseDeferred() {
-  assert(zeEventQueryStatus(hZeEvent) == ZE_RESULT_SUCCESS);
+  assert(zeEventQueryStatus(getZeEvent()) == ZE_RESULT_SUCCESS);
   assert(RefCount.load() == 0);
 
   return this->forceRelease();
@@ -176,7 +179,7 @@ bool ur_event_handle_t_::isProfilingEnabled() const {
 
 std::pair<uint64_t *, ze_event_handle_t>
 ur_event_handle_t_::getEventEndTimestampAndHandle() {
-  return {profilingData.eventEndTimestampAddr(), hZeEvent};
+  return {profilingData.eventEndTimestampAddr(), getZeEvent()};
 }
 
 ur_queue_handle_t ur_event_handle_t_::getQueue() const { return hQueue; }
@@ -185,29 +188,33 @@ ur_context_handle_t ur_event_handle_t_::getContext() const { return hContext; }
 
 ur_command_t ur_event_handle_t_::getCommandType() const { return commandType; }
 
-ur_pooled_event_t::ur_pooled_event_t(
+ur_event_handle_t_::ur_event_handle_t_(
     ur_context_handle_t hContext,
     v2::raii::cache_borrowed_event eventAllocation, v2::event_pool *pool)
-    : ur_event_handle_t_(hContext, eventAllocation.get(), pool->getFlags()),
-      zeEvent(std::move(eventAllocation)), pool(pool) {}
-
-ur_result_t ur_pooled_event_t::forceRelease() {
-  pool->free(this);
-  return UR_RESULT_SUCCESS;
-}
+    : ur_event_handle_t_(hContext, std::move(eventAllocation), pool->getFlags(),
+                         pool) {}
 
-ur_native_event_t::ur_native_event_t(
-    ur_native_handle_t hNativeEvent, ur_context_handle_t hContext,
+ur_event_handle_t_::ur_event_handle_t_(
+    ur_context_handle_t hContext, ur_native_handle_t hNativeEvent,
     const ur_event_native_properties_t *pProperties)
     : ur_event_handle_t_(
          hContext,
-          reinterpret_cast<ze_event_handle_t>(hNativeEvent), v2::EVENT_FLAGS_PROFILING_ENABLED /* TODO: this follows legacy adapter logic, we could check this with zeEventGetPool */),
-      zeEvent(reinterpret_cast<ze_event_handle_t>(hNativeEvent),
-              pProperties ? pProperties->isNativeHandleOwned : false) {}
-
-ur_result_t ur_native_event_t::forceRelease() {
-  zeEvent.release();
-  delete this;
+          v2::raii::ze_event_handle_t{
+              reinterpret_cast<ze_event_handle_t>(hNativeEvent),
+              pProperties ? pProperties->isNativeHandleOwned : false},
+          v2::EVENT_FLAGS_PROFILING_ENABLED /* TODO: this follows legacy adapter
+                                               logic, we could check this with
+                                               zeEventGetPool */
+          ,
+          nullptr) {}
+
+ur_result_t ur_event_handle_t_::forceRelease() {
+  if (event_pool) {
+    event_pool->free(this);
+  } else {
+    std::get<v2::raii::ze_event_handle_t>(hZeEvent).release();
+    delete this;
+  }
   return UR_RESULT_SUCCESS;
 }
 
@@ -389,7 +396,7 @@ urEventCreateWithNativeHandle(ur_native_handle_t hNativeEvent,
     *phEvent = hContext->nativeEventsPool.allocate();
     ZE2UR_CALL(zeEventHostSignal, ((*phEvent)->getZeEvent()));
   } else {
-    *phEvent = new ur_native_event_t(hNativeEvent, hContext, pProperties);
+    *phEvent = new ur_event_handle_t_(hContext, hNativeEvent, pProperties);
   }
   return UR_RESULT_SUCCESS;
 } catch (...) {
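
As the diff above shows, getZeEvent() and forceRelease() now branch on the event_pool pointer and pull the active alternative out of the variant with std::get. The standalone sketch below (hypothetical placeholder types, not the adapter's) illustrates the same std::get mechanics; std::get throws std::bad_variant_access if the requested alternative is not the active one, so the discriminator must agree with what was stored, and std::visit is the usual alternative when no external flag is available.

#include <cassert>
#include <variant>

struct PooledHandle { int value; };
struct NativeHandle { int value; };

int rawValue(const std::variant<PooledHandle, NativeHandle> &v, bool pooled) {
  // Mirrors the getZeEvent() pattern: an external flag selects the alternative.
  return pooled ? std::get<PooledHandle>(v).value
                : std::get<NativeHandle>(v).value;
}

int rawValueVisit(const std::variant<PooledHandle, NativeHandle> &v) {
  // std::visit dispatches on the active alternative without an external flag.
  return std::visit([](const auto &h) { return h.value; }, v);
}

int main() {
  std::variant<PooledHandle, NativeHandle> v = NativeHandle{7};
  assert(rawValue(v, /*pooled=*/false) == 7);
  assert(rawValueVisit(v) == 7);
}
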
source/adapters/level_zero/v2/event.hpp (49 changes: 20 additions & 29 deletions)
@@ -47,15 +47,24 @@ struct event_profiling_data_t {
 
 struct ur_event_handle_t_ : _ur_object {
 public:
-  ur_event_handle_t_(ur_context_handle_t hContext, ze_event_handle_t hZeEvent,
-                     v2::event_flags_t flags);
+  // cache_borrowed_event is used for pooled events, whilst ze_event_handle_t is
+  // used for native events
+  using event_variant =
+      std::variant<v2::raii::cache_borrowed_event, v2::raii::ze_event_handle_t>;
+
+  ur_event_handle_t_(ur_context_handle_t hContext,
+                     v2::raii::cache_borrowed_event eventAllocation,
+                     v2::event_pool *pool);
+
+  ur_event_handle_t_(ur_context_handle_t hContext,
+                     ur_native_handle_t hNativeEvent,
+                     const ur_event_native_properties_t *pProperties);
 
   // Set the queue and command that this event is associated with
   void resetQueueAndCommand(ur_queue_handle_t hQueue, ur_command_t commandType);
 
   // releases event immediately
-  virtual ur_result_t forceRelease() = 0;
-  virtual ~ur_event_handle_t_() = default;
+  ur_result_t forceRelease();
 
   void reset();
   ze_event_handle_t getZeEvent() const;
@@ -97,11 +106,16 @@ struct ur_event_handle_t_ : _ur_object {
   uint64_t getEventStartTimestmap() const;
   uint64_t getEventEndTimestamp();
 
+private:
+  ur_event_handle_t_(ur_context_handle_t hContext, event_variant hZeEvent,
+                     v2::event_flags_t flags, v2::event_pool *pool);
+
 protected:
   ur_context_handle_t hContext;
 
-  // non-owning handle to the L0 event
-  const ze_event_handle_t hZeEvent;
+  // Pool is used if and only if this is a pooled event
+  v2::event_pool *event_pool = nullptr;
+  event_variant hZeEvent;
 
   // queue and commandType that this event is associated with, set by enqueue
   // commands
@@ -111,26 +125,3 @@ struct ur_event_handle_t_ : _ur_object {
   v2::event_flags_t flags;
   event_profiling_data_t profilingData;
 };
-
-struct ur_pooled_event_t : ur_event_handle_t_ {
-  ur_pooled_event_t(ur_context_handle_t hContext,
-                    v2::raii::cache_borrowed_event eventAllocation,
-                    v2::event_pool *pool);
-
-  ur_result_t forceRelease() override;
-
-private:
-  v2::raii::cache_borrowed_event zeEvent;
-  v2::event_pool *pool;
-};
-
-struct ur_native_event_t : ur_event_handle_t_ {
-  ur_native_event_t(ur_native_handle_t hNativeEvent,
-                    ur_context_handle_t hContext,
-                    const ur_event_native_properties_t *pProperties);
-
-  ur_result_t forceRelease() override;
-
-private:
-  v2::raii::ze_event_handle_t zeEvent;
-};
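
The header now declares two public constructors (pooled and native) plus a private one taking the event_variant, flags, and pool; both public overloads funnel into the private constructor, and the virtual destructor is dropped because nothing derives from ur_event_handle_t_ any more. A small sketch of that delegating-constructor shape, with made-up types rather than the adapter's:

#include <string>
#include <utility>
#include <variant>

struct Widget {
  using Backing = std::variant<int, std::string>;

  // Public constructors, one per flavour...
  explicit Widget(int pooled) : Widget(Backing{pooled}, /*fromPool=*/true) {}
  explicit Widget(std::string native)
      : Widget(Backing{std::move(native)}, /*fromPool=*/false) {}

private:
  // ...both delegate to a single private constructor, mirroring the private
  // ur_event_handle_t_(hContext, event_variant, flags, pool) overload above.
  Widget(Backing backing, bool fromPool)
      : fromPool(fromPool), backing(std::move(backing)) {}

  bool fromPool;
  Backing backing;
};

int main() {
  Widget a{5};
  Widget b{std::string{"native"}};
  (void)a;
  (void)b;
}
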
source/adapters/level_zero/v2/event_pool.cpp (4 changes: 2 additions & 2 deletions)
@@ -17,7 +17,7 @@ namespace v2 {
 
 static constexpr size_t EVENTS_BURST = 64;
 
-ur_pooled_event_t *event_pool::allocate() {
+ur_event_handle_t event_pool::allocate() {
   TRACK_SCOPE_LATENCY("event_pool::allocate");
 
   std::unique_lock<std::mutex> lock(*mutex);
@@ -42,7 +42,7 @@ ur_pooled_event_t *event_pool::allocate() {
   return event;
 }
 
-void event_pool::free(ur_pooled_event_t *event) {
+void event_pool::free(ur_event_handle_t event) {
   TRACK_SCOPE_LATENCY("event_pool::free");
 
   std::unique_lock<std::mutex> lock(*mutex);
source/adapters/level_zero/v2/event_pool.hpp (8 changes: 4 additions & 4 deletions)
@@ -41,10 +41,10 @@ class event_pool {
   event_pool &operator=(const event_pool &) = delete;
 
   // Allocate an event from the pool. Thread safe.
-  ur_pooled_event_t *allocate();
+  ur_event_handle_t allocate();
 
   // Free an event back to the pool. Thread safe.
-  void free(ur_pooled_event_t *event);
+  void free(ur_event_handle_t event);
 
   event_provider *getProvider() const;
   event_flags_t getFlags() const;
@@ -53,8 +53,8 @@ class event_pool {
   ur_context_handle_t hContext;
   std::unique_ptr<event_provider> provider;
 
-  std::deque<ur_pooled_event_t> events;
-  std::vector<ur_pooled_event_t *> freelist;
+  std::deque<ur_event_handle_t_> events;
+  std::vector<ur_event_handle_t> freelist;
 
   std::unique_ptr<std::mutex> mutex;
 };
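
event_pool keeps the events themselves by value in a std::deque and hands out raw pointers through a freelist; a deque can grow without relocating existing elements, so pointers that were already handed out stay valid. A simplified, illustrative pool along those lines follows (the real allocate/free also hold a mutex and grow in EVENTS_BURST-sized batches; locking is omitted here and the types are placeholders):

#include <cstddef>
#include <deque>
#include <vector>

struct Event { int payload = 0; };

class SimplePool {
public:
  Event *allocate() {
    if (freelist.empty()) {
      // Grow in bursts; std::deque keeps existing elements at stable
      // addresses, so previously handed-out pointers remain valid.
      for (std::size_t i = 0; i < kBurst; ++i) {
        storage.emplace_back();
        freelist.push_back(&storage.back());
      }
    }
    Event *event = freelist.back();
    freelist.pop_back();
    return event;
  }

  void free(Event *event) { freelist.push_back(event); }

private:
  static constexpr std::size_t kBurst = 64;
  std::deque<Event> storage;
  std::vector<Event *> freelist;
};

int main() {
  SimplePool pool;
  Event *e = pool.allocate();
  e->payload = 1;
  pool.free(e);
}
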
