Skip to content

Commit 33ac408

Browse files
airlied authored and heftig committed
nouveau: rip out fence irq allow/block sequences.
fences are signalled on nvidia hw using non-stall interrupts. non-stall interrupts are not latched from my reading. When nouveau emits a fence, it requests a NON_STALL signalling, but it only calls the interface to allow the non-stall irq to happen after it has already emitted the fence. A recent change eacabb5 ("nouveau: push event block/allowing out of the fence context") made this worse by pushing out the fence allow/block to a workqueue. However I can't see how this could ever work great, since when enable signalling is called, the semaphore has already been emitted to the ring, and the hw could already have tried to set the bits, but it's been masked off. Changing the allowed mask later won't make the interrupt get called again. For now rip all of this out. This fixes a bunch of stalls seen running VK CTS sync tests. Signed-off-by: Dave Airlie <airlied@redhat.com>
1 parent 2aa75b0 commit 33ac408

File tree

2 files changed

+16
-63
lines changed

2 files changed

+16
-63
lines changed

drivers/gpu/drm/nouveau/nouveau_fence.c

Lines changed: 16 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -50,24 +50,14 @@ nouveau_fctx(struct nouveau_fence *fence)
5050
return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
5151
}
5252

53-
static int
53+
static void
5454
nouveau_fence_signal(struct nouveau_fence *fence)
5555
{
56-
int drop = 0;
57-
5856
dma_fence_signal_locked(&fence->base);
5957
list_del(&fence->head);
6058
rcu_assign_pointer(fence->channel, NULL);
6159

62-
if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
63-
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
64-
65-
if (atomic_dec_and_test(&fctx->notify_ref))
66-
drop = 1;
67-
}
68-
6960
dma_fence_put(&fence->base);
70-
return drop;
7161
}
7262

7363
static struct nouveau_fence *
@@ -93,8 +83,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
9383
if (error)
9484
dma_fence_set_error(&fence->base, error);
9585

96-
if (nouveau_fence_signal(fence))
97-
nvif_event_block(&fctx->event);
86+
nouveau_fence_signal(fence);
9887
}
9988
fctx->killed = 1;
10089
spin_unlock_irqrestore(&fctx->lock, flags);
@@ -103,8 +92,8 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
10392
void
10493
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
10594
{
106-
cancel_work_sync(&fctx->allow_block_work);
10795
nouveau_fence_context_kill(fctx, 0);
96+
nvif_event_block(&fctx->event);
10897
nvif_event_dtor(&fctx->event);
10998
fctx->dead = 1;
11099

@@ -127,11 +116,10 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
127116
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
128117
}
129118

130-
static int
119+
static void
131120
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
132121
{
133122
struct nouveau_fence *fence;
134-
int drop = 0;
135123
u32 seq = fctx->read(chan);
136124

137125
while (!list_empty(&fctx->pending)) {
@@ -140,10 +128,8 @@ nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fc
140128
if ((int)(seq - fence->base.seqno) < 0)
141129
break;
142130

143-
drop |= nouveau_fence_signal(fence);
131+
nouveau_fence_signal(fence);
144132
}
145-
146-
return drop;
147133
}
148134

149135
static int
@@ -160,26 +146,13 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
160146

161147
fence = list_entry(fctx->pending.next, typeof(*fence), head);
162148
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
163-
if (nouveau_fence_update(chan, fctx))
164-
ret = NVIF_EVENT_DROP;
149+
nouveau_fence_update(chan, fctx);
165150
}
166151
spin_unlock_irqrestore(&fctx->lock, flags);
167152

168153
return ret;
169154
}
170155

171-
static void
172-
nouveau_fence_work_allow_block(struct work_struct *work)
173-
{
174-
struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
175-
allow_block_work);
176-
177-
if (atomic_read(&fctx->notify_ref) == 0)
178-
nvif_event_block(&fctx->event);
179-
else
180-
nvif_event_allow(&fctx->event);
181-
}
182-
183156
void
184157
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
185158
{
@@ -191,7 +164,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
191164
} args;
192165
int ret;
193166

194-
INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
195167
INIT_LIST_HEAD(&fctx->flip);
196168
INIT_LIST_HEAD(&fctx->pending);
197169
spin_lock_init(&fctx->lock);
@@ -216,6 +188,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
216188
&args.base, sizeof(args), &fctx->event);
217189

218190
WARN_ON(ret);
191+
192+
/*
193+
* Always allow non-stall irq events - previously this code tried to
194+
* enable/disable them, but that just seems racy as nonstall irqs are unlatched.
195+
*/
196+
nvif_event_allow(&fctx->event);
219197
}
220198

221199
int
@@ -247,8 +225,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
247225
return -ENODEV;
248226
}
249227

250-
if (nouveau_fence_update(chan, fctx))
251-
nvif_event_block(&fctx->event);
228+
nouveau_fence_update(chan, fctx);
252229

253230
list_add_tail(&fence->head, &fctx->pending);
254231
spin_unlock_irq(&fctx->lock);
@@ -271,8 +248,8 @@ nouveau_fence_done(struct nouveau_fence *fence)
271248

272249
spin_lock_irqsave(&fctx->lock, flags);
273250
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
274-
if (chan && nouveau_fence_update(chan, fctx))
275-
nvif_event_block(&fctx->event);
251+
if (chan)
252+
nouveau_fence_update(chan, fctx);
276253
spin_unlock_irqrestore(&fctx->lock, flags);
277254
}
278255
return dma_fence_is_signaled(&fence->base);
@@ -530,32 +507,10 @@ static const struct dma_fence_ops nouveau_fence_ops_legacy = {
530507
.release = nouveau_fence_release
531508
};
532509

533-
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
534-
{
535-
struct nouveau_fence *fence = from_fence(f);
536-
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
537-
bool ret;
538-
bool do_work;
539-
540-
if (atomic_inc_return(&fctx->notify_ref) == 0)
541-
do_work = true;
542-
543-
ret = nouveau_fence_no_signaling(f);
544-
if (ret)
545-
set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
546-
else if (atomic_dec_and_test(&fctx->notify_ref))
547-
do_work = true;
548-
549-
if (do_work)
550-
schedule_work(&fctx->allow_block_work);
551-
552-
return ret;
553-
}
554-
555510
static const struct dma_fence_ops nouveau_fence_ops_uevent = {
556511
.get_driver_name = nouveau_fence_get_get_driver_name,
557512
.get_timeline_name = nouveau_fence_get_timeline_name,
558-
.enable_signaling = nouveau_fence_enable_signaling,
513+
.enable_signaling = nouveau_fence_no_signaling,
559514
.signaled = nouveau_fence_is_signaled,
560515
.release = nouveau_fence_release
561516
};

drivers/gpu/drm/nouveau/nouveau_fence.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,6 @@ struct nouveau_fence_chan {
4646
char name[32];
4747

4848
struct nvif_event event;
49-
struct work_struct allow_block_work;
50-
atomic_t notify_ref;
5149
int dead, killed;
5250
};
5351

0 commit comments

Comments (0)