Skip to content

Commit

Permalink
sched/spinlock: remove nesting spinlock support
Browse files Browse the repository at this point in the history
developers must be careful when holding spinlocks and must ensure
that all of the protected code remains under their control, so remove
support for nested spinlocks to improve performance.

Signed-off-by: chao an <anchao@lixiang.com>
  • Loading branch information
anchao committed Oct 11, 2024
1 parent 38f0056 commit e0778bd
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 33 deletions.
35 changes: 6 additions & 29 deletions include/nuttx/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -152,10 +152,6 @@ void sched_note_spinlock_unlock(FAR volatile spinlock_t *spinlock);

extern volatile spinlock_t g_irq_spin;

/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */

extern volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];

/****************************************************************************
* Name: up_testset
*
Expand Down Expand Up @@ -532,14 +528,7 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)

if (NULL == lock)
{
int me = this_cpu();
if (0 == g_irq_spin_count[me])
{
spin_lock_wo_note(&g_irq_spin);
}

g_irq_spin_count[me]++;
DEBUGASSERT(0 != g_irq_spin_count[me]);
spin_lock_wo_note(&g_irq_spin);
}
else
{
Expand All @@ -558,10 +547,7 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
* Description:
* If SMP is enabled:
* If the argument lock is not specified (i.e. NULL),
* disable local interrupts and take the global spinlock (g_irq_spin)
* if the call counter (g_irq_spin_count[cpu]) equals to 0. Then the
* counter on the CPU is incremented to allow nested calls and return
* the interrupt state.
* disable local interrupts and take the global spinlock (g_irq_spin).
*
* If the argument lock is specified,
* disable local interrupts and take the lock spinlock and return
Expand Down Expand Up @@ -699,14 +685,7 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
{
if (NULL == lock)
{
int me = this_cpu();
DEBUGASSERT(0 < g_irq_spin_count[me]);
g_irq_spin_count[me]--;

if (0 == g_irq_spin_count[me])
{
spin_unlock_wo_note(&g_irq_spin);
}
spin_unlock_wo_note(&g_irq_spin);
}
else
{
Expand All @@ -724,11 +703,9 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
*
* Description:
* If SMP is enabled:
* If the argument lock is not specified (i.e. NULL),
* decrement the call counter (g_irq_spin_count[cpu]) and if it
* decrements to zero then release the spinlock (g_irq_spin) and
* restore the interrupt state as it was prior to the previous call to
* spin_lock_irqsave(NULL).
* If the argument lock is not specified (i.e. NULL), release the
* spinlock (g_irq_spin) and restore the interrupt state as it was
* prior to the previous call to spin_lock_irqsave(NULL).
*
* If the argument lock is specified, release the lock and
* restore the interrupt state as it was prior to the previous call to
Expand Down
4 changes: 0 additions & 4 deletions sched/irq/irq_spinlock.c
Original file line number Diff line number Diff line change
Expand Up @@ -43,10 +43,6 @@

volatile spinlock_t g_irq_spin = SP_UNLOCKED;

/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */

volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];

#ifdef CONFIG_RW_SPINLOCK
/* Used for access control */

Expand Down

0 comments on commit e0778bd

Please sign in to comment.