mm: replace spin_lock_irqsave/spin_unlock_irqrestore with raw_spin_lock_irqsave/raw_spin_unlock_irqrestore
reason:
raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() have better performance than the non-raw variants.

Signed-off-by: hujun5 <[email protected]>
hujun260 committed Jan 26, 2025
1 parent 0dbd482 commit 4514d7f
Showing 8 changed files with 46 additions and 46 deletions.
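All eight files make the same mechanical substitution: a short critical section that already runs with interrupts disabled via spin_lock_irqsave()/spin_unlock_irqrestore() is switched to the raw_ variants. Below is a minimal sketch of the converted call-site pattern; it assumes the usual NuttX <nuttx/irq.h> and <nuttx/spinlock.h> headers, and g_example_lock, g_example_count and example_update() are illustrative names only, not identifiers from this commit.

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

static spinlock_t g_example_lock = SP_UNLOCKED;
static int g_example_count;

static void example_update(void)
{
  irqstate_t flags;

  /* Old form: flags = spin_lock_irqsave(&g_example_lock); */

  flags = raw_spin_lock_irqsave(&g_example_lock);

  g_example_count++; /* Keep the critical section very short: IRQs are off */

  /* Old form: spin_unlock_irqrestore(&g_example_lock, flags); */

  raw_spin_unlock_irqrestore(&g_example_lock, flags);
}

The intent of the change is purely the choice of primitive; each site still disables interrupts around the same short critical section.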
14 changes: 7 additions & 7 deletions mm/iob/iob_alloc.c
@@ -79,7 +79,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
* to protect the committed list: We disable interrupts very briefly.
*/

- flags = spin_lock_irqsave(&g_iob_lock);
+ flags = raw_spin_lock_irqsave(&g_iob_lock);

/* Take the I/O buffer from the head of the committed list */

@@ -98,7 +98,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
iob->io_pktlen = 0; /* Total length of the packet */
}

- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
return iob;
}

@@ -173,7 +173,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
* we are waiting for I/O buffers to become free.
*/

- flags = spin_lock_irqsave(&g_iob_lock);
+ flags = raw_spin_lock_irqsave(&g_iob_lock);

/* Try to get an I/O buffer */

@@ -191,7 +191,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
g_iob_count--;
}

- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);

if (timeout == UINT_MAX)
{
@@ -217,7 +217,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
return iob;
}

- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
return iob;
}

@@ -304,9 +304,9 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
* to protect the free list: We disable interrupts very briefly.
*/

- flags = spin_lock_irqsave(&g_iob_lock);
+ flags = raw_spin_lock_irqsave(&g_iob_lock);
iob = iob_tryalloc_internal(throttled);
- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
return iob;
}

14 changes: 7 additions & 7 deletions mm/iob/iob_alloc_qentry.c
@@ -59,7 +59,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
* to protect the committed list: We disable interrupts very briefly.
*/

- flags = spin_lock_irqsave(&g_iob_lock);
+ flags = raw_spin_lock_irqsave(&g_iob_lock);

/* Take the I/O buffer from the head of the committed list */

@@ -75,7 +75,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
iobq->qe_head = NULL; /* Nothing is contained */
}

- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
return iobq;
}

@@ -127,7 +127,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
* re-enabled while we are waiting for I/O buffers to become free.
*/

- flags = spin_lock_irqsave(&g_iob_lock);
+ flags = raw_spin_lock_irqsave(&g_iob_lock);

/* Try to get an I/O buffer chain container. */

@@ -139,7 +139,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
*/

g_qentry_wait++;
- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
ret = nxsem_wait_uninterruptible(&g_qentry_sem);
if (ret >= 0)
{
@@ -156,7 +156,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
return qentry;
}

- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);

return qentry;
}
@@ -212,9 +212,9 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
* to protect the free list: We disable interrupts very briefly.
*/

- flags = spin_lock_irqsave(&g_iob_lock);
+ flags = raw_spin_lock_irqsave(&g_iob_lock);
iobq = iob_tryalloc_qentry_internal();
- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
return iobq;
}

8 changes: 4 additions & 4 deletions mm/iob/iob_free.c
@@ -132,7 +132,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
* interrupts very briefly.
*/

- flags = spin_lock_irqsave(&g_iob_lock);
+ flags = raw_spin_lock_irqsave(&g_iob_lock);

/* Which list? If there is a task waiting for an IOB, then put
* the IOB on either the free list or on the committed list where
@@ -146,7 +146,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
g_iob_count++;
iob->io_flink = g_iob_committed;
g_iob_committed = iob;
- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
nxsem_post(&g_iob_sem);
}
#if CONFIG_IOB_THROTTLE > 0
@@ -155,7 +155,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
iob->io_flink = g_iob_committed;
g_iob_committed = iob;
g_throttle_wait--;
- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
nxsem_post(&g_throttle_sem);
}
#endif
@@ -164,7 +164,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
g_iob_count++;
iob->io_flink = g_iob_freelist;
g_iob_freelist = iob;
- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
}

DEBUGASSERT(g_iob_count <= CONFIG_IOB_NBUFFERS);
6 changes: 3 additions & 3 deletions mm/iob/iob_free_qentry.c
@@ -60,7 +60,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
* interrupts very briefly.
*/

- flags = spin_lock_irqsave(&g_iob_lock);
+ flags = raw_spin_lock_irqsave(&g_iob_lock);

/* Which list? If there is a task waiting for an IOB chain, then put
* the IOB chain on either the free list or on the committed list where
@@ -73,14 +73,14 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
iobq->qe_flink = g_iob_qcommitted;
g_iob_qcommitted = iobq;
g_qentry_wait--;
- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
nxsem_post(&g_qentry_sem);
}
else
{
iobq->qe_flink = g_iob_freeqlist;
g_iob_freeqlist = iobq;
- spin_unlock_irqrestore(&g_iob_lock, flags);
+ raw_spin_unlock_irqrestore(&g_iob_lock, flags);
}

/* And return the I/O buffer chain container after the one that was freed */
12 changes: 6 additions & 6 deletions mm/kasan/generic.c
@@ -165,7 +165,7 @@ static void kasan_set_poison(FAR const void *addr, size_t size,
mask = KASAN_FIRST_WORD_MASK(bit);
size /= KASAN_SHADOW_SCALE;

- flags = spin_lock_irqsave(&g_lock);
+ flags = raw_spin_lock_irqsave(&g_lock);
while (size >= nbit)
{
if (poisoned)
@@ -197,7 +197,7 @@ static void kasan_set_poison(FAR const void *addr, size_t size,
}
}

- spin_unlock_irqrestore(&g_lock, flags);
+ raw_spin_unlock_irqrestore(&g_lock, flags);
}

/****************************************************************************
@@ -231,12 +231,12 @@ void kasan_register(FAR void *addr, FAR size_t *size)
region->begin = (uintptr_t)addr;
region->end = region->begin + *size;

- flags = spin_lock_irqsave(&g_lock);
+ flags = raw_spin_lock_irqsave(&g_lock);

DEBUGASSERT(g_region_count <= CONFIG_MM_KASAN_REGIONS);
g_region[g_region_count++] = region;

- spin_unlock_irqrestore(&g_lock, flags);
+ raw_spin_unlock_irqrestore(&g_lock, flags);

kasan_start();
kasan_poison(addr, *size);
@@ -248,7 +248,7 @@ void kasan_unregister(FAR void *addr)
irqstate_t flags;
size_t i;

- flags = spin_lock_irqsave(&g_lock);
+ flags = raw_spin_lock_irqsave(&g_lock);
for (i = 0; i < g_region_count; i++)
{
if (g_region[i]->begin == (uintptr_t)addr)
Expand All @@ -260,5 +260,5 @@ void kasan_unregister(FAR void *addr)
}
}

- spin_unlock_irqrestore(&g_lock, flags);
+ raw_spin_unlock_irqrestore(&g_lock, flags);
}
12 changes: 6 additions & 6 deletions mm/kasan/sw_tags.c
@@ -142,14 +142,14 @@ static void kasan_set_poison(FAR const void *addr,
}

size = KASAN_SHADOW_SIZE(size);
- flags = spin_lock_irqsave(&g_lock);
+ flags = raw_spin_lock_irqsave(&g_lock);

while (size--)
{
p[size] = value;
}

- spin_unlock_irqrestore(&g_lock, flags);
+ raw_spin_unlock_irqrestore(&g_lock, flags);
}

/****************************************************************************
Expand Down Expand Up @@ -186,12 +186,12 @@ void kasan_register(FAR void *addr, FAR size_t *size)
region->begin = (uintptr_t)addr;
region->end = region->begin + *size;

- flags = spin_lock_irqsave(&g_lock);
+ flags = raw_spin_lock_irqsave(&g_lock);

DEBUGASSERT(g_region_count <= CONFIG_MM_KASAN_REGIONS);
g_region[g_region_count++] = region;

- spin_unlock_irqrestore(&g_lock, flags);
+ raw_spin_unlock_irqrestore(&g_lock, flags);

kasan_start();
kasan_poison(addr, *size);
@@ -203,7 +203,7 @@ void kasan_unregister(FAR void *addr)
irqstate_t flags;
size_t i;

- flags = spin_lock_irqsave(&g_lock);
+ flags = raw_spin_lock_irqsave(&g_lock);
for (i = 0; i < g_region_count; i++)
{
if (g_region[i]->begin == (uintptr_t)addr)
Expand All @@ -215,5 +215,5 @@ void kasan_unregister(FAR void *addr)
}
}

- spin_unlock_irqrestore(&g_lock, flags);
+ raw_spin_unlock_irqrestore(&g_lock, flags);
}
22 changes: 11 additions & 11 deletions mm/mempool/mempool.c
@@ -345,7 +345,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
irqstate_t flags;

retry:
- flags = spin_lock_irqsave(&pool->lock);
+ flags = raw_spin_lock_irqsave(&pool->lock);
blk = mempool_remove_queue(pool, &pool->queue);
if (blk == NULL)
{
@@ -354,15 +354,15 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
blk = mempool_remove_queue(pool, &pool->iqueue);
if (blk == NULL)
{
- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
return blk;
}
}
else
{
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);

- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
if (pool->expandsize >= blocksize + sizeof(sq_entry_t))
{
size_t nexpand = (pool->expandsize - sizeof(sq_entry_t)) /
@@ -376,7 +376,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
}

kasan_poison(base, size);
- flags = spin_lock_irqsave(&pool->lock);
+ flags = raw_spin_lock_irqsave(&pool->lock);
mempool_add_queue(pool, &pool->queue,
base, nexpand, blocksize);
sq_addlast((FAR sq_entry_t *)(base + nexpand * blocksize),
@@ -396,7 +396,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
}

pool->nalloc++;
- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);

#if CONFIG_MM_BACKTRACE >= 0
mempool_add_backtrace(pool, (FAR struct mempool_backtrace_s *)
@@ -424,7 +424,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)

void mempool_release(FAR struct mempool_s *pool, FAR void *blk)
{
- irqstate_t flags = spin_lock_irqsave(&pool->lock);
+ irqstate_t flags = raw_spin_lock_irqsave(&pool->lock);
size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
#if CONFIG_MM_BACKTRACE >= 0
FAR struct mempool_backtrace_s *buf =
@@ -461,7 +461,7 @@ void mempool_release(FAR struct mempool_s *pool, FAR void *blk)
}

kasan_poison(blk, pool->blocksize);
- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
if (pool->wait && pool->expandsize == 0)
{
int semcount;
@@ -495,13 +495,13 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)

DEBUGASSERT(pool != NULL && info != NULL);

- flags = spin_lock_irqsave(&pool->lock);
+ flags = raw_spin_lock_irqsave(&pool->lock);
info->ordblks = sq_count(&pool->queue);
info->iordblks = sq_count(&pool->iqueue);
info->aordblks = pool->nalloc;
info->arena = sq_count(&pool->equeue) * sizeof(sq_entry_t) +
(info->aordblks + info->ordblks + info->iordblks) * blocksize;
- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
info->sizeblks = blocksize;
if (pool->wait && pool->expandsize == 0)
{
@@ -534,11 +534,11 @@ mempool_info_task(FAR struct mempool_s *pool,

if (task->pid == PID_MM_FREE)
{
- irqstate_t flags = spin_lock_irqsave(&pool->lock);
+ irqstate_t flags = raw_spin_lock_irqsave(&pool->lock);
size_t count = sq_count(&pool->queue) +
sq_count(&pool->iqueue);

- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
info.aordblks += count;
info.uordblks += count * blocksize;
}
4 changes: 2 additions & 2 deletions mm/mm_gran/mm_grancritical.c
@@ -59,7 +59,7 @@
int gran_enter_critical(FAR struct gran_s *priv)
{
#ifdef CONFIG_GRAN_INTR
- priv->irqstate = spin_lock_irqsave(&priv->lock);
+ priv->irqstate = raw_spin_lock_irqsave(&priv->lock);
return OK;
#else
return nxmutex_lock(&priv->lock);
@@ -69,7 +69,7 @@ int gran_enter_critical(FAR struct gran_s *priv)
void gran_leave_critical(FAR struct gran_s *priv)
{
#ifdef CONFIG_GRAN_INTR
- spin_unlock_irqrestore(&priv->lock, priv->irqstate);
+ raw_spin_unlock_irqrestore(&priv->lock, priv->irqstate);
#else
nxmutex_unlock(&priv->lock);
#endif
