diff --git a/mm/iob/iob_alloc.c b/mm/iob/iob_alloc.c
index a0fb94ec523d9..893ec8cfb26f8 100644
--- a/mm/iob/iob_alloc.c
+++ b/mm/iob/iob_alloc.c
@@ -79,7 +79,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
    * to protect the committed list:  We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -98,7 +98,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
@@ -173,7 +173,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
    * we are waiting for I/O buffers to become free.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Try to get an I/O buffer */
 
@@ -191,7 +191,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
           g_iob_count--;
         }
 
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
 
       if (timeout == UINT_MAX)
         {
@@ -217,7 +217,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
       return iob;
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
@@ -304,9 +304,9 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
    * to protect the free list:  We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
   iob = iob_tryalloc_internal(throttled);
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
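For context on the substitution applied uniformly in this patch: in NuttX the instrumented spin_lock_irqsave() layers scheduler instrumentation on top of the raw primitive, and that instrumentation can itself allocate memory or be KASAN-checked. A minimal sketch of the assumed layering, not the verbatim NuttX header; note_spinlock_hook is a placeholder name, not a real symbol:

static inline irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
{
  /* The raw form only disables interrupts and spins on the lock */

  irqstate_t flags = raw_spin_lock_irqsave(lock);

  /* The instrumented form additionally calls out to scheduler
   * instrumentation.  If that callout allocates (IOBs, mempool) or is
   * KASAN-checked, it can re-enter the very paths patched here while
   * the lock is already held.
   */

  note_spinlock_hook(lock); /* placeholder for the instrumentation hook */
  return flags;
}
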
diff --git a/mm/iob/iob_alloc_qentry.c b/mm/iob/iob_alloc_qentry.c
index d4ae2fff13793..3eccdcd2c977d 100644
--- a/mm/iob/iob_alloc_qentry.c
+++ b/mm/iob/iob_alloc_qentry.c
@@ -59,7 +59,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
    * to protect the committed list:  We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -75,7 +75,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iobq;
 }
 
@@ -127,7 +127,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
    * re-enabled while we are waiting for I/O buffers to become free.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Try to get an I/O buffer chain container. */
 
@@ -139,7 +139,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
        */
 
       g_qentry_wait++;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
       ret = nxsem_wait_uninterruptible(&g_qentry_sem);
       if (ret >= 0)
         {
@@ -156,7 +156,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
       return qentry;
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
 
   return qentry;
 }
@@ -212,9 +212,9 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
    * to protect the free list:  We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
   iobq = iob_tryalloc_qentry_internal();
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iobq;
 }
 
diff --git a/mm/iob/iob_free.c b/mm/iob/iob_free.c
index 86af4304d26f3..07b21c1c401b3 100644
--- a/mm/iob/iob_free.c
+++ b/mm/iob/iob_free.c
@@ -132,7 +132,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
    * interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB, then put
    * the IOB on either the free list or on the committed list where
@@ -146,7 +146,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       g_iob_count++;
       iob->io_flink   = g_iob_committed;
       g_iob_committed = iob;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
       nxsem_post(&g_iob_sem);
     }
 #if CONFIG_IOB_THROTTLE > 0
@@ -155,7 +155,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       iob->io_flink   = g_iob_committed;
       g_iob_committed = iob;
       g_throttle_wait--;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
       nxsem_post(&g_throttle_sem);
     }
 #endif
@@ -164,7 +164,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       g_iob_count++;
       iob->io_flink   = g_iob_freelist;
       g_iob_freelist  = iob;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
     }
 
   DEBUGASSERT(g_iob_count <= CONFIG_IOB_NBUFFERS);
diff --git a/mm/iob/iob_free_qentry.c b/mm/iob/iob_free_qentry.c
index 489cb88449215..6ec91dba686db 100644
--- a/mm/iob/iob_free_qentry.c
+++ b/mm/iob/iob_free_qentry.c
@@ -60,7 +60,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    * interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB chain, then put
    * the IOB chain on either the free list or on the committed list where
@@ -73,14 +73,14 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
       iobq->qe_flink   = g_iob_qcommitted;
       g_iob_qcommitted = iobq;
       g_qentry_wait--;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
       nxsem_post(&g_qentry_sem);
     }
   else
     {
       iobq->qe_flink   = g_iob_freeqlist;
       g_iob_freeqlist  = iobq;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
     }
 
   /* And return the I/O buffer chain container after the one that was freed */
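The free paths above preserve a deliberate ordering: the buffer is published under the lock, the lock is released, and only then is the waiter woken. A condensed sketch of that pattern using the globals visible in the hunks; the wrapper function itself is illustrative only:

static void iob_publish_committed(FAR struct iob_s *iob)
{
  irqstate_t flags = raw_spin_lock_irqsave(&g_iob_lock);

  /* Publish the buffer on the committed list under the lock */

  iob->io_flink   = g_iob_committed;
  g_iob_committed = iob;
  raw_spin_unlock_irqrestore(&g_iob_lock, flags);

  /* Post only after the unlock, so the raw spinlock is never held
   * across the potential context switch that nxsem_post() can cause.
   */

  nxsem_post(&g_iob_sem);
}
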
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index b5b6d3ee56153..21db5d57e3b14 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -165,7 +165,7 @@ static void kasan_set_poison(FAR const void *addr, size_t size,
   mask = KASAN_FIRST_WORD_MASK(bit);
   size /= KASAN_SHADOW_SCALE;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
   while (size >= nbit)
     {
       if (poisoned)
@@ -197,7 +197,7 @@ static void kasan_set_poison(FAR const void *addr, size_t size,
         }
     }
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 }
 
 /****************************************************************************
@@ -231,12 +231,12 @@ void kasan_register(FAR void *addr, FAR size_t *size)
   region->begin = (uintptr_t)addr;
   region->end   = region->begin + *size;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
 
   DEBUGASSERT(g_region_count <= CONFIG_MM_KASAN_REGIONS);
   g_region[g_region_count++] = region;
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 
   kasan_start();
   kasan_poison(addr, *size);
@@ -248,7 +248,7 @@ void kasan_unregister(FAR void *addr)
   irqstate_t flags;
   size_t i;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
   for (i = 0; i < g_region_count; i++)
     {
       if (g_region[i]->begin == (uintptr_t)addr)
@@ -260,5 +260,5 @@ void kasan_unregister(FAR void *addr)
         }
     }
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 }
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index d1ba3bf73a535..fca989db462d2 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -142,14 +142,14 @@ static void kasan_set_poison(FAR const void *addr,
     }
 
   size = KASAN_SHADOW_SIZE(size);
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
 
   while (size--)
     {
       p[size] = value;
     }
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 }
 
 /****************************************************************************
@@ -186,12 +186,12 @@ void kasan_register(FAR void *addr, FAR size_t *size)
   region->begin = (uintptr_t)addr;
   region->end   = region->begin + *size;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
 
   DEBUGASSERT(g_region_count <= CONFIG_MM_KASAN_REGIONS);
   g_region[g_region_count++] = region;
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 
   kasan_start();
   kasan_poison(addr, *size);
@@ -203,7 +203,7 @@ void kasan_unregister(FAR void *addr)
   irqstate_t flags;
   size_t i;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
   for (i = 0; i < g_region_count; i++)
     {
       if (g_region[i]->begin == (uintptr_t)addr)
@@ -215,5 +215,5 @@ void kasan_unregister(FAR void *addr)
         }
     }
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 }
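Both KASAN backends take g_lock on every shadow update, which makes them the most exposed to instrumentation re-entry. The hazard, as inferred from this change rather than stated in it:

/* Assumed re-entry cycle that the raw variant breaks:
 *
 *   kasan_set_poison()
 *     -> spin_lock_irqsave(&g_lock)        instrumented lock
 *       -> instrumentation hook
 *         -> memory access checked by KASAN
 *           -> kasan_set_poison()          re-entry, g_lock still held
 *
 * raw_spin_lock_irqsave() takes the lock without the hook, so a shadow
 * update can never trigger a nested shadow update.
 */
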
diff --git a/mm/mempool/mempool.c b/mm/mempool/mempool.c
index 89b3f80e24f06..f7a411fd5bd85 100644
--- a/mm/mempool/mempool.c
+++ b/mm/mempool/mempool.c
@@ -345,7 +345,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
   irqstate_t flags;
 
 retry:
-  flags = spin_lock_irqsave(&pool->lock);
+  flags = raw_spin_lock_irqsave(&pool->lock);
   blk = mempool_remove_queue(pool, &pool->queue);
   if (blk == NULL)
     {
@@ -354,7 +354,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
           blk = mempool_remove_queue(pool, &pool->iqueue);
           if (blk == NULL)
             {
-              spin_unlock_irqrestore(&pool->lock, flags);
+              raw_spin_unlock_irqrestore(&pool->lock, flags);
               return blk;
             }
         }
@@ -362,7 +362,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
         {
           size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
 
-          spin_unlock_irqrestore(&pool->lock, flags);
+          raw_spin_unlock_irqrestore(&pool->lock, flags);
           if (pool->expandsize >= blocksize + sizeof(sq_entry_t))
             {
               size_t nexpand = (pool->expandsize - sizeof(sq_entry_t)) /
@@ -376,7 +376,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
                 }
 
               kasan_poison(base, size);
-              flags = spin_lock_irqsave(&pool->lock);
+              flags = raw_spin_lock_irqsave(&pool->lock);
               mempool_add_queue(pool, &pool->queue,
                                 base, nexpand, blocksize);
               sq_addlast((FAR sq_entry_t *)(base + nexpand * blocksize),
@@ -396,7 +396,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
     }
 
   pool->nalloc++;
-  spin_unlock_irqrestore(&pool->lock, flags);
+  raw_spin_unlock_irqrestore(&pool->lock, flags);
 
 #if CONFIG_MM_BACKTRACE >= 0
   mempool_add_backtrace(pool, (FAR struct mempool_backtrace_s *)
@@ -424,7 +424,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
 
 void mempool_release(FAR struct mempool_s *pool, FAR void *blk)
 {
-  irqstate_t flags = spin_lock_irqsave(&pool->lock);
+  irqstate_t flags = raw_spin_lock_irqsave(&pool->lock);
   size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
 #if CONFIG_MM_BACKTRACE >= 0
   FAR struct mempool_backtrace_s *buf =
@@ -461,7 +461,7 @@ void mempool_release(FAR struct mempool_s *pool, FAR void *blk)
     }
 
   kasan_poison(blk, pool->blocksize);
-  spin_unlock_irqrestore(&pool->lock, flags);
+  raw_spin_unlock_irqrestore(&pool->lock, flags);
   if (pool->wait && pool->expandsize == 0)
     {
       int semcount;
@@ -495,13 +495,13 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
 
   DEBUGASSERT(pool != NULL && info != NULL);
 
-  flags = spin_lock_irqsave(&pool->lock);
+  flags = raw_spin_lock_irqsave(&pool->lock);
   info->ordblks = sq_count(&pool->queue);
   info->iordblks = sq_count(&pool->iqueue);
   info->aordblks = pool->nalloc;
   info->arena = sq_count(&pool->equeue) * sizeof(sq_entry_t) +
     (info->aordblks + info->ordblks + info->iordblks) * blocksize;
-  spin_unlock_irqrestore(&pool->lock, flags);
+  raw_spin_unlock_irqrestore(&pool->lock, flags);
   info->sizeblks = blocksize;
   if (pool->wait && pool->expandsize == 0)
     {
@@ -534,11 +534,11 @@ mempool_info_task(FAR struct mempool_s *pool,
 
   if (task->pid == PID_MM_FREE)
     {
-      irqstate_t flags = spin_lock_irqsave(&pool->lock);
+      irqstate_t flags = raw_spin_lock_irqsave(&pool->lock);
       size_t count = sq_count(&pool->queue) +
                      sq_count(&pool->iqueue);
 
-      spin_unlock_irqrestore(&pool->lock, flags);
+      raw_spin_unlock_irqrestore(&pool->lock, flags);
       info.aordblks += count;
       info.uordblks += count * blocksize;
     }
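mempool_allocate() is the one function above where the lock is dropped and retaken mid-operation. A condensed control-flow sketch, abridged from the hunks (mempool_allocate_sketch and the elided steps are illustrative, not the real code):

FAR void *mempool_allocate_sketch(FAR struct mempool_s *pool)
{
  FAR void *blk;
  irqstate_t flags;

retry:
  flags = raw_spin_lock_irqsave(&pool->lock);
  blk = mempool_remove_queue(pool, &pool->queue);
  if (blk == NULL)
    {
      /* Drop the raw lock first: waiting for a free block or expanding
       * the pool can block or allocate, and neither may run with
       * interrupts disabled under a spinlock.
       */

      raw_spin_unlock_irqrestore(&pool->lock, flags);

      /* ... wait for a free block or expand the pool ... */

      goto retry;
    }

  pool->nalloc++;
  raw_spin_unlock_irqrestore(&pool->lock, flags);
  return blk;
}
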
diff --git a/mm/mm_gran/mm_grancritical.c b/mm/mm_gran/mm_grancritical.c
index 06481e1de06f4..4d853e85138fc 100644
--- a/mm/mm_gran/mm_grancritical.c
+++ b/mm/mm_gran/mm_grancritical.c
@@ -59,7 +59,7 @@
 int gran_enter_critical(FAR struct gran_s *priv)
 {
 #ifdef CONFIG_GRAN_INTR
-  priv->irqstate = spin_lock_irqsave(&priv->lock);
+  priv->irqstate = raw_spin_lock_irqsave(&priv->lock);
   return OK;
 #else
   return nxmutex_lock(&priv->lock);
@@ -69,7 +69,7 @@ int gran_enter_critical(FAR struct gran_s *priv)
 void gran_leave_critical(FAR struct gran_s *priv)
 {
 #ifdef CONFIG_GRAN_INTR
-  spin_unlock_irqrestore(&priv->lock, priv->irqstate);
+  raw_spin_unlock_irqrestore(&priv->lock, priv->irqstate);
 #else
   nxmutex_unlock(&priv->lock);
 #endif
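
Finally, gran_enter_critical() and gran_leave_critical() hide the policy choice from callers: with CONFIG_GRAN_INTR the granule allocator must be interrupt-safe and takes the raw spinlock, otherwise it blocks on a mutex. A hypothetical caller (gran_critical_example is not a real NuttX function):

static int gran_critical_example(FAR struct gran_s *priv)
{
  int ret = gran_enter_critical(priv);
  if (ret < 0)
    {
      return ret; /* only the mutex path can fail */
    }

  /* ... operate on the granule allocation tables ... */

  gran_leave_critical(priv);
  return OK;
}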