@@ -606,7 +606,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
606606 * by this lock, but we want to avoid concurrent calls for performance
607607 * reasons and to make the pcpu_get_vm_areas more deterministic.
608608 */
609- static DEFINE_SPINLOCK (vmap_purge_lock );
609+ static DEFINE_MUTEX (vmap_purge_lock );
610610
611611/* for per-CPU blocks */
612612static void purge_fragmented_blocks_allcpus (void );
@@ -660,9 +660,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
660660 */
661661static void try_purge_vmap_area_lazy (void )
662662{
663- if (spin_trylock (& vmap_purge_lock )) {
663+ if (mutex_trylock (& vmap_purge_lock )) {
664664 __purge_vmap_area_lazy (ULONG_MAX , 0 );
665- spin_unlock (& vmap_purge_lock );
665+ mutex_unlock (& vmap_purge_lock );
666666 }
667667}
668668
@@ -671,10 +671,10 @@ static void try_purge_vmap_area_lazy(void)
671671 */
672672static void purge_vmap_area_lazy (void )
673673{
674- spin_lock (& vmap_purge_lock );
674+ mutex_lock (& vmap_purge_lock );
675675 purge_fragmented_blocks_allcpus ();
676676 __purge_vmap_area_lazy (ULONG_MAX , 0 );
677- spin_unlock (& vmap_purge_lock );
677+ mutex_unlock (& vmap_purge_lock );
678678}
679679
680680/*
@@ -1063,11 +1063,11 @@ void vm_unmap_aliases(void)
10631063 rcu_read_unlock ();
10641064 }
10651065
1066- spin_lock (& vmap_purge_lock );
1066+ mutex_lock (& vmap_purge_lock );
10671067 purge_fragmented_blocks_allcpus ();
10681068 if (!__purge_vmap_area_lazy (start , end ) && flush )
10691069 flush_tlb_kernel_range (start , end );
1070- spin_unlock (& vmap_purge_lock );
1070+ mutex_unlock (& vmap_purge_lock );
10711071}
10721072EXPORT_SYMBOL_GPL (vm_unmap_aliases );
10731073
0 commit comments