@@ -79,8 +79,23 @@ struct io_tlb_slot {
 static bool swiotlb_force_bounce;
 static bool swiotlb_force_disable;
 
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+
+static void swiotlb_dyn_alloc(struct work_struct *work);
+
+static struct io_tlb_mem io_tlb_default_mem = {
+	.lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
+	.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
+	.dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
+					swiotlb_dyn_alloc),
+};
+
+#else  /* !CONFIG_SWIOTLB_DYNAMIC */
+
 static struct io_tlb_mem io_tlb_default_mem;
 
+#endif /* CONFIG_SWIOTLB_DYNAMIC */
+
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
 static unsigned long default_nareas;
 
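With CONFIG_SWIOTLB_DYNAMIC the default allocator gains a spinlock, a pool list, and a work item, all initialized at compile time so the object is usable before any initcall runs. The same pattern in isolation, as a minimal sketch (the demo_* names are hypothetical, not part of this patch):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static void demo_worker(struct work_struct *work)
{
	/* deferred growth would run here */
}

/* Statically allocated object: lock, list head and work item are
 * valid from the first instruction, no init function needed.
 */
static struct {
	spinlock_t lock;
	struct list_head pools;
	struct work_struct dyn_alloc;
} demo_mem = {
	.lock = __SPIN_LOCK_UNLOCKED(demo_mem.lock),
	.pools = LIST_HEAD_INIT(demo_mem.pools),
	.dyn_alloc = __WORK_INITIALIZER(demo_mem.dyn_alloc, demo_worker),
};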
@@ -278,6 +293,23 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
 	return;
 }
 
+/**
+ * add_mem_pool() - add a memory pool to the allocator
+ * @mem:	Software IO TLB allocator.
+ * @pool:	Memory pool to be added.
+ */
+static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
+{
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+	spin_lock(&mem->lock);
+	list_add_rcu(&pool->node, &mem->pools);
+	mem->nslabs += pool->nslabs;
+	spin_unlock(&mem->lock);
+#else
+	mem->nslabs = pool->nslabs;
+#endif
+}
+
 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
 		unsigned int flags,
 		int (*remap)(void *tlb, unsigned long nslabs))
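add_mem_pool() is the writer-side publication point: with CONFIG_SWIOTLB_DYNAMIC it serializes writers on mem->lock and publishes the pool with list_add_rcu(), so readers can traverse mem->pools without taking the lock; the non-dynamic build keeps the old single-pool accounting. The lock-plus-RCU-publish shape on its own, as a hedged sketch with illustrative demo_* names:

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_pool {
	struct list_head node;
	unsigned long nslabs;
};

static LIST_HEAD(demo_pools);
static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_total_slabs;

static void demo_add_pool(struct demo_pool *pool)
{
	spin_lock(&demo_lock);
	/* list_add_rcu() orders initialization of *pool before the
	 * pointer becomes visible to lockless readers.
	 */
	list_add_rcu(&pool->node, &demo_pools);
	demo_total_slabs += pool->nslabs;
	spin_unlock(&demo_lock);
}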
@@ -375,7 +407,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 
 	swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
 				 default_nareas);
-	io_tlb_default_mem.nslabs = nslabs;
+	add_mem_pool(&io_tlb_default_mem, mem);
 
 	if (flags & SWIOTLB_VERBOSE)
 		swiotlb_print_info();
@@ -474,7 +506,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 		  (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 	swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
 				 nareas);
-	io_tlb_default_mem.nslabs = nslabs;
+	add_mem_pool(&io_tlb_default_mem, mem);
 
 	swiotlb_print_info();
 	return 0;
@@ -625,53 +657,94 @@ static void swiotlb_free_tlb(void *vaddr, size_t bytes)
 /**
  * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
  * @dev:	Device for which a memory pool is allocated.
- * @nslabs:	Desired number of slabs.
+ * @minslabs:	Minimum number of slabs.
+ * @nslabs:	Desired (maximum) number of slabs.
+ * @nareas:	Number of areas.
  * @phys_limit:	Maximum DMA buffer physical address.
  * @gfp:	GFP flags for the allocations.
  *
- * Allocate and initialize a new IO TLB memory pool.
+ * Allocate and initialize a new IO TLB memory pool. The actual number of
+ * slabs may be reduced if allocation of @nslabs fails. If even
+ * @minslabs cannot be allocated, this function fails.
  *
  * Return: New memory pool, or %NULL on allocation failure.
  */
 static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
-		unsigned int nslabs, u64 phys_limit, gfp_t gfp)
+		unsigned long minslabs, unsigned long nslabs,
+		unsigned int nareas, u64 phys_limit, gfp_t gfp)
 {
 	struct io_tlb_pool *pool;
+	unsigned int slot_order;
 	struct page *tlb;
 	size_t pool_size;
 	size_t tlb_size;
 
-	pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), 1) +
-		array_size(sizeof(*pool->slots), nslabs);
+	pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
 	pool = kzalloc(pool_size, gfp);
 	if (!pool)
 		goto error;
 	pool->areas = (void *)pool + sizeof(*pool);
-	pool->slots = (void *)pool->areas + sizeof(*pool->areas);
 
 	tlb_size = nslabs << IO_TLB_SHIFT;
-	tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp);
-	if (!tlb)
-		goto error_tlb;
+	while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
+		if (nslabs <= minslabs)
+			goto error_tlb;
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		nareas = limit_nareas(nareas, nslabs);
+		tlb_size = nslabs << IO_TLB_SHIFT;
+	}
 
-	swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, 1);
+	slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
+	pool->slots = (struct io_tlb_slot *)
+		__get_free_pages(gfp, slot_order);
+	if (!pool->slots)
+		goto error_slots;
+
+	swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
 	return pool;
 
+error_slots:
+	swiotlb_free_tlb(page_address(tlb), tlb_size);
 error_tlb:
 	kfree(pool);
 error:
 	return NULL;
 }
 
+/**
+ * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
+ * @work:	Pointer to dyn_alloc in struct io_tlb_mem.
+ */
+static void swiotlb_dyn_alloc(struct work_struct *work)
+{
+	struct io_tlb_mem *mem =
+		container_of(work, struct io_tlb_mem, dyn_alloc);
+	struct io_tlb_pool *pool;
+
+	pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
+				  default_nareas, mem->phys_limit, GFP_KERNEL);
+	if (!pool) {
+		pr_warn_ratelimited("Failed to allocate new pool");
+		return;
+	}
+
+	add_mem_pool(mem, pool);
+
+	/* Pairs with smp_rmb() in swiotlb_find_pool(). */
+	smp_wmb();
+}
+
 /**
  * swiotlb_dyn_free() - RCU callback to free a memory pool
  * @rcu:	RCU head in the corresponding struct io_tlb_pool.
  */
 static void swiotlb_dyn_free(struct rcu_head *rcu)
 {
 	struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
+	size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
 	size_t tlb_size = pool->end - pool->start;
 
+	free_pages((unsigned long)pool->slots, get_order(slots_size));
 	swiotlb_free_tlb(pool->vaddr, tlb_size);
 	kfree(pool);
 }
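swiotlb_alloc_pool() no longer fails on the first unsatisfiable request; it retries with geometrically smaller sizes, keeping nslabs aligned to IO_TLB_SEGSIZE and shrinking nareas to match, until it either succeeds or would drop below @minslabs. Reduced to its generic shape, as a hedged sketch (demo_alloc_backoff() is hypothetical; kzalloc() merely stands in for the real TLB allocation):

#include <linux/slab.h>

static void *demo_alloc_backoff(size_t want, size_t floor, size_t *got)
{
	void *buf;

	/* Try the full request first, then halve until it fits or
	 * the floor is reached, mirroring the loop above.
	 */
	while (!(buf = kzalloc(want, GFP_NOWAIT | __GFP_NOWARN))) {
		if (want <= floor)
			return NULL;	/* even the minimum failed */
		want /= 2;
	}
	*got = want;	/* report how much was actually obtained */
	return buf;
}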
@@ -689,15 +762,19 @@ static void swiotlb_dyn_free(struct rcu_head *rcu)
 struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
-	struct io_tlb_pool *pool = &mem->defpool;
-
-	if (paddr >= pool->start && paddr < pool->end)
-		return pool;
+	struct io_tlb_pool *pool;
 
-	/* Pairs with smp_wmb() in swiotlb_find_slots(). */
+	/* Pairs with smp_wmb() in swiotlb_find_slots() and
+	 * swiotlb_dyn_alloc(), which modify the RCU lists.
+	 */
 	smp_rmb();
 
 	rcu_read_lock();
+	list_for_each_entry_rcu(pool, &mem->pools, node) {
+		if (paddr >= pool->start && paddr < pool->end)
+			goto out;
+	}
+
 	list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
 		if (paddr >= pool->start && paddr < pool->end)
 			goto out;
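Address resolution is now two lockless passes: the allocator-wide mem->pools list first, then the per-device transient list, both inside rcu_read_lock() and ordered by the smp_rmb()/smp_wmb() pairing noted in the comment. Condensed to a single-list lookup, as an illustrative sketch (demo_lookup() is not in the patch):

static struct io_tlb_pool *demo_lookup(struct io_tlb_mem *mem,
				       phys_addr_t paddr)
{
	struct io_tlb_pool *pool;
	struct io_tlb_pool *found = NULL;

	smp_rmb();	/* pairs with the writer's smp_wmb() */
	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node) {
		if (paddr >= pool->start && paddr < pool->end) {
			found = pool;	/* first pool covering paddr */
			break;
		}
	}
	rcu_read_unlock();
	return found;
}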
@@ -1046,18 +1123,24 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	u64 phys_limit;
 	int index;
 
-	pool = &mem->defpool;
-	index = swiotlb_pool_find_slots(dev, pool, orig_addr,
-					alloc_size, alloc_align_mask);
-	if (index >= 0)
-		goto found;
-
+	rcu_read_lock();
+	list_for_each_entry_rcu(pool, &mem->pools, node) {
+		index = swiotlb_pool_find_slots(dev, pool, orig_addr,
+						alloc_size, alloc_align_mask);
+		if (index >= 0) {
+			rcu_read_unlock();
+			goto found;
+		}
+	}
+	rcu_read_unlock();
 	if (!mem->can_grow)
 		return -1;
 
+	schedule_work(&mem->dyn_alloc);
+
 	nslabs = nr_slots(alloc_size);
 	phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
-	pool = swiotlb_alloc_pool(dev, nslabs, phys_limit,
+	pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
 				  GFP_NOWAIT | __GFP_NOWARN);
 	if (!pool)
 		return -1;
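When every existing pool is full, growth happens on two timescales: schedule_work() queues swiotlb_dyn_alloc() to add a default-sized pool with GFP_KERNEL in the background, while the current request is served at once from a small single-area pool allocated with GFP_NOWAIT. The kick-then-satisfy pattern, as a hedged sketch (demo_get_buffer() is hypothetical; kzalloc() stands in for the immediate pool allocation):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static int demo_get_buffer(struct io_tlb_mem *mem, size_t size, void **out)
{
	/* Ask the worker to add capacity later, with blocking flags. */
	schedule_work(&mem->dyn_alloc);

	/* Cover this request now; atomic context forbids sleeping. */
	*out = kzalloc(size, GFP_NOWAIT | __GFP_NOWARN);
	return *out ? 0 : -ENOMEM;	/* caller may retry after the worker runs */
}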
@@ -1141,7 +1224,19 @@ static unsigned long mem_pool_used(struct io_tlb_pool *pool)
  */
 static unsigned long mem_used(struct io_tlb_mem *mem)
 {
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+	struct io_tlb_pool *pool;
+	unsigned long used = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(pool, &mem->pools, node)
+		used += mem_pool_used(pool);
+	rcu_read_unlock();
+
+	return used;
+#else
 	return mem_pool_used(&mem->defpool);
+#endif
 }
 
 #endif /* CONFIG_DEBUG_FS */
@@ -1562,7 +1657,10 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 					 false, nareas);
 	mem->force_bounce = true;
 	mem->for_alloc = true;
-	mem->nslabs = nslabs;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+	spin_lock_init(&mem->lock);
+#endif
+	add_mem_pool(mem, pool);
 
 	rmem->priv = mem;
 
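Runtime-created restricted-DMA allocators cannot use the static initializer from the top of the file, so the lock is set up explicitly before add_mem_pool() takes it. The general runtime-init shape, as an illustrative sketch (whether this hunk also initializes mem->pools is not shown; a kzalloc()'ed list head must be initialized before any list_add_rcu()):

mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
	return -ENOMEM;
#ifdef CONFIG_SWIOTLB_DYNAMIC
spin_lock_init(&mem->lock);
INIT_LIST_HEAD_RCU(&mem->pools);	/* required before list_add_rcu() */
#endif
add_mem_pool(mem, pool);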