@@ -14,11 +14,14 @@ struct dma_coherent_mem {
 	int		size;
 	int		flags;
 	unsigned long	*bitmap;
+	spinlock_t	spinlock;
 };
 
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-				dma_addr_t device_addr, size_t size, int flags)
+static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_addr,
+				    size_t size, int flags,
+				    struct dma_coherent_mem **mem)
 {
+	struct dma_coherent_mem *dma_mem = NULL;
 	void __iomem *mem_base = NULL;
 	int pages = size >> PAGE_SHIFT;
 	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
@@ -27,40 +30,77 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 		goto out;
 	if (!size)
 		goto out;
-	if (dev->dma_mem)
-		goto out;
-
-	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
 
 	mem_base = ioremap(phys_addr, size);
 	if (!mem_base)
 		goto out;
 
-	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-	if (!dev->dma_mem)
+	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	if (!dma_mem)
 		goto out;
-	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!dev->dma_mem->bitmap)
-		goto free1_out;
+	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dma_mem->bitmap)
+		goto out;
+
+	dma_mem->virt_base = mem_base;
+	dma_mem->device_base = device_addr;
+	dma_mem->pfn_base = PFN_DOWN(phys_addr);
+	dma_mem->size = pages;
+	dma_mem->flags = flags;
+	spin_lock_init(&dma_mem->spinlock);
 
-	dev->dma_mem->virt_base = mem_base;
-	dev->dma_mem->device_base = device_addr;
-	dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);
-	dev->dma_mem->size = pages;
-	dev->dma_mem->flags = flags;
+	*mem = dma_mem;
 
 	if (flags & DMA_MEMORY_MAP)
 		return DMA_MEMORY_MAP;
 
 	return DMA_MEMORY_IO;
 
- free1_out:
-	kfree(dev->dma_mem);
- out:
+out:
+	kfree(dma_mem);
 	if (mem_base)
 		iounmap(mem_base);
 	return 0;
 }
+
+static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
+{
+	if (!mem)
+		return;
+	iounmap(mem->virt_base);
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+
+static int dma_assign_coherent_memory(struct device *dev,
+				      struct dma_coherent_mem *mem)
+{
+	if (dev->dma_mem)
+		return -EBUSY;
+
+	dev->dma_mem = mem;
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+	return 0;
+}
+
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+				dma_addr_t device_addr, size_t size, int flags)
+{
+	struct dma_coherent_mem *mem;
+	int ret;
+
+	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
+				       &mem);
+	if (ret == 0)
+		return 0;
+
+	if (dma_assign_coherent_memory(dev, mem) == 0)
+		return ret;
+
+	dma_release_coherent_memory(mem);
+	return 0;
+}
 EXPORT_SYMBOL(dma_declare_coherent_memory);
 
 void dma_release_declared_memory(struct device *dev)
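The hunk above splits the old dma_declare_coherent_memory() into init/assign/release helpers so the device-tree path added later in this patch can reuse them. For orientation, a minimal sketch of a caller; the driver, device, and addresses are hypothetical, and only dma_declare_coherent_memory() and its DMA_MEMORY_MAP return convention come from the code above:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Hypothetical: expose 1 MiB of on-chip SRAM at bus address
	 * 0x30000000 as this device's private coherent pool.
	 */
	int ret = dma_declare_coherent_memory(&pdev->dev, 0x30000000,
					      0x30000000, SZ_1M,
					      DMA_MEMORY_MAP |
					      DMA_MEMORY_EXCLUSIVE);
	if (ret != DMA_MEMORY_MAP)
		return -ENOMEM;	/* a 0 return means the declaration failed */

	/* dma_alloc_coherent(&pdev->dev, ...) is now served from the pool */
	return 0;
}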
@@ -69,26 +109,28 @@ void dma_release_declared_memory(struct device *dev)
 
 	if (!mem)
 		return;
+	dma_release_coherent_memory(mem);
 	dev->dma_mem = NULL;
-	iounmap(mem->virt_base);
-	kfree(mem->bitmap);
-	kfree(mem);
 }
 EXPORT_SYMBOL(dma_release_declared_memory);
 
 void *dma_mark_declared_memory_occupied(struct device *dev,
 					dma_addr_t device_addr, size_t size)
 {
 	struct dma_coherent_mem *mem = dev->dma_mem;
+	unsigned long flags;
 	int pos, err;
 
 	size += device_addr & ~PAGE_MASK;
 
 	if (!mem)
 		return ERR_PTR(-EINVAL);
 
+	spin_lock_irqsave(&mem->spinlock, flags);
 	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
 	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+	spin_unlock_irqrestore(&mem->spinlock, flags);
+
 	if (err != 0)
 		return ERR_PTR(err);
 	return mem->virt_base + (pos << PAGE_SHIFT);
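The new spinlock around bitmap_allocate_region() closes a race with concurrent dma_alloc_from_coherent() calls against the same bitmap. A hedged sketch of the typical caller of dma_mark_declared_memory_occupied(), pinning a firmware-owned buffer inside the declared pool; the device and addresses are invented for illustration:

	/* Hypothetical: keep the first 64 KiB at bus address 0x30000000
	 * (say, a bootloader-initialized framebuffer) out of the allocator.
	 */
	void *fb = dma_mark_declared_memory_occupied(&pdev->dev,
						     0x30000000, SZ_64K);
	if (IS_ERR(fb))
		return PTR_ERR(fb);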
@@ -115,6 +157,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
115157{
116158 struct dma_coherent_mem * mem ;
117159 int order = get_order (size );
160+ unsigned long flags ;
118161 int pageno ;
119162
120163 if (!dev )
@@ -124,6 +167,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 		return 0;
 
 	*ret = NULL;
+	spin_lock_irqsave(&mem->spinlock, flags);
 
 	if (unlikely(size > (mem->size << PAGE_SHIFT)))
 		goto err;
@@ -138,10 +182,12 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
 	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	memset(*ret, 0, size);
+	spin_unlock_irqrestore(&mem->spinlock, flags);
 
 	return 1;
 
 err:
+	spin_unlock_irqrestore(&mem->spinlock, flags);
 	/*
 	 * In the case where the allocation can not be satisfied from the
 	 * per-device area, try to fall back to generic memory if the
@@ -171,8 +217,11 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	if (mem && vaddr >= mem->virt_base && vaddr <
 		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+		unsigned long flags;
 
+		spin_lock_irqsave(&mem->spinlock, flags);
 		bitmap_release_region(mem->bitmap, page, order);
+		spin_unlock_irqrestore(&mem->spinlock, flags);
 		return 1;
 	}
 	return 0;
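dma_alloc_from_coherent() and dma_release_from_coherent() are not called by drivers directly; an architecture's dma-mapping code consults them before falling back to the generic allocator. A sketch of that pattern under stated assumptions (the name arch_dma_alloc and the elided fallback are illustrative, not from this patch):

static void *arch_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr;

	/* A nonzero return means dev->dma_mem handled the request; with
	 * DMA_MEMORY_EXCLUSIVE, vaddr may still be NULL on pool exhaustion
	 * and no fallback should occur.
	 */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;

	return NULL;	/* real code would fall back to the page allocator */
}

The free path is symmetric: call dma_release_from_coherent(dev, get_order(size), vaddr) first, and only hand the buffer to the generic free path when it returns 0.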
@@ -218,3 +267,61 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	return 0;
 }
 EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+	struct dma_coherent_mem *mem = rmem->priv;
+
+	if (!mem &&
+	    dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+				     DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
+				     &mem) != DMA_MEMORY_MAP) {
+		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+			&rmem->base, (unsigned long)rmem->size / SZ_1M);
+		return -ENODEV;
+	}
+	rmem->priv = mem;
+	dma_assign_coherent_memory(dev, mem);
+	return 0;
+}
+
+static void rmem_dma_device_release(struct reserved_mem *rmem,
+				    struct device *dev)
+{
+	dev->dma_mem = NULL;
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+	.device_init	= rmem_dma_device_init,
+	.device_release	= rmem_dma_device_release,
+};
+
+static int __init rmem_dma_setup(struct reserved_mem *rmem)
+{
+	unsigned long node = rmem->fdt_node;
+
+	if (of_get_flat_dt_prop(node, "reusable", NULL))
+		return -EINVAL;
+
+#ifdef CONFIG_ARM
+	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
+		pr_err("Reserved memory: regions without no-map are not yet supported\n");
+		return -EINVAL;
+	}
+#endif
+
+	rmem->ops = &rmem_dma_ops;
+	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
+#endif
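For reference, the kind of device-tree fragment this wires up: the "shared-dma-pool" compatible triggers rmem_dma_setup() at early boot, and rmem_dma_device_init() later attaches the pool to each device that claims the region. An illustrative snippet under assumed names; the node label, addresses, size, and the memory-region consumer are made up and not part of this patch:

reserved-memory {
	#address-cells = <1>;
	#size-cells = <1>;
	ranges;

	/* hypothetical 4 MiB pool; no-map is mandatory on ARM here */
	dma_pool: dma_pool@5c000000 {
		compatible = "shared-dma-pool";
		reg = <0x5c000000 0x400000>;
		no-map;
	};
};

&video0 {
	memory-region = <&dma_pool>;
};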