From dhobsong@igel.co.jp Mon Oct 29 00:52:50 2012
From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
Date: Mon, 29 Oct 2012 16:50:42 +0900
Subject: [PATCH v2 25/58] ARM: dma-mapping: move all dma bounce code to separate dma ops structure
To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
Message-ID: <1351497075-32717-26-git-send-email-dhobsong@igel.co.jp>


From: Marek Szyprowski <m.szyprowski@samsung.com>

This patch removes the dma bounce hooks from the common dma mapping
implementation on the ARM architecture and creates a separate set of
dma_map_ops for dma bounce devices. Registering a device with
dmabounce_register_dev() now installs these ops via set_dma_ops(), so
the core code no longer needs any CONFIG_DMABOUNCE conditionals.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-by: Subash Patel <subash.ramaswamy@linaro.org>
(cherry picked from commit 15237e1f505b3e5c2276f240b01cd2133e110cbc)

Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
---
 arch/arm/common/dmabounce.c        |   62 ++++++++++++++++++-----
 arch/arm/include/asm/dma-mapping.h |   99 -------------------------------------------
 arch/arm/mm/dma-mapping.c          |   79 +++++++++++++++++++++++----
 3 files changed, 120 insertions(+), 120 deletions(-)
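
For reviewers: a minimal sketch of how a driver opts in to the new
per-device ops after this change. The device, pool sizes, and the
needs_bounce() predicate below are hypothetical, illustrative values;
only dmabounce_register_dev(), set_dma_ops() and the dma_map_ops
dispatch are what this patch actually wires up.

	#include <linux/dma-mapping.h>
	#include <asm/sizes.h>	/* SZ_4K, SZ_64K, SZ_512M */

	/* assumed hardware limit: a 512MB DMA window */
	static int example_needs_bounce(struct device *dev, dma_addr_t addr,
			size_t size)
	{
		return (addr + size) > SZ_512M;
	}

	static int example_probe(struct device *dev, struct page *page)
	{
		dma_addr_t handle;
		int ret;

		/*
		 * Allocates the safe-buffer pools and, with this patch,
		 * also calls set_dma_ops(dev, &dmabounce_ops), so only
		 * this device is routed through the bounce handlers.
		 */
		ret = dmabounce_register_dev(dev, SZ_4K, SZ_64K,
				example_needs_bounce);
		if (ret)
			return ret;

		/* dispatches to dmabounce_map_page() via get_dma_ops(dev) */
		handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
		return 0;
	}

Devices that never call dmabounce_register_dev() are unaffected:
get_dma_ops() keeps returning &arm_dma_ops for them.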

--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -308,8 +308,9 @@ static inline void unmap_single(struct d
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	int ret;
@@ -324,7 +325,7 @@ dma_addr_t __dma_map_page(struct device
 		return DMA_ERROR_CODE;
 
 	if (ret == 0) {
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
 		return dma_addr;
 	}
 
@@ -335,7 +336,6 @@ dma_addr_t __dma_map_page(struct device
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -343,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct safe_buffer *buf;
 
@@ -353,16 +353,14 @@ void __dma_unmap_page(struct device *dev
 
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-			dma_addr & ~PAGE_MASK, size, dir);
+		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
 		return;
 	}
 
 	unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -392,9 +390,17 @@ int dmabounce_sync_for_cpu(struct device
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+static void dmabounce_sync_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -424,7 +430,35 @@ int dmabounce_sync_for_device(struct dev
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev->archdata.dmabounce)
+		return 0;
+
+	return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+	.map_page		= dmabounce_map_page,
+	.unmap_page		= dmabounce_unmap_page,
+	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
+	.sync_single_for_device	= dmabounce_sync_for_device,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
@@ -486,6 +520,7 @@ int dmabounce_register_dev(struct device
 #endif
 
 	dev->archdata.dmabounce = device_info;
+	set_dma_ops(dev, &dmabounce_ops);
 
 	dev_info(dev, "dmabounce: registered device\n");
 
@@ -504,6 +539,7 @@ void dmabounce_unregister_dev(struct dev
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
 	dev->archdata.dmabounce = NULL;
+	set_dma_ops(dev, NULL);
 
 	if (!device_info) {
 		dev_warn(dev,
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -85,62 +85,6 @@ static inline dma_addr_t virt_to_dma(str
 #endif
 
 /*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-/*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -163,6 +107,8 @@ static inline void dma_free_noncoherent(
 {
 }
 
+extern int dma_supported(struct device *dev, u64 mask);
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -235,7 +181,6 @@ int dma_mmap_writecombine(struct device
 extern void __init init_consistent_dma_size(unsigned long size);
 
 
-#ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -275,48 +220,8 @@ extern int dmabounce_register_dev(struct
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/*
- * The DMA API, implemented by dmabounce.c. See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-		unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
 
 
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
 /*
  * The scatter list versions of the above methods.
  */
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -34,6 +34,75 @@
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
+
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+	size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -81,9 +150,6 @@ static inline void arm_dma_sync_single_f
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -92,9 +158,6 @@ static inline void arm_dma_sync_single_f
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -837,7 +900,6 @@ void ___dma_page_cpu_to_dev(struct page
 	}
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
@@ -857,7 +919,6 @@ void ___dma_page_dev_to_cpu(struct page
 	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
 		set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -975,9 +1036,7 @@ static int arm_dma_set_mask(struct devic
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
 	*dev->dma_mask = dma_mask;
-#endif
 
 	return 0;
 }