| From dhobsong@igel.co.jp Mon Oct 29 00:52:47 2012 |
| From: Damian Hobson-Garcia <dhobsong@igel.co.jp> |
| Date: Mon, 29 Oct 2012 16:50:41 +0900 |
| Subject: [PATCH v2 24/58] ARM: dma-mapping: implement dma sg methods on top of any generic dma ops |
| To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au |
| Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp |
| Message-ID: <1351497075-32717-25-git-send-email-dhobsong@igel.co.jp> |
| |
| |
| From: Marek Szyprowski <m.szyprowski@samsung.com> |
| |
| This patch converts all dma_sg methods to be generic (independent of the |
| current DMA mapping implementation for ARM architecture). All dma sg |
| operations are now implemented on top of the respective |
| dma_map_page/dma_sync_single_for* operations from dma_map_ops structure. |
| |
| Before this patch there were custom methods for all scatter/gather |
| related operations. They iterated over the whole scatter list and called |
| cache related operations directly (which in turn checked if we use dma |
| bounce code or not and called the respective version). This patch changes |
| them not to use such a shortcut. Instead it provides a similar loop over |
| the scatter list and calls methods from the device's dma_map_ops structure. |
| This enables us to use device dependent implementations of cache related |
| operations (direct linear or dma bounce) depending on the provided |
| dma_map_ops structure. |
| |
| Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> |
| Acked-by: Kyungmin Park <kyungmin.park@samsung.com> |
| Tested-By: Subash Patel <subash.ramaswamy@linaro.org> |
| (cherry picked from commit 2a550e73d3e5f040a3e8eb733c942ab352eafb36) |
| |
| Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp> |
| Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com> |
| --- |
| arch/arm/mm/dma-mapping.c | 43 +++++++++++++++++++------------------------ |
| 1 file changed, 19 insertions(+), 24 deletions(-) |
| |
| --- a/arch/arm/mm/dma-mapping.c |
| +++ b/arch/arm/mm/dma-mapping.c |
| @@ -860,7 +860,7 @@ void ___dma_page_dev_to_cpu(struct page |
| EXPORT_SYMBOL(___dma_page_dev_to_cpu); |
| |
| /** |
| - * dma_map_sg - map a set of SG buffers for streaming mode DMA |
| + * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA |
| * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
| * @sg: list of buffers |
| * @nents: number of buffers to map |
| @@ -878,12 +878,13 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu); |
| int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
| enum dma_data_direction dir, struct dma_attrs *attrs) |
| { |
| + struct dma_map_ops *ops = get_dma_ops(dev); |
| struct scatterlist *s; |
| int i, j; |
| |
| for_each_sg(sg, s, nents, i) { |
| - s->dma_address = __dma_map_page(dev, sg_page(s), s->offset, |
| - s->length, dir); |
| + s->dma_address = ops->map_page(dev, sg_page(s), s->offset, |
| + s->length, dir, attrs); |
| if (dma_mapping_error(dev, s->dma_address)) |
| goto bad_mapping; |
| } |
| @@ -891,12 +892,12 @@ int arm_dma_map_sg(struct device *dev, s |
| |
| bad_mapping: |
| for_each_sg(sg, s, i, j) |
| - __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); |
| + ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); |
| return 0; |
| } |
| |
| /** |
| - * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg |
| + * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg |
| * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
| * @sg: list of buffers |
| * @nents: number of buffers to unmap (same as was passed to dma_map_sg) |
| @@ -908,15 +909,17 @@ int arm_dma_map_sg(struct device *dev, s |
| void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
| enum dma_data_direction dir, struct dma_attrs *attrs) |
| { |
| + struct dma_map_ops *ops = get_dma_ops(dev); |
| struct scatterlist *s; |
| + |
| int i; |
| |
| for_each_sg(sg, s, nents, i) |
| - __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); |
| + ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); |
| } |
| |
| /** |
| - * dma_sync_sg_for_cpu |
| + * arm_dma_sync_sg_for_cpu |
| * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
| * @sg: list of buffers |
| * @nents: number of buffers to map (returned from dma_map_sg) |
| @@ -925,21 +928,17 @@ void arm_dma_unmap_sg(struct device *dev |
| void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
| int nents, enum dma_data_direction dir) |
| { |
| + struct dma_map_ops *ops = get_dma_ops(dev); |
| struct scatterlist *s; |
| int i; |
| |
| - for_each_sg(sg, s, nents, i) { |
| - if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), |
| - sg_dma_len(s), dir)) |
| - continue; |
| - |
| - __dma_page_dev_to_cpu(sg_page(s), s->offset, |
| - s->length, dir); |
| - } |
| + for_each_sg(sg, s, nents, i) |
| + ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, |
| + dir); |
| } |
| |
| /** |
| - * dma_sync_sg_for_device |
| + * arm_dma_sync_sg_for_device |
| * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
| * @sg: list of buffers |
| * @nents: number of buffers to map (returned from dma_map_sg) |
| @@ -948,17 +947,13 @@ void arm_dma_sync_sg_for_cpu(struct devi |
| void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
| int nents, enum dma_data_direction dir) |
| { |
| + struct dma_map_ops *ops = get_dma_ops(dev); |
| struct scatterlist *s; |
| int i; |
| |
| - for_each_sg(sg, s, nents, i) { |
| - if (!dmabounce_sync_for_device(dev, sg_dma_address(s), |
| - sg_dma_len(s), dir)) |
| - continue; |
| - |
| - __dma_page_cpu_to_dev(sg_page(s), s->offset, |
| - s->length, dir); |
| - } |
| + for_each_sg(sg, s, nents, i) |
| + ops->sync_single_for_device(dev, sg_dma_address(s), s->length, |
| + dir); |
| } |
| |
| /* |