/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;	/* CPU virtual address of the area */
	dma_addr_t	device_base;	/* DMA address as seen by the device */
	phys_addr_t	pfn_base;	/* first PFN of the backing memory */
	int		size;		/* size of the area, in pages */
	int		flags;		/* DMA_MEMORY_* flags */
	unsigned long	*bitmap;	/* one bit per page, set = in use */
};

/**
 * dma_declare_coherent_memory() - declare a per-device coherent memory area
 * @dev:	device to attach the memory area to
 * @bus_addr:	bus address the CPU uses to reach the memory
 * @device_addr: DMA address the device uses to reach the memory
 * @size:	size of the memory area
 * @flags:	DMA_MEMORY_* flags describing how the area may be used
 *
 * Returns DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on failure.
 */
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
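
/*
 * Usage sketch (illustrative only, not part of this file): a platform
 * driver with device-local memory might declare the area at probe time
 * and release it on remove. The addresses, size, and function names
 * below are hypothetical.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	/* 1 MiB of device-local RAM, assumed visible to both the CPU
	 * and the device at bus address 0x80000000 (hypothetical). */
	int rc = dma_declare_coherent_memory(&pdev->dev,
					     0x80000000, 0x80000000, SZ_1M,
					     DMA_MEMORY_MAP |
					     DMA_MEMORY_EXCLUSIVE);
	if (!(rc & DMA_MEMORY_MAP))
		return -ENOMEM;
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* Unmaps the area and frees the bitmap and bookkeeping struct. */
	dma_release_declared_memory(&pdev->dev);
	return 0;
}
#endif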

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
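
/*
 * Usage sketch (illustrative only): once an area is declared, a driver
 * can pin a fixed sub-region, e.g. a page the hardware insists on using
 * at a specific device address. Names and addresses are hypothetical.
 */
#if 0
static int example_reserve_fixed(struct platform_device *pdev)
{
	void *fw_buf;

	/* Reserve the 4 KiB page at device address 0x80001000 so the
	 * allocator never hands it out; fw_buf is its CPU mapping. */
	fw_buf = dma_mark_declared_memory_occupied(&pdev->dev,
						   0x80001000, SZ_4K);
	if (IS_ERR(fw_buf))
		return PTR_ERR(fw_buf);
	return 0;
}
#endif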

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);

	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
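
/*
 * Call-site sketch (illustrative only): roughly how an architecture's
 * dma_alloc_coherent() is expected to use the hook above before falling
 * back to the page allocator. The surrounding function is hypothetical.
 */
#if 0
void *arch_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* A non-zero return means the per-device pool handled the
	 * request (ret may still be NULL for an exclusive pool that
	 * ran out); in either case, do not fall back. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* ... otherwise allocate from generic memory ... */
	return NULL;
}
#endif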

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller
 * should proceed with releasing memory from generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
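
/*
 * Call-site sketch (illustrative only): the matching free path offers
 * the buffer back to the per-device pool first. The surrounding
 * function is hypothetical.
 */
#if 0
void arch_dma_free_coherent(struct device *dev, size_t size,
			    void *vaddr, dma_addr_t dma_handle)
{
	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	/* ... release pages that came from generic memory ... */
}
#endif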

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;

			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
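
/*
 * Call-site sketch (illustrative only): an architecture's dma_mmap
 * implementation tries the per-device pool first and only maps from
 * generic memory when the hook returns 0. The surrounding function is
 * hypothetical.
 */
#if 0
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* ... map buffers that came from generic memory ... */
	return -ENXIO;
}
#endif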