diff options
author | Krishna Reddy <vdumpa@nvidia.com> | 2014-06-26 17:29:30 -0700 |
---|---|---|
committer | Mandar Padmawar <mpadmawar@nvidia.com> | 2014-06-30 04:08:26 -0700 |
commit | a983aa965a92305360f854362cf770bc82503675 (patch) | |
tree | 8b1cb3503b7abd103e9404fe53cd2ba30be8b5f7 /drivers/base/dma-contiguous.c | |
parent | 7d511f3a70cfe9508a9bd2230d8b20b6c5114689 (diff) |
base: dma-contiguous: add API to specify the start of allocation
Add an API, dma_alloc_at_from_contiguous, to support allocations
at a specific physical address.
Bug 200016405
Change-Id: I425a25af3163c391e6b7d9b8bc3299f3ffc7c7c8
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/431949
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/base/dma-contiguous.c')
-rw-r--r-- | drivers/base/dma-contiguous.c | 41 |
1 file changed, 26 insertions, 15 deletions
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 5e3bf5607746..4645fe3da91b 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -292,24 +292,14 @@ err: return base; } -/** - * dma_alloc_from_contiguous() - allocate pages from contiguous area - * @dev: Pointer to device for which the allocation is performed. - * @count: Requested number of pages. - * @align: Requested alignment of pages (in PAGE_SIZE order). - * - * This function allocates memory buffer for specified device. It uses - * device specific contiguous memory area if available or the default - * global one. Requires architecture specific get_dev_cma_area() helper - * function. - */ -struct page *dma_alloc_from_contiguous(struct device *dev, int count, - unsigned int align) +struct page *dma_alloc_at_from_contiguous(struct device *dev, int count, + unsigned int align, phys_addr_t at_addr) { unsigned long mask, pfn, pageno, start = 0; struct cma *cma = dev_get_cma_area(dev); struct page *page = NULL; int ret; + unsigned long start_pfn = __phys_to_pfn(at_addr); if (!cma || !cma->count) return NULL; @@ -325,12 +315,16 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count, mask = (1 << align) - 1; + if (start_pfn && start_pfn < cma->base_pfn) + return NULL; + start = start_pfn ? 
start_pfn - cma->base_pfn : start; + mutex_lock(&cma_mutex); for (;;) { pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, start, count, mask); - if (pageno >= cma->count) + if (pageno >= cma->count || (start && start != pageno)) break; pfn = cma->base_pfn + pageno; @@ -339,7 +333,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count, bitmap_set(cma->bitmap, pageno, count); page = pfn_to_page(pfn); break; - } else if (ret != -EBUSY) { + } else if (ret != -EBUSY || start) { break; } pr_debug("%s(): memory range at %p is busy, retrying\n", @@ -354,6 +348,23 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count, } /** + * dma_alloc_from_contiguous() - allocate pages from contiguous area + * @dev: Pointer to device for which the allocation is performed. + * @count: Requested number of pages. + * @align: Requested alignment of pages (in PAGE_SIZE order). + * + * This function allocates memory buffer for specified device. It uses + * device specific contiguous memory area if available or the default + * global one. Requires architecture specific get_dev_cma_area() helper + * function. + */ +struct page *dma_alloc_from_contiguous(struct device *dev, int count, + unsigned int align) +{ + return dma_alloc_at_from_contiguous(dev, count, align, 0); +} + +/** * dma_release_from_contiguous() - release allocated pages * @dev: Pointer to device for which the pages were allocated. * @pages: Allocated pages. |