author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-11-29 14:03:30 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-01-27 16:03:09 -0500
commit		69a07f0b117a40fcc1a479358d8e1f41793617f2 (patch)
tree		fc3b827b9a9c7898b35d5cffd5995e56805cc4d8 /drivers/gpu/drm/ttm
parent		f9820a46dd7888b05a36e81166fb1abcc47dcc3f (diff)
ttm: Utilize the DMA API for pages that have TTM_PAGE_FLAG_DMA32 set.
For pages that have the TTM_PAGE_FLAG_DMA32 flag set we use the DMA API. We save the bus address in our array, which we use to program the GART (see "radeon/ttm/PCIe: Use dma_addr if TTM has set it." and "nouveau/ttm/PCIe: Use dma_addr if TTM has set it.").

The reason behind using the DMA API is that under Xen we would end up programming the GART with the bounce-buffer (SWIOTLB) DMA address instead of the physical DMA address of the TTM page, because alloc_page with GFP_DMA32 does not allocate pages under the 4GB mark when running under the Xen hypervisor. On bare metal this simply means we make the DMA API call earlier, instead of when we program the GART.

For details please refer to: https://lkml.org/lkml/2011/1/7/251

[v2: Fixed indentation, revised desc, added Reviewed-by]
Reviewed-by: Thomas Hellstrom <thomas@shipmail.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Ian Campbell <ian.campbell@citrix.com>
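The dma_addr_t array filled in below is what the companion radeon/nouveau patches read back when they program the GART. A minimal sketch of that consumer side is shown here for context; the names my_gart_bind(), my_gart_set_page() and struct my_device are illustrative placeholders, not the actual driver functions:

/*
 * Illustrative sketch only: a GART bind loop that prefers the bus
 * address TTM stored in dma_addr[] (the DMA32 path below) and falls
 * back to mapping the page itself otherwise.  my_gart_bind(),
 * my_gart_set_page() and struct my_device are hypothetical.
 */
static int my_gart_bind(struct my_device *dev, unsigned offset,
			int num_pages, struct page **pages,
			dma_addr_t *dma_addr)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		dma_addr_t bus;

		if (dma_addr && dma_addr[i])
			bus = dma_addr[i];	/* TTM already did the DMA API call */
		else
			bus = pci_map_page(dev->pdev, pages[i], 0, PAGE_SIZE,
					   PCI_DMA_BIDIRECTIONAL);

		my_gart_set_page(dev, offset + i, bus);
	}
	return 0;
}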
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	26
1 files changed, 23 insertions, 3 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 9d9d92945f8c..737a2a2e46a5 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -683,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		gfp_flags |= GFP_HIGHUSER;
 
 	for (r = 0; r < count; ++r) {
-		p = alloc_page(gfp_flags);
+		if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+			void *addr;
+			addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+						  &dma_address[r],
+						  gfp_flags);
+			if (addr == NULL)
+				return -ENOMEM;
+			p = virt_to_page(addr);
+		} else
+			p = alloc_page(gfp_flags);
 		if (!p) {
 			printk(KERN_ERR TTM_PFX
 			       "Unable to allocate page.");
 			return -ENOMEM;
 		}
-
 		list_add(&p->lru, pages);
 	}
 	return 0;
@@ -738,12 +746,24 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p, *tmp;
+	unsigned r;
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
+		r = page_count-1;
 		list_for_each_entry_safe(p, tmp, pages, lru) {
-			__free_page(p);
+			if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+				void *addr = page_address(p);
+				WARN_ON(!addr || !dma_address[r]);
+				if (addr)
+					dma_free_coherent(NULL, PAGE_SIZE,
+							  addr,
+							  dma_address[r]);
+				dma_address[r] = 0;
+			} else
+				__free_page(p);
+			r--;
 		}
 		/* Make the pages list empty */
 		INIT_LIST_HEAD(pages);
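For completeness, here is a hedged caller-side sketch of a round trip through the two patched paths above. It assumes the full ttm_get_pages()/ttm_put_pages() signatures from this patch series (a caching-state argument plus the new dma_addr_t * parameter), trims error handling, and the function name is made up:

/*
 * Hypothetical round trip: allocate 'count' cached DMA32 pages so
 * ttm_get_pages() fills dma_address[], then hand them straight back so
 * the dma_free_coherent() branch above runs (cached pages have no pool,
 * so ttm_put_pages() takes the pool == NULL path).
 */
static int example_dma32_roundtrip(unsigned count, dma_addr_t *dma_address)
{
	LIST_HEAD(pages);
	int ret;

	ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_DMA32, tt_cached,
			    count, dma_address);
	if (ret)
		return ret;

	/* ... program the GART with dma_address[0..count-1] ... */

	ttm_put_pages(&pages, count, TTM_PAGE_FLAG_DMA32, tt_cached,
		      dma_address);
	return 0;
}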