Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  575
1 file changed, 331 insertions(+), 244 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30dc4b42..2683377f4131 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
/*
* NV10-NV40 tiling helpers
*/
static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+ struct nouveau_tile_reg *tile, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+ int i = tile - dev_priv->tile.reg;
+ unsigned long save;
- tile->addr = addr;
- tile->size = size;
- tile->used = !!pitch;
- nouveau_fence_unref((void **)&tile->fence);
+ nouveau_fence_unref(&tile->fence);
+ if (tile->pitch)
+ pfb->free_tile_region(dev, i);
+
+ if (pitch)
+ pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, save);
pfifo->reassign(dev, false);
pfifo->cache_pull(dev, false);
nouveau_wait_for_idle(dev);
- pgraph->set_region_tiling(dev, i, addr, size, pitch);
- pfb->set_region_tiling(dev, i, addr, size, pitch);
+ pfb->set_tile_region(dev, i);
+ pgraph->set_tile_region(dev, i);
pfifo->cache_pull(dev, true);
pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
- uint32_t pitch)
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_tile_reg *found = NULL;
- unsigned long i, flags;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- for (i = 0; i < pfb->num_tiles; i++) {
- struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+ spin_lock(&dev_priv->tile.lock);
- if (tile->used)
- /* Tile region in use. */
- continue;
-
- if (tile->fence &&
- !nouveau_fence_signalled(tile->fence, NULL))
- /* Pending tile region. */
- continue;
-
- if (max(tile->addr, addr) <
- min(tile->addr + tile->size, addr + size))
- /* Kill an intersecting tile region. */
- nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
-
- if (pitch && !found) {
- /* Free tile region. */
- nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
- found = tile;
- }
- }
-
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ if (!tile->used &&
+ (!tile->fence || nouveau_fence_signalled(tile->fence)))
+ tile->used = true;
+ else
+ tile = NULL;
- return found;
+ spin_unlock(&dev_priv->tile.lock);
+ return tile;
}
void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
- struct nouveau_fence *fence)
-{
- if (fence) {
- /* Mark it as pending. */
- tile->fence = fence;
- nouveau_fence_ref(fence);
- }
-
- tile->used = false;
-}
-
-/*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
- uint32_t flags, uint64_t phys)
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pgt;
- unsigned block;
- int i;
-
- virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
- size = (size >> 16) << 1;
-
- phys |= ((uint64_t)flags << 32);
- phys |= 1;
- if (dev_priv->vram_sys_base) {
- phys += dev_priv->vram_sys_base;
- phys |= 0x30;
- }
- while (size) {
- unsigned offset_h = upper_32_bits(phys);
- unsigned offset_l = lower_32_bits(phys);
- unsigned pte, end;
-
- for (i = 7; i >= 0; i--) {
- block = 1 << (i + 1);
- if (size >= block && !(virt & (block - 1)))
- break;
- }
- offset_l |= (i << 7);
-
- phys += block << 15;
- size -= block;
-
- while (block) {
- pgt = dev_priv->vm_vram_pt[virt >> 14];
- pte = virt & 0x3ffe;
-
- end = pte + block;
- if (end > 16384)
- end = 16384;
- block -= (end - pte);
- virt += (end - pte);
-
- while (pte < end) {
- nv_wo32(pgt, (pte * 4) + 0, offset_l);
- nv_wo32(pgt, (pte * 4) + 4, offset_h);
- pte += 2;
- }
+ if (tile) {
+ spin_lock(&dev_priv->tile.lock);
+ if (fence) {
+ /* Mark it as pending. */
+ tile->fence = fence;
+ nouveau_fence_ref(fence);
}
- }
- dev_priv->engine.instmem.flush(dev);
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- nv50_vm_flush(dev, 6);
- return 0;
+ tile->used = false;
+ spin_unlock(&dev_priv->tile.lock);
+ }
}
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pgt;
- unsigned pages, pte, end;
-
- virt -= dev_priv->vm_vram_base;
- pages = (size >> 16) << 1;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_tile_reg *tile, *found = NULL;
+ int i;
- while (pages) {
- pgt = dev_priv->vm_vram_pt[virt >> 29];
- pte = (virt & 0x1ffe0000ULL) >> 15;
+ for (i = 0; i < pfb->num_tiles; i++) {
+ tile = nv10_mem_get_tile_region(dev, i);
- end = pte + pages;
- if (end > 16384)
- end = 16384;
- pages -= (end - pte);
- virt += (end - pte) << 15;
+ if (pitch && !found) {
+ found = tile;
+ continue;
- while (pte < end) {
- nv_wo32(pgt, (pte * 4), 0);
- pte++;
+ } else if (tile && tile->pitch) {
+ /* Kill an unused tile region. */
+ nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
}
+
+ nv10_mem_put_tile_region(dev, tile, NULL);
}
- dev_priv->engine.instmem.flush(dev);
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- nv50_vm_flush(dev, 6);
+ if (found)
+ nv10_mem_update_tile_region(dev, found, addr, size,
+ pitch, flags);
+ return found;
}
/*
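
The hunk above splits the old monolithic nv10_mem_set_tiling() into a get/update/put lifecycle guarded by the new dev_priv->tile.lock. A minimal caller-side sketch of that lifecycle, assuming `addr`, `size`, `pitch`, `flags` and `fence` come from the caller's buffer object (they are not taken from this patch):

	struct nouveau_tile_reg *tile;

	/* Claim a free region and program PFB/PGRAPH for it. */
	tile = nv10_mem_set_tiling(dev, addr, size, pitch, flags);
	if (tile) {
		/* ... render to the tiled VRAM ... */

		/* Release it; reuse is deferred until `fence` signals. */
		nv10_mem_put_tile_region(dev, tile, fence);
	}
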
@@ -223,7 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- nouveau_bo_unpin(dev_priv->vga_ram);
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -312,62 +240,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
return 0;
}
-static void
-nv50_vram_preinit(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru;
-
- r0 = nv_rd32(dev, 0x100200);
- r4 = nv_rd32(dev, 0x100204);
- rt = nv_rd32(dev, 0x100250);
- ru = nv_rd32(dev, 0x001540);
- NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = ((r4 & 0x01000000) ? 8 : 4);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != dev_priv->vram_size) {
- NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
- (u32)(dev_priv->vram_size >> 20));
- NV_WARN(dev, "we calculated %dMiB VRAM\n",
- (u32)(predicted >> 20));
- }
-
- dev_priv->vram_rblock_size = rowsize >> 12;
- if (rt & 1)
- dev_priv->vram_rblock_size *= 3;
-
- NV_DEBUG(dev, "rblock %lld bytes\n",
- (u64)dev_priv->vram_rblock_size << 12);
-}
-
-static void
-nvaa_vram_preinit(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /* To our knowledge, there's no large scale reordering of pages
- * that occurs on IGP chipsets.
- */
- dev_priv->vram_rblock_size = 1;
-}
-
-static int
+int
nouveau_mem_detect(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,33 +254,6 @@ nouveau_mem_detect(struct drm_device *dev)
if (dev_priv->card_type < NV_50) {
dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
- } else
- if (dev_priv->card_type < NV_C0) {
- dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
- dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
- dev_priv->vram_size &= 0xffffffff00ll;
-
- switch (dev_priv->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf:
- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
- dev_priv->vram_sys_base <<= 12;
- nvaa_vram_preinit(dev);
- break;
- default:
- nv50_vram_preinit(dev);
- break;
- }
- } else {
- dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
- dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
- }
-
- NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
- if (dev_priv->vram_sys_base) {
- NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
- dev_priv->vram_sys_base);
}
if (dev_priv->vram_size)
return 0;
@@ -415,6 +261,15 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
+bool
+nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+ return true;
+
+ return false;
+}
+
#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
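
nouveau_mem_flags_valid() above accepts any request that carries no tile layout. A hedged sketch of the intended call site in the GEM object-creation path (shown schematically; `req` is a hypothetical ioctl argument, not part of this patch):

	if (!nouveau_mem_flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}
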
@@ -537,20 +392,22 @@ nouveau_mem_vram_init(struct drm_device *dev)
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
int ret, dma_bits;
- if (dev_priv->card_type >= NV_50 &&
- pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
- dma_bits = 40;
- else
- dma_bits = 32;
+ dma_bits = 32;
+ if (dev_priv->card_type >= NV_50) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+ dma_bits = 40;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset != 0x40 &&
+ dev_priv->chipset != 0x45) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
+ dma_bits = 39;
+ }
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
return ret;
- ret = nouveau_mem_detect(dev);
- if (ret)
- return ret;
-
dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
@@ -566,23 +423,50 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
+ /* reserve space at end of VRAM for PRAMIN */
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
+ } else
+ if (dev_priv->card_type >= NV_40) {
+ u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
+ u32 rsvd;
+
+ /* estimate grctx size, the magics come from nv40_grctx.c */
+ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
+ else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
+ else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
+ else rsvd = 0x4a40 * vs;
+ rsvd += 16 * 1024;
+ rsvd *= dev_priv->engine.fifo.channels;
+
+ /* pciegart table */
+ if (drm_pci_device_is_pcie(dev))
+ rsvd += 512 * 1024;
+
+ /* object storage */
+ rsvd += 512 * 1024;
+
+ dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
+ } else {
+ dev_priv->ramin_rsvd_vram = 512 * 1024;
+ }
+
+ ret = dev_priv->engine.vram.init(dev);
+ if (ret)
+ return ret;
+
+ NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+ if (dev_priv->vram_sys_base) {
+ NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+ dev_priv->vram_sys_base);
+ }
+
dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
- dev_priv->fb_mappable_pages =
- pci_resource_len(dev->pdev, 1);
+ dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
- /* reserve space at end of VRAM for PRAMIN */
- if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
- dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
- dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
- else
- if (dev_priv->card_type >= NV_40)
- dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
- else
- dev_priv->ramin_rsvd_vram = (512 * 1024);
-
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
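
For scale, a hedged evaluation of the reserve computation above for one illustrative configuration (chipset < 0x43, vs = 1, 32 FIFO channels, PCIe; the numbers are worked examples, not measurements):

	u32 vs = 1, channels = 32;
	u32 rsvd = 0x4f00 * vs;      /* grctx estimate for chipset < 0x43 */
	rsvd += 16 * 1024;           /* per-channel slack: 36608 bytes    */
	rsvd *= channels;            /* 36608 * 32 = 1171456              */
	rsvd += 512 * 1024;          /* pciegart table                    */
	rsvd += 512 * 1024;          /* object storage                    */
	rsvd = round_up(rsvd, 4096); /* 2220032 bytes, ~2.1 MiB           */

so the dynamic estimate lands in the same 1-2 MiB range the removed static table used, while tracking channel count and chipset.
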
@@ -594,13 +478,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &dev_priv->vga_ram);
- if (ret == 0)
- ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_WARN(dev, "failed to reserve VGA memory\n");
- nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram,
+ TTM_PL_FLAG_VRAM);
+
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
}
dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
@@ -619,7 +507,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_NONE;
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
+ if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -799,3 +687,202 @@ nouveau_mem_timing_fini(struct drm_device *dev)
kfree(mem->timing);
}
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_mm *mm;
+ u64 size, block, rsvd;
+ int ret;
+
+ rsvd = (256 * 1024); /* vga memory */
+ size = (p_size << PAGE_SHIFT) - rsvd;
+ block = dev_priv->vram_rblock_size;
+
+ ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
+ if (ret)
+ return ret;
+
+ man->priv = mm;
+ return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+ struct nouveau_mm *mm = man->priv;
+ int ret;
+
+ ret = nouveau_mm_fini(&mm);
+ if (ret)
+ return ret;
+
+ man->priv = NULL;
+ return 0;
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nouveau_mem *node = mem->mm_node;
+ struct drm_device *dev = dev_priv->dev;
+
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+
+ vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct drm_device *dev = dev_priv->dev;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_mem *node;
+ u32 size_nc = 0;
+ int ret;
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+ size_nc = 1 << nvbo->vma.node->type;
+
+ ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
+ mem->page_alignment << PAGE_SHIFT, size_nc,
+ (nvbo->tile_flags >> 8) & 0x3ff, &node);
+ if (ret) {
+ mem->mm_node = NULL;
+ return (ret == -ENOSPC) ? 0 : ret;
+ }
+
+ node->page_shift = 12;
+ if (nvbo->vma.node)
+ node->page_shift = nvbo->vma.node->type;
+
+ mem->mm_node = node;
+ mem->start = node->offset >> PAGE_SHIFT;
+ return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ u32 total = 0, free = 0;
+
+ mutex_lock(&mm->mutex);
+ list_for_each_entry(r, &mm->nodes, nl_entry) {
+ printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+ prefix, r->type, ((u64)r->offset << 12),
+ (((u64)r->offset + r->length) << 12));
+
+ total += r->length;
+ if (!r->type)
+ free += r->length;
+ }
+ mutex_unlock(&mm->mutex);
+
+ printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
+ prefix, (u64)total << 12, (u64)free << 12);
+ printk(KERN_DEBUG "%s block: 0x%08x\n",
+ prefix, mm->block_size << 12);
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+ nouveau_vram_manager_init,
+ nouveau_vram_manager_fini,
+ nouveau_vram_manager_new,
+ nouveau_vram_manager_del,
+ nouveau_vram_manager_debug
+};
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+ return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+ return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct nouveau_mem *node = mem->mm_node;
+
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+ mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mem *node;
+ int ret;
+
+ if (unlikely((mem->num_pages << PAGE_SHIFT) >=
+ dev_priv->gart_info.aper_size))
+ return -ENOMEM;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ /* This node must be for evicting large-paged VRAM
+ * to system memory. Due to a nv50 limitation of
+ * not being able to mix large/small pages within
+ * the same PDE, we need to create a temporary
+ * small-paged VMA for the eviction.
+ */
+ if (vma->node->type != vm->spg_shift) {
+ ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
+ vm->spg_shift, NV_MEM_ACCESS_RW,
+ &node->tmp_vma);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+ }
+
+ node->page_shift = nvbo->vma.node->type;
+ mem->mm_node = node;
+ mem->start = 0;
+ return 0;
+}
+
+void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+ nouveau_gart_manager_init,
+ nouveau_gart_manager_fini,
+ nouveau_gart_manager_new,
+ nouveau_gart_manager_del,
+ nouveau_gart_manager_debug
+};
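
The two func tables close out the patch; TTM drives them through ttm_mem_type_manager. A hedged sketch of the hook-up, which in-tree lives in the driver's init_mem_type() callback in nouveau_bo.c (shown schematically, not verbatim from this series):

	static int
	nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			/* VRAM nodes come from the nouveau_mm allocator. */
			man->func = &nouveau_vram_manager;
			break;
		case TTM_PL_TT:
			/* GART nodes carry no offset; placement happens at bind. */
			man->func = &nouveau_gart_manager;
			break;
		}
		return 0;
	}
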