/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
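/*
 * nouveau_bo.c implements nouveau's TTM buffer-object (BO) backend: BO
 * allocation and placement, pinning, CPU mapping, DMA cache maintenance,
 * and BO migration between system memory, GART and VRAM (via a copy
 * engine where available, memcpy otherwise).
 */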
/*****************************************************************************
 * NV10-NV40 tiling helpers
 *****************************************************************************/

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}
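/*
 * Tile regions are recycled through fences: nv10_bo_put_tile_region()
 * attaches the last fence using the region, and nv10_bo_get_tile_region()
 * only hands a region back out once that fence has signalled, so the
 * tiling hardware is never reprogrammed underneath an in-flight job.
 */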
71 static struct nouveau_drm_tile *
72 nv10_bo_get_tile_region(struct drm_device *dev, int i)
74 struct nouveau_drm *drm = nouveau_drm(dev);
75 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
77 spin_lock(&drm->tile.lock);
80 (!tile->fence || nouveau_fence_done(tile->fence)))
85 spin_unlock(&drm->tile.lock);
90 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
91 struct dma_fence *fence)
93 struct nouveau_drm *drm = nouveau_drm(dev);
96 spin_lock(&drm->tile.lock);
97 tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
99 spin_unlock(&drm->tile.lock);
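/*
 * Scan all tile regions for a free one (or one whose fence has already
 * signalled), tearing down stale regions along the way, and program the
 * first usable region with the new address/size/pitch/zeta parameters.
 */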
103 static struct nouveau_drm_tile *
104 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
105 u32 size, u32 pitch, u32 zeta)
107 struct nouveau_drm *drm = nouveau_drm(dev);
108 struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
109 struct nouveau_drm_tile *tile, *found = NULL;
112 for (i = 0; i < fb->tile.regions; i++) {
113 tile = nv10_bo_get_tile_region(dev, i);
115 if (pitch && !found) {
119 } else if (tile && fb->tile.region[i].pitch) {
120 /* Kill an unused tile region. */
121 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
124 nv10_bo_put_tile_region(dev, tile, NULL);
128 nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
133 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
135 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
136 struct drm_device *dev = drm->dev;
137 struct nouveau_bo *nvbo = nouveau_bo(bo);
139 if (unlikely(nvbo->bo.base.filp))
140 DRM_ERROR("bo %p still attached to GEM object\n", bo);
141 WARN_ON(nvbo->pin_refcnt > 0);
142 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
147 roundup_64(u64 x, u32 y)
155 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
156 int *align, u64 *size)
158 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
159 struct nvif_device *device = &drm->client.device;
161 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
163 if (device->info.chipset >= 0x40) {
165 *size = roundup_64(*size, 64 * nvbo->mode);
167 } else if (device->info.chipset >= 0x30) {
169 *size = roundup_64(*size, 64 * nvbo->mode);
171 } else if (device->info.chipset >= 0x20) {
173 *size = roundup_64(*size, 64 * nvbo->mode);
175 } else if (device->info.chipset >= 0x10) {
177 *size = roundup_64(*size, 32 * nvbo->mode);
181 *size = roundup_64(*size, (1 << nvbo->page));
182 *align = max((1 << nvbo->page), *align);
185 *size = roundup_64(*size, PAGE_SIZE);
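/*
 * Allocate and fill in the nouveau_bo structure itself: decode the tiling
 * kind/compression from tile_flags, pick the GPU page size the buffer will
 * be mapped with, and round size/alignment to match.  The object is not
 * handed to TTM until nouveau_bo_init().
 */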
189 nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
190 u32 tile_mode, u32 tile_flags)
192 struct nouveau_drm *drm = cli->drm;
193 struct nouveau_bo *nvbo;
194 struct nvif_mmu *mmu = &cli->mmu;
195 struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
199 NV_WARN(drm, "skipped size %016llx\n", *size);
200 return ERR_PTR(-EINVAL);
203 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
205 return ERR_PTR(-ENOMEM);
206 INIT_LIST_HEAD(&nvbo->head);
207 INIT_LIST_HEAD(&nvbo->entry);
208 INIT_LIST_HEAD(&nvbo->vma_list);
209 nvbo->bo.bdev = &drm->ttm.bdev;
211 /* This is confusing, and doesn't actually mean we want an uncached
212 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
213 * into in nouveau_gem_new().
215 if (flags & TTM_PL_FLAG_UNCACHED) {
216 /* Determine if we can get a cache-coherent map, forcing
217 * uncached mapping if we can't.
219 if (!nouveau_drm_use_coherent_gpu_mapping(drm))
220 nvbo->force_coherent = true;
223 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
224 nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
225 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
227 return ERR_PTR(-EINVAL);
230 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
232 if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
233 nvbo->kind = (tile_flags & 0x00007f00) >> 8;
234 nvbo->comp = (tile_flags & 0x00030000) >> 16;
235 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
237 return ERR_PTR(-EINVAL);
240 nvbo->zeta = (tile_flags & 0x00000007);
242 nvbo->mode = tile_mode;
243 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
245 /* Determine the desirable target GPU page size for the buffer. */
246 for (i = 0; i < vmm->page_nr; i++) {
247 /* Because we cannot currently allow VMM maps to fail
248 * during buffer migration, we need to determine page
249 * size for the buffer up-front, and pre-allocate its
252 * Skip page sizes that can't support needed domains.
254 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
255 (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
257 if ((flags & TTM_PL_FLAG_TT) &&
258 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
261 /* Select this page size if it's the first that supports
262 * the potential memory domains, or when it's compatible
263 * with the requested compression settings.
265 if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
268 /* Stop once the buffer is larger than the current page size. */
269 if (*size >= 1ULL << vmm->page[i].shift)
274 return ERR_PTR(-EINVAL);
276 /* Disable compression if suitable settings couldn't be found. */
277 if (nvbo->comp && !vmm->page[pi].comp) {
278 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
279 nvbo->kind = mmu->kind[nvbo->kind];
282 nvbo->page = vmm->page[pi].shift;
284 nouveau_bo_fixup_align(nvbo, flags, align, size);
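/*
 * Hand the BO over to TTM.  ttm_bo_init() takes ownership of the object
 * and calls nouveau_bo_del_ttm() on failure, so callers must not free
 * nvbo themselves once this has been called.
 */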
290 nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
291 struct sg_table *sg, struct dma_resv *robj)
293 int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
297 acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
299 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
300 nouveau_bo_placement_set(nvbo, flags, 0);
302 ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
303 &nvbo->placement, align >> PAGE_SHIFT, false,
304 acc_size, sg, robj, nouveau_bo_del_ttm);
306 /* ttm will call nouveau_bo_del_ttm if it fails.. */
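/*
 * nouveau_bo_new() is the usual entry point: it simply combines
 * nouveau_bo_alloc() and nouveau_bo_init() for callers that do not need
 * to touch the BO between the two steps.
 */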
314 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
315 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
316 struct sg_table *sg, struct dma_resv *robj,
317 struct nouveau_bo **pnvbo)
319 struct nouveau_bo *nvbo;
322 nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
325 return PTR_ERR(nvbo);
327 ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
336 set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
340 if (type & TTM_PL_FLAG_VRAM)
341 pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
342 if (type & TTM_PL_FLAG_TT)
343 pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
344 if (type & TTM_PL_FLAG_SYSTEM)
345 pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
349 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
351 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
352 u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
353 unsigned i, fpfn, lpfn;
355 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
356 nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
357 nvbo->bo.mem.num_pages < vram_pages / 4) {
359 * Make sure that the color and depth buffers are handled
360 * by independent memory controller units. Up to a 9x
361 * speed up when alpha-blending and depth-test are enabled
365 fpfn = vram_pages / 2;
369 lpfn = vram_pages / 2;
371 for (i = 0; i < nvbo->placement.num_placement; ++i) {
372 nvbo->placements[i].fpfn = fpfn;
373 nvbo->placements[i].lpfn = lpfn;
375 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
376 nvbo->busy_placements[i].fpfn = fpfn;
377 nvbo->busy_placements[i].lpfn = lpfn;
383 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
385 struct ttm_placement *pl = &nvbo->placement;
386 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
387 TTM_PL_MASK_CACHING) |
388 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
390 pl->placement = nvbo->placements;
391 set_placement_list(nvbo->placements, &pl->num_placement,
394 pl->busy_placement = nvbo->busy_placements;
395 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
398 set_placement_range(nvbo, type);
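/*
 * Pinning validates the BO into the requested memory type and, through
 * pin_refcnt, adds TTM_PL_FLAG_NO_EVICT to subsequent placements so the
 * buffer cannot be evicted until nouveau_bo_unpin() drops the last
 * reference.  The gem.vram_available/gart_available accounting is
 * adjusted on both pin and unpin.
 */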
402 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
404 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
405 struct ttm_buffer_object *bo = &nvbo->bo;
406 bool force = false, evict = false;
409 ret = ttm_bo_reserve(bo, false, false, NULL);
413 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
414 memtype == TTM_PL_FLAG_VRAM && contig) {
422 if (nvbo->pin_refcnt) {
423 if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
424 NV_ERROR(drm, "bo %p pinned elsewhere: "
425 "0x%08x vs 0x%08x\n", bo,
426 1 << bo->mem.mem_type, memtype);
434 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
435 ret = nouveau_bo_validate(nvbo, false, false);
441 nouveau_bo_placement_set(nvbo, memtype, 0);
443 /* drop pin_refcnt temporarily, so we don't trip the assertion
444 * in nouveau_bo_move() that makes sure we're not trying to
445 * move a pinned buffer
448 ret = nouveau_bo_validate(nvbo, false, false);
453 switch (bo->mem.mem_type) {
455 drm->gem.vram_available -= bo->mem.size;
458 drm->gem.gart_available -= bo->mem.size;
466 nvbo->contig = false;
467 ttm_bo_unreserve(bo);
472 nouveau_bo_unpin(struct nouveau_bo *nvbo)
474 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
475 struct ttm_buffer_object *bo = &nvbo->bo;
478 ret = ttm_bo_reserve(bo, false, false, NULL);
482 ref = --nvbo->pin_refcnt;
483 WARN_ON_ONCE(ref < 0);
487 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
489 ret = nouveau_bo_validate(nvbo, false, false);
491 switch (bo->mem.mem_type) {
493 drm->gem.vram_available += bo->mem.size;
496 drm->gem.gart_available += bo->mem.size;
504 ttm_bo_unreserve(bo);
509 nouveau_bo_map(struct nouveau_bo *nvbo)
513 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
517 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
519 ttm_bo_unreserve(&nvbo->bo);
524 nouveau_bo_unmap(struct nouveau_bo *nvbo)
529 ttm_bo_kunmap(&nvbo->kmap);
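/*
 * BOs with CPU-cached backing pages need explicit cache maintenance
 * around GPU access: sync_for_device() flushes CPU writes before the GPU
 * reads them, sync_for_cpu() invalidates before the CPU reads what the
 * GPU wrote.  force_coherent BOs use uncached mappings and skip the loop.
 */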
533 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
535 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
536 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
542 /* Don't waste time looping if the object is coherent */
543 if (nvbo->force_coherent)
546 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
547 dma_sync_single_for_device(drm->dev->dev,
548 ttm_dma->dma_address[i],
549 PAGE_SIZE, DMA_TO_DEVICE);
553 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
555 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
556 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
562 /* Don't waste time looping if the object is coherent */
563 if (nvbo->force_coherent)
566 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
567 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
568 PAGE_SIZE, DMA_FROM_DEVICE);
572 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
575 struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
578 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
582 nouveau_bo_sync_for_device(nvbo);
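/*
 * The rd/wr helpers below operate on a prior nouveau_bo_map().  Depending
 * on where the BO currently lives the kmap may be I/O memory, so accesses
 * go through ioreadXX/iowriteXX whenever ttm_kmap_obj_virtual() reports
 * an iomem mapping.
 */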
588 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
591 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
596 iowrite16_native(val, (void __force __iomem *)mem);
602 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
605 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
610 return ioread32_native((void __force __iomem *)mem);
616 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
619 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
624 iowrite32_native(val, (void __force __iomem *)mem);
629 static struct ttm_tt *
630 nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
632 #if IS_ENABLED(CONFIG_AGP)
633 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
635 if (drm->agp.bridge) {
636 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
640 return nouveau_sgdma_create_ttm(bo, page_flags);
644 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
646 /* We'll do this from user space. */
651 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
652 struct ttm_mem_type_manager *man)
654 struct nouveau_drm *drm = nouveau_bdev(bdev);
655 struct nvif_mmu *mmu = &drm->client.mmu;
659 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
660 man->available_caching = TTM_PL_MASK_CACHING;
661 man->default_caching = TTM_PL_FLAG_CACHED;
664 man->flags = TTM_MEMTYPE_FLAG_FIXED |
665 TTM_MEMTYPE_FLAG_MAPPABLE;
666 man->available_caching = TTM_PL_FLAG_UNCACHED |
668 man->default_caching = TTM_PL_FLAG_WC;
670 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
671 /* Some BARs do not support being ioremapped WC */
672 const u8 type = mmu->type[drm->ttm.type_vram].type;
673 if (type & NVIF_MEM_UNCACHED) {
674 man->available_caching = TTM_PL_FLAG_UNCACHED;
675 man->default_caching = TTM_PL_FLAG_UNCACHED;
678 man->func = &nouveau_vram_manager;
679 man->io_reserve_fastpath = false;
680 man->use_io_reserve_lru = true;
682 man->func = &ttm_bo_manager_func;
686 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
687 man->func = &nouveau_gart_manager;
689 if (!drm->agp.bridge)
690 man->func = &nv04_gart_manager;
692 man->func = &ttm_bo_manager_func;
694 if (drm->agp.bridge) {
695 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
696 man->available_caching = TTM_PL_FLAG_UNCACHED |
698 man->default_caching = TTM_PL_FLAG_WC;
700 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
701 TTM_MEMTYPE_FLAG_CMA;
702 man->available_caching = TTM_PL_MASK_CACHING;
703 man->default_caching = TTM_PL_FLAG_CACHED;
714 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
716 struct nouveau_bo *nvbo = nouveau_bo(bo);
718 switch (bo->mem.mem_type) {
720 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
724 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
728 *pl = nvbo->placement;
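/*
 * Hardware-assisted buffer moves.  Each GPU generation exposes a copy
 * engine (or an M2MF fallback) with its own method interface; the
 * nvXX_bo_move_*() routines below emit the copy commands for one
 * generation each, and the matching *_init() binds the engine object to
 * the channel's copy subchannel.
 */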
733 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
735 int ret = RING_SPACE(chan, 2);
737 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
738 OUT_RING (chan, handle & 0x0000ffff);
745 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
746 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
748 struct nouveau_mem *mem = nouveau_mem(old_reg);
749 int ret = RING_SPACE(chan, 10);
751 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
752 OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
753 OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
754 OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
755 OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
756 OUT_RING (chan, PAGE_SIZE);
757 OUT_RING (chan, PAGE_SIZE);
758 OUT_RING (chan, PAGE_SIZE);
759 OUT_RING (chan, new_reg->num_pages);
760 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
766 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
768 int ret = RING_SPACE(chan, 2);
770 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
771 OUT_RING (chan, handle);
777 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
778 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
780 struct nouveau_mem *mem = nouveau_mem(old_reg);
781 u64 src_offset = mem->vma[0].addr;
782 u64 dst_offset = mem->vma[1].addr;
783 u32 page_count = new_reg->num_pages;
786 page_count = new_reg->num_pages;
788 int line_count = (page_count > 8191) ? 8191 : page_count;
790 ret = RING_SPACE(chan, 11);
794 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
795 OUT_RING (chan, upper_32_bits(src_offset));
796 OUT_RING (chan, lower_32_bits(src_offset));
797 OUT_RING (chan, upper_32_bits(dst_offset));
798 OUT_RING (chan, lower_32_bits(dst_offset));
799 OUT_RING (chan, PAGE_SIZE);
800 OUT_RING (chan, PAGE_SIZE);
801 OUT_RING (chan, PAGE_SIZE);
802 OUT_RING (chan, line_count);
803 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
804 OUT_RING (chan, 0x00000110);
806 page_count -= line_count;
807 src_offset += (PAGE_SIZE * line_count);
808 dst_offset += (PAGE_SIZE * line_count);
815 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
816 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
818 struct nouveau_mem *mem = nouveau_mem(old_reg);
819 u64 src_offset = mem->vma[0].addr;
820 u64 dst_offset = mem->vma[1].addr;
821 u32 page_count = new_reg->num_pages;
824 page_count = new_reg->num_pages;
826 int line_count = (page_count > 2047) ? 2047 : page_count;
828 ret = RING_SPACE(chan, 12);
832 BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
833 OUT_RING (chan, upper_32_bits(dst_offset));
834 OUT_RING (chan, lower_32_bits(dst_offset));
835 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
836 OUT_RING (chan, upper_32_bits(src_offset));
837 OUT_RING (chan, lower_32_bits(src_offset));
838 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
839 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
840 OUT_RING (chan, PAGE_SIZE); /* line_length */
841 OUT_RING (chan, line_count);
842 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
843 OUT_RING (chan, 0x00100110);
845 page_count -= line_count;
846 src_offset += (PAGE_SIZE * line_count);
847 dst_offset += (PAGE_SIZE * line_count);
854 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
855 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
857 struct nouveau_mem *mem = nouveau_mem(old_reg);
858 u64 src_offset = mem->vma[0].addr;
859 u64 dst_offset = mem->vma[1].addr;
860 u32 page_count = new_reg->num_pages;
863 page_count = new_reg->num_pages;
865 int line_count = (page_count > 8191) ? 8191 : page_count;
867 ret = RING_SPACE(chan, 11);
871 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
872 OUT_RING (chan, upper_32_bits(src_offset));
873 OUT_RING (chan, lower_32_bits(src_offset));
874 OUT_RING (chan, upper_32_bits(dst_offset));
875 OUT_RING (chan, lower_32_bits(dst_offset));
876 OUT_RING (chan, PAGE_SIZE);
877 OUT_RING (chan, PAGE_SIZE);
878 OUT_RING (chan, PAGE_SIZE);
879 OUT_RING (chan, line_count);
880 BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
881 OUT_RING (chan, 0x00000110);
883 page_count -= line_count;
884 src_offset += (PAGE_SIZE * line_count);
885 dst_offset += (PAGE_SIZE * line_count);
892 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
893 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
895 struct nouveau_mem *mem = nouveau_mem(old_reg);
896 int ret = RING_SPACE(chan, 7);
898 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
899 OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
900 OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
901 OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
902 OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
903 OUT_RING (chan, 0x00000000 /* COPY */);
904 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
910 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
911 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
913 struct nouveau_mem *mem = nouveau_mem(old_reg);
914 int ret = RING_SPACE(chan, 7);
916 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
917 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
918 OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
919 OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
920 OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
921 OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
922 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
928 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
930 int ret = RING_SPACE(chan, 6);
932 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
933 OUT_RING (chan, handle);
934 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
935 OUT_RING (chan, chan->drm->ntfy.handle);
936 OUT_RING (chan, chan->vram.handle);
937 OUT_RING (chan, chan->vram.handle);
944 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
945 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
947 struct nouveau_mem *mem = nouveau_mem(old_reg);
948 u64 length = (new_reg->num_pages << PAGE_SHIFT);
949 u64 src_offset = mem->vma[0].addr;
950 u64 dst_offset = mem->vma[1].addr;
951 int src_tiled = !!mem->kind;
952 int dst_tiled = !!nouveau_mem(new_reg)->kind;
956 u32 amount, stride, height;
958 ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
962 amount = min(length, (u64)(4 * 1024 * 1024));
964 height = amount / stride;
967 BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
970 OUT_RING (chan, stride);
971 OUT_RING (chan, height);
976 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
980 BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
983 OUT_RING (chan, stride);
984 OUT_RING (chan, height);
989 BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
993 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
994 OUT_RING (chan, upper_32_bits(src_offset));
995 OUT_RING (chan, upper_32_bits(dst_offset));
996 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
997 OUT_RING (chan, lower_32_bits(src_offset));
998 OUT_RING (chan, lower_32_bits(dst_offset));
999 OUT_RING (chan, stride);
1000 OUT_RING (chan, stride);
1001 OUT_RING (chan, stride);
1002 OUT_RING (chan, height);
1003 OUT_RING (chan, 0x00000101);
1004 OUT_RING (chan, 0x00000000);
1005 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1009 src_offset += amount;
1010 dst_offset += amount;
1017 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
1019 int ret = RING_SPACE(chan, 4);
1021 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
1022 OUT_RING (chan, handle);
1023 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
1024 OUT_RING (chan, chan->drm->ntfy.handle);
1030 static inline uint32_t
1031 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
1032 struct nouveau_channel *chan, struct ttm_mem_reg *reg)
1034 if (reg->mem_type == TTM_PL_TT)
1036 return chan->vram.handle;
1040 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
1041 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
1043 u32 src_offset = old_reg->start << PAGE_SHIFT;
1044 u32 dst_offset = new_reg->start << PAGE_SHIFT;
1045 u32 page_count = new_reg->num_pages;
1048 ret = RING_SPACE(chan, 3);
1052 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
1053 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
1054 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
1056 page_count = new_reg->num_pages;
1057 while (page_count) {
1058 int line_count = (page_count > 2047) ? 2047 : page_count;
1060 ret = RING_SPACE(chan, 11);
1064 BEGIN_NV04(chan, NvSubCopy,
1065 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
1066 OUT_RING (chan, src_offset);
1067 OUT_RING (chan, dst_offset);
1068 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
1069 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
1070 OUT_RING (chan, PAGE_SIZE); /* line_length */
1071 OUT_RING (chan, line_count);
1072 OUT_RING (chan, 0x00000101);
1073 OUT_RING (chan, 0x00000000);
1074 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1077 page_count -= line_count;
1078 src_offset += (PAGE_SIZE * line_count);
1079 dst_offset += (PAGE_SIZE * line_count);
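/*
 * nouveau_bo_move_prep() reserves two temporary VMAs in the client VMM:
 * vma[0] maps the buffer's current backing storage and vma[1] the new
 * one, so the copy engine can address both sides of the migration.  Both
 * are attached to the old memory node and are released once TTM has
 * destroyed the old ttm_mem_reg.
 */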
1086 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
1087 struct ttm_mem_reg *reg)
1089 struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
1090 struct nouveau_mem *new_mem = nouveau_mem(reg);
1091 struct nvif_vmm *vmm = &drm->client.vmm.vmm;
1094 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
1095 old_mem->mem.size, &old_mem->vma[0]);
1099 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
1100 new_mem->mem.size, &old_mem->vma[1]);
1104 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
1108 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
1111 nvif_vmm_put(vmm, &old_mem->vma[1]);
1112 nvif_vmm_put(vmm, &old_mem->vma[0]);
1118 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1119 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1121 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1122 struct nouveau_channel *chan = drm->ttm.chan;
1123 struct nouveau_cli *cli = (void *)chan->user.client;
1124 struct nouveau_fence *fence;
1127 /* create temporary vmas for the transfer and attach them to the
1128 * old nvkm_mem node, these will get cleaned up after ttm has
1129 * destroyed the ttm_mem_reg
1131 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1132 ret = nouveau_bo_move_prep(drm, bo, new_reg);
1137 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1138 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1140 ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
1142 ret = nouveau_fence_new(chan, false, &fence);
1144 ret = ttm_bo_move_accel_cleanup(bo,
1148 nouveau_fence_unref(&fence);
1152 mutex_unlock(&cli->mutex);
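/*
 * Probe for the most capable copy method the device exposes.  The table
 * below is ordered newest to oldest; the first class that can be
 * instantiated and initialized becomes drm->ttm.move, otherwise buffer
 * moves fall back to CPU copies.
 */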
1157 nouveau_bo_move_init(struct nouveau_drm *drm)
1159 static const struct {
1163 int (*exec)(struct nouveau_channel *,
1164 struct ttm_buffer_object *,
1165 struct ttm_mem_reg *, struct ttm_mem_reg *);
1166 int (*init)(struct nouveau_channel *, u32 handle);
1168 { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
1169 { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
1170 { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
1171 { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
1172 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1173 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1174 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
1175 { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1176 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1177 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1178 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1179 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1180 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1181 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1182 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
1183 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1184 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1185 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1186 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
1188 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
1189 }, *mthd = _methods;
1190 const char *name = "CPU";
1194 struct nouveau_channel *chan;
1199 chan = drm->channel;
1203 ret = nvif_object_init(&chan->user,
1204 mthd->oclass | (mthd->engine << 16),
1205 mthd->oclass, NULL, 0,
1208 ret = mthd->init(chan, drm->ttm.copy.handle);
1210 nvif_object_fini(&drm->ttm.copy);
1214 drm->ttm.move = mthd->exec;
1215 drm->ttm.chan = chan;
1219 } while ((++mthd)->exec);
1221 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
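/*
 * Moves between VRAM and unpopulated system memory cannot be done by the
 * copy engine in one step, so they are split in two: _flipd bounces
 * VRAM -> TT -> SYSTEM and _flips bounces SYSTEM -> TT -> VRAM, using a
 * temporary GART placement as the intermediate.
 */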
1225 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1226 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1228 struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
1229 struct ttm_place placement_memtype = {
1232 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1234 struct ttm_placement placement;
1235 struct ttm_mem_reg tmp_reg;
1238 placement.num_placement = placement.num_busy_placement = 1;
1239 placement.placement = placement.busy_placement = &placement_memtype;
1242 tmp_reg.mm_node = NULL;
1243 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
1247 ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
1251 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
1255 ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
1257 ttm_bo_mem_put(bo, &tmp_reg);
1262 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1263 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1265 struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
1266 struct ttm_place placement_memtype = {
1269 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1271 struct ttm_placement placement;
1272 struct ttm_mem_reg tmp_reg;
1275 placement.num_placement = placement.num_busy_placement = 1;
1276 placement.placement = placement.busy_placement = &placement_memtype;
1279 tmp_reg.mm_node = NULL;
1280 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
1284 ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
1288 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
1293 ttm_bo_mem_put(bo, &tmp_reg);
1298 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
1299 struct ttm_mem_reg *new_reg)
1301 struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
1302 struct nouveau_bo *nvbo = nouveau_bo(bo);
1303 struct nouveau_vma *vma;
1305 /* ttm can now (stupidly) pass the driver bos it didn't create... */
1306 if (bo->destroy != nouveau_bo_del_ttm)
1309 if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
1310 mem->mem.page == nvbo->page) {
1311 list_for_each_entry(vma, &nvbo->vma_list, head) {
1312 nouveau_vma_map(vma, mem);
1315 list_for_each_entry(vma, &nvbo->vma_list, head) {
1316 WARN_ON(ttm_bo_wait(bo, false, false));
1317 nouveau_vma_unmap(vma);
1323 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
1324 struct nouveau_drm_tile **new_tile)
1326 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1327 struct drm_device *dev = drm->dev;
1328 struct nouveau_bo *nvbo = nouveau_bo(bo);
1329 u64 offset = new_reg->start << PAGE_SHIFT;
1332 if (new_reg->mem_type != TTM_PL_VRAM)
1335 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1336 *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
1337 nvbo->mode, nvbo->zeta);
1344 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1345 struct nouveau_drm_tile *new_tile,
1346 struct nouveau_drm_tile **old_tile)
1348 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1349 struct drm_device *dev = drm->dev;
1350 struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
1352 nv10_bo_put_tile_region(dev, *old_tile, fence);
1353 *old_tile = new_tile;
1357 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
1358 struct ttm_operation_ctx *ctx,
1359 struct ttm_mem_reg *new_reg)
1361 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1362 struct nouveau_bo *nvbo = nouveau_bo(bo);
1363 struct ttm_mem_reg *old_reg = &bo->mem;
1364 struct nouveau_drm_tile *new_tile = NULL;
1367 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
1371 if (nvbo->pin_refcnt)
1372 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1374 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1375 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
1381 if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1382 BUG_ON(bo->mem.mm_node != NULL);
1384 new_reg->mm_node = NULL;
1388 /* Hardware assisted copy. */
1389 if (drm->ttm.move) {
1390 if (new_reg->mem_type == TTM_PL_SYSTEM)
1391 ret = nouveau_bo_move_flipd(bo, evict,
1393 ctx->no_wait_gpu, new_reg);
1394 else if (old_reg->mem_type == TTM_PL_SYSTEM)
1395 ret = nouveau_bo_move_flips(bo, evict,
1397 ctx->no_wait_gpu, new_reg);
1399 ret = nouveau_bo_move_m2mf(bo, evict,
1401 ctx->no_wait_gpu, new_reg);
1406 /* Fallback to software copy. */
1407 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
1409 ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
1412 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1414 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1416 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1423 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1425 struct nouveau_bo *nvbo = nouveau_bo(bo);
1427 return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
1428 filp->private_data);
1432 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1434 struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
1435 struct nouveau_drm *drm = nouveau_bdev(bdev);
1436 struct nvkm_device *device = nvxx_device(&drm->client.device);
1437 struct nouveau_mem *mem = nouveau_mem(reg);
1439 reg->bus.addr = NULL;
1440 reg->bus.offset = 0;
1441 reg->bus.size = reg->num_pages << PAGE_SHIFT;
1443 reg->bus.is_iomem = false;
1444 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1446 switch (reg->mem_type) {
1451 #if IS_ENABLED(CONFIG_AGP)
1452 if (drm->agp.bridge) {
1453 reg->bus.offset = reg->start << PAGE_SHIFT;
1454 reg->bus.base = drm->agp.base;
1455 reg->bus.is_iomem = !drm->agp.cma;
1458 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
1461 /* fall through - tiled memory */
1463 reg->bus.offset = reg->start << PAGE_SHIFT;
1464 reg->bus.base = device->func->resource_addr(device, 1);
1465 reg->bus.is_iomem = true;
1466 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1468 struct nv50_mem_map_v0 nv50;
1469 struct gf100_mem_map_v0 gf100;
1475 switch (mem->mem.object.oclass) {
1476 case NVIF_CLASS_MEM_NV50:
1477 args.nv50.version = 0;
1479 args.nv50.kind = mem->kind;
1480 args.nv50.comp = mem->comp;
1481 argc = sizeof(args.nv50);
1483 case NVIF_CLASS_MEM_GF100:
1484 args.gf100.version = 0;
1486 args.gf100.kind = mem->kind;
1487 argc = sizeof(args.gf100);
1494 ret = nvif_object_map_handle(&mem->mem.object,
1498 return ret ? ret : -EINVAL;
1501 reg->bus.offset = handle;
1511 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1513 struct nouveau_drm *drm = nouveau_bdev(bdev);
1514 struct nouveau_mem *mem = nouveau_mem(reg);
1516 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1517 switch (reg->mem_type) {
1520 nvif_object_unmap_handle(&mem->mem.object);
1523 nvif_object_unmap_handle(&mem->mem.object);
1532 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1534 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1535 struct nouveau_bo *nvbo = nouveau_bo(bo);
1536 struct nvkm_device *device = nvxx_device(&drm->client.device);
1537 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1540 /* as long as the bo isn't in vram, and isn't tiled, we've got
1541 * nothing to do here.
1543 if (bo->mem.mem_type != TTM_PL_VRAM) {
1544 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1548 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1549 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1551 ret = nouveau_bo_validate(nvbo, false, false);
1558 /* make sure bo is in mappable vram */
1559 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1560 bo->mem.start + bo->mem.num_pages < mappable)
1563 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1564 nvbo->placements[i].fpfn = 0;
1565 nvbo->placements[i].lpfn = mappable;
1568 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1569 nvbo->busy_placements[i].fpfn = 0;
1570 nvbo->busy_placements[i].lpfn = mappable;
1573 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1574 return nouveau_bo_validate(nvbo, false, false);
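/*
 * Backing-store population: dma-buf (SG) slaves just mirror the
 * importer's pages; otherwise the AGP path, the SWIOTLB-aware DMA pool,
 * or the plain page pool plus a manual dma_map_page() of every page is
 * used, with the mappings undone again in nouveau_ttm_tt_unpopulate().
 */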
1578 nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
1580 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1581 struct nouveau_drm *drm;
1585 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1587 if (ttm->state != tt_unpopulated)
1590 if (slave && ttm->sg) {
1591 /* make userspace faulting work */
1592 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1593 ttm_dma->dma_address, ttm->num_pages);
1594 ttm->state = tt_unbound;
1598 drm = nouveau_bdev(ttm->bdev);
1599 dev = drm->dev->dev;
1601 #if IS_ENABLED(CONFIG_AGP)
1602 if (drm->agp.bridge) {
1603 return ttm_agp_tt_populate(ttm, ctx);
1607 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1608 if (swiotlb_nr_tbl()) {
1609 return ttm_dma_populate((void *)ttm, dev, ctx);
1613 r = ttm_pool_populate(ttm, ctx);
1618 for (i = 0; i < ttm->num_pages; i++) {
1621 addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
1624 if (dma_mapping_error(dev, addr)) {
1626 dma_unmap_page(dev, ttm_dma->dma_address[i],
1627 PAGE_SIZE, DMA_BIDIRECTIONAL);
1628 ttm_dma->dma_address[i] = 0;
1630 ttm_pool_unpopulate(ttm);
1634 ttm_dma->dma_address[i] = addr;
1640 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1642 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1643 struct nouveau_drm *drm;
1646 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1651 drm = nouveau_bdev(ttm->bdev);
1652 dev = drm->dev->dev;
1654 #if IS_ENABLED(CONFIG_AGP)
1655 if (drm->agp.bridge) {
1656 ttm_agp_tt_unpopulate(ttm);
1661 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1662 if (swiotlb_nr_tbl()) {
1663 ttm_dma_unpopulate((void *)ttm, dev);
1668 for (i = 0; i < ttm->num_pages; i++) {
1669 if (ttm_dma->dma_address[i]) {
1670 dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
1675 ttm_pool_unpopulate(ttm);
1679 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1681 struct dma_resv *resv = nvbo->bo.base.resv;
1684 dma_resv_add_excl_fence(resv, &fence->base);
1686 dma_resv_add_shared_fence(resv, &fence->base);
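/*
 * TTM driver vtable wiring all of the above into the TTM core.
 */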
1689 struct ttm_bo_driver nouveau_bo_driver = {
1690 .ttm_tt_create = &nouveau_ttm_tt_create,
1691 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1692 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1693 .invalidate_caches = nouveau_bo_invalidate_caches,
1694 .init_mem_type = nouveau_bo_init_mem_type,
1695 .eviction_valuable = ttm_bo_eviction_valuable,
1696 .evict_flags = nouveau_bo_evict_flags,
1697 .move_notify = nouveau_bo_move_ntfy,
1698 .move = nouveau_bo_move,
1699 .verify_access = nouveau_bo_verify_access,
1700 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1701 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1702 .io_mem_free = &nouveau_ttm_io_mem_free,