/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
22 #include "nouveau_dmem.h"
23 #include "nouveau_drv.h"
24 #include "nouveau_chan.h"
25 #include "nouveau_dma.h"
26 #include "nouveau_mem.h"
27 #include "nouveau_bo.h"
29 #include <nvif/class.h>
30 #include <nvif/object.h>
31 #include <nvif/if500b.h>
32 #include <nvif/if900b.h>
34 #include <linux/sched/mm.h>
35 #include <linux/hmm.h>
/*
 * FIXME: this is ugly. Right now we use TTM to allocate VRAM and we pin it
 * in VRAM while it is in use. We likely want to overhaul memory management
 * for nouveau to be more page-like (not necessarily with the system page
 * size, but a bigger page size) at the lowest level, and have some shim
 * layer on top that provides the same functionality as TTM.
 */
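/*
 * Device memory is handed to HMM in fixed-size chunks: each chunk is backed
 * by a pinned 2MB VRAM buffer object and tracked with a per-page allocation
 * bitmap of DMEM_CHUNK_NPAGES bits.
 */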
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
struct nouveau_migrate;

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      u64 dst_addr, u64 src_addr);
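/*
 * Per-chunk state: pfn_first is the pfn of the first device private struct
 * page covering the chunk, callocated counts pages currently allocated from
 * it and the bitmap records which of those pages are in use.
 */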
struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long pfn_first;
        unsigned long callocated;
        unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
        struct nvif_vma vma;
        spinlock_t lock;
};
struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        struct nouveau_channel *chan;
};
struct nouveau_dmem {
        struct hmm_devmem *devmem;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunk_free;
        struct list_head chunk_full;
        struct list_head chunk_empty;
        struct mutex mutex;
};
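/*
 * Temporary GPU view of the system pages taking part in a migration: the
 * pages are collected into a scatterlist, DMA-mapped, wrapped in a
 * nouveau_mem and mapped into the client VMM so the copy engine can address
 * them.
 */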
struct nouveau_migrate_hmem {
        struct scatterlist *sg;
        struct nouveau_mem mem;
        struct nvif_vma vma;
        unsigned long npages;
};
struct nouveau_dmem_fault {
        struct nouveau_drm *drm;
        struct nouveau_fence *fence;
        struct nouveau_migrate_hmem hmem;
};
struct nouveau_migrate {
        struct vm_area_struct *vma;
        struct nouveau_drm *drm;
        struct nouveau_fence *fence;
        unsigned long npages;
        struct nouveau_migrate_hmem hmem;
};
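/*
 * Release the temporary copy-engine mapping built by
 * nouveau_migrate_hmem_init(): free the nouveau_mem, put the VMM range and
 * DMA-unmap the scatterlist.
 */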
static void
nouveau_migrate_hmem_fini(struct nouveau_drm *drm,
                          struct nouveau_migrate_hmem *hmem)
{
        struct nvif_vmm *vmm = &drm->client.vmm.vmm;

        nouveau_mem_fini(&hmem->mem);
        nvif_vmm_put(vmm, &hmem->vma);
        if (hmem->sg) {
                dma_unmap_sg_attrs(drm->dev->dev, hmem->sg,
                                   hmem->npages, DMA_BIDIRECTIONAL,
                                   DMA_ATTR_SKIP_CPU_SYNC);
                kfree(hmem->sg);
                hmem->sg = NULL;
        }
}
static int
nouveau_migrate_hmem_init(struct nouveau_drm *drm,
                          struct nouveau_migrate_hmem *hmem,
                          unsigned long npages,
                          const unsigned long *pfns)
{
        struct nvif_vmm *vmm = &drm->client.vmm.vmm;
        unsigned long i;
        int ret;

        hmem->sg = kzalloc(npages * sizeof(*hmem->sg), GFP_KERNEL);
        if (hmem->sg == NULL)
                return -ENOMEM;

        for (i = 0, hmem->npages = 0; hmem->npages < npages; ++i) {
                struct page *page;

                if (!pfns[i] || pfns[i] == MIGRATE_PFN_ERROR)
                        continue;
                page = migrate_pfn_to_page(pfns[i]);
                if (!page) {
                        ret = -EINVAL;
                        goto error;
                }
                sg_set_page(&hmem->sg[hmem->npages], page, PAGE_SIZE, 0);
                hmem->npages++;
        }
        sg_mark_end(&hmem->sg[hmem->npages - 1]);

        i = dma_map_sg_attrs(drm->dev->dev, hmem->sg, hmem->npages,
                             DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
        if (i != hmem->npages) {
                ret = -ENOMEM;
                goto error;
        }
        ret = nouveau_mem_sgl(&hmem->mem, &drm->client,
                              hmem->npages, hmem->sg);
        if (ret)
                goto error;
        ret = nvif_vmm_get(vmm, LAZY, false, hmem->mem.mem.page,
                           0, hmem->mem.mem.size, &hmem->vma);
        if (ret)
                goto error;
        ret = nouveau_mem_map(&hmem->mem, vmm, &hmem->vma);
        if (ret)
                goto error;

        return 0;

error:
        nouveau_migrate_hmem_fini(drm, hmem);
        return ret;
}
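/*
 * HMM free() callback: a device private page is being released, return it to
 * its chunk's allocation bitmap.
 */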
static void
nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
{
        struct nouveau_dmem_chunk *chunk;
        struct nouveau_drm *drm;
        unsigned long idx;

        chunk = (void *)hmm_devmem_page_get_drvdata(page);
        idx = page_to_pfn(page) - chunk->pfn_first;
        drm = chunk->drm;

        /*
         * FIXME: this is really a bad example, we need to overhaul nouveau
         * memory management to be more page focused and to allow a lighter
         * locking scheme to be used in the process.
         */
        spin_lock(&chunk->lock);
        clear_bit(idx, chunk->bitmap);
        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME: when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&chunk->lock);
}
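/*
 * alloc_and_copy() step of a CPU fault migration: allocate system pages for
 * the faulting range, map them for the copy engine and copy each device page
 * back to system memory one page at a time.
 */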
static void
nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
                                  const unsigned long *src_pfns,
                                  unsigned long *dst_pfns,
                                  unsigned long start,
                                  unsigned long end,
                                  void *private)
{
        struct nouveau_dmem_fault *fault = private;
        struct nouveau_drm *drm = fault->drm;
        unsigned long addr, i, c, npages = 0;
        nouveau_migrate_copy_t copy;
        int ret;

        /* First allocate new memory */
        for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
                struct page *dpage, *spage;

                dst_pfns[i] = 0;
                spage = migrate_pfn_to_page(src_pfns[i]);
                if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
                        continue;
                dpage = hmm_vma_alloc_locked_page(vma, addr);
                if (!dpage) {
                        dst_pfns[i] = MIGRATE_PFN_ERROR;
                        continue;
                }
                dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
                              MIGRATE_PFN_LOCKED;
                npages++;
        }

        /* Create scatter list FIXME: get rid of scatter list */
        ret = nouveau_migrate_hmem_init(drm, &fault->hmem, npages, dst_pfns);
        if (ret)
                goto error;

        /* Copy things over */
        copy = drm->dmem->migrate.copy_func;
        for (addr = start, i = c = 0; addr < end; addr += PAGE_SIZE, i++) {
                struct nouveau_dmem_chunk *chunk;
                struct page *spage, *dpage;
                u64 src_addr, dst_addr;

                dpage = migrate_pfn_to_page(dst_pfns[i]);
                if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
                        continue;
                dst_addr = fault->hmem.vma.addr + (c << PAGE_SHIFT);
                c++;
                spage = migrate_pfn_to_page(src_pfns[i]);
                if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
                        dst_pfns[i] = MIGRATE_PFN_ERROR;
                        __free_page(dpage);
                        continue;
                }
                chunk = (void *)hmm_devmem_page_get_drvdata(spage);
                src_addr = page_to_pfn(spage) - chunk->pfn_first;
                src_addr = (src_addr << PAGE_SHIFT) + chunk->vma.addr;
                ret = copy(drm, 1, dst_addr, src_addr);
                if (ret) {
                        dst_pfns[i] = MIGRATE_PFN_ERROR;
                        __free_page(dpage);
                        continue;
                }
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fault->fence);
        return;

error:
        for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
                struct page *page;

                if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
                        continue;
                page = migrate_pfn_to_page(dst_pfns[i]);
                dst_pfns[i] = MIGRATE_PFN_ERROR;
                if (page == NULL)
                        continue;
                __free_page(page);
        }
}
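/*
 * finalize_and_map() step of a CPU fault migration: wait for the copy to
 * complete before the migration entries are replaced, then tear down the
 * temporary copy-engine mapping.
 */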
static void nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
                                                const unsigned long *src_pfns,
                                                const unsigned long *dst_pfns,
                                                unsigned long start,
                                                unsigned long end,
                                                void *private)
{
        struct nouveau_dmem_fault *fault = private;
        struct nouveau_drm *drm = fault->drm;

        if (fault->fence) {
                nouveau_fence_wait(fault->fence, true, false);
                nouveau_fence_unref(&fault->fence);
        } else {
                /*
                 * FIXME: wait for the channel to be idle before finalizing
                 * the hmem object below (nouveau_migrate_hmem_fini()).
                 */
        }
        nouveau_migrate_hmem_fini(drm, &fault->hmem);
}
static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
        .alloc_and_copy = nouveau_dmem_fault_alloc_and_copy,
        .finalize_and_map = nouveau_dmem_fault_finalize_and_map,
};
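/*
 * HMM fault() callback: a CPU fault hit a device private page, migrate that
 * single page back to system memory.
 */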
static int
nouveau_dmem_fault(struct hmm_devmem *devmem,
                   struct vm_area_struct *vma,
                   unsigned long addr,
                   const struct page *page,
                   unsigned int flags,
                   pmd_t *pmdp)
{
        struct drm_device *drm_dev = dev_get_drvdata(devmem->device);
        unsigned long src[1] = {0}, dst[1] = {0};
        struct nouveau_dmem_fault fault = {0};
        int ret;

        /*
         * FIXME: what we really want is some heuristic to migrate more than
         * just one page on CPU fault. When such a fault happens it is very
         * likely that more surrounding pages will CPU fault too.
         */
        fault.drm = nouveau_drm(drm_dev);
        ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vma, addr,
                          addr + PAGE_SIZE, src, dst, &fault);
        if (ret)
                return VM_FAULT_SIGBUS;

        if (dst[0] == MIGRATE_PFN_ERROR)
                return VM_FAULT_SIGBUS;

        return 0;
}
static const struct hmm_devmem_ops
nouveau_dmem_devmem_ops = {
        .free = nouveau_dmem_free,
        .fault = nouveau_dmem_fault,
};
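/*
 * Back one empty chunk with VRAM: allocate and pin a DMEM_CHUNK_SIZE buffer
 * object and map it into the client VMM so the copy engine can reach it.
 */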
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
{
        struct nvif_vmm *vmm = &drm->client.vmm.vmm;
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return -EINVAL;

        mutex_lock(&drm->dmem->mutex);
        chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk == NULL) {
                mutex_unlock(&drm->dmem->mutex);
                return -ENOMEM;
        }
        list_del(&chunk->list);
        mutex_unlock(&drm->dmem->mutex);

        ret = nvif_vmm_get(vmm, LAZY, false, 12, 0,
                           DMEM_CHUNK_SIZE, &chunk->vma);
        if (ret)
                goto out;
        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out;
        ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
        if (ret) {
                nouveau_bo_ref(NULL, &chunk->bo);
                goto out;
        }
        ret = nouveau_mem_map(nouveau_mem(&chunk->bo->bo.mem), vmm, &chunk->vma);
        if (ret) {
                nouveau_bo_unpin(chunk->bo);
                nouveau_bo_ref(NULL, &chunk->bo);
                goto out;
        }
        bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
        spin_lock_init(&chunk->lock);

out:
        mutex_lock(&drm->dmem->mutex);
        if (chunk->bo)
                list_add(&chunk->list, &drm->dmem->chunk_empty);
        else
                list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
        mutex_unlock(&drm->dmem->mutex);
        return ret;
}
static struct nouveau_dmem_chunk *
nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk)
                return chunk;
        chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk && chunk->bo)
                return chunk;
        return NULL;
}
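/*
 * Allocate npages device pages: scan the chunk bitmaps for free pages and
 * allocate new chunks on demand, returning the page frame numbers in pages[].
 */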
static int
nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
                         unsigned long npages,
                         unsigned long *pages)
{
        struct nouveau_dmem_chunk *chunk;
        unsigned long c;
        int ret;

        memset(pages, 0xff, npages * sizeof(*pages));

        mutex_lock(&drm->dmem->mutex);
        for (c = 0; c < npages;) {
                unsigned long i;

                chunk = nouveau_dmem_chunk_first_free_locked(drm);
                if (chunk == NULL) {
                        mutex_unlock(&drm->dmem->mutex);
                        ret = nouveau_dmem_chunk_alloc(drm);
                        if (ret) {
                                if (c)
                                        break;
                                return ret;
                        }
                        mutex_lock(&drm->dmem->mutex);
                        continue;
                }
                spin_lock(&chunk->lock);
                i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
                while (i < DMEM_CHUNK_NPAGES && c < npages) {
                        pages[c] = chunk->pfn_first + i;
                        set_bit(i, chunk->bitmap);
                        chunk->callocated++;
                        c++;
                        i = find_next_zero_bit(chunk->bitmap,
                                               DMEM_CHUNK_NPAGES, i);
                }
                spin_unlock(&chunk->lock);
        }
        mutex_unlock(&drm->dmem->mutex);

        return 0;
}
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        unsigned long pfns[1];
        struct page *page;
        int ret;

        /* FIXME: sort out the mismatched APIs ... */
        ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
        if (ret)
                return NULL;

        page = pfn_to_page(pfns[0]);
        get_page(page);
        lock_page(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}
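/*
 * Chunk buffer objects are unpinned on suspend and pinned back into VRAM on
 * resume; pin failures are still a FIXME.
 */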
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}
void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
                nouveau_bo_unpin(chunk->bo);
        }
        list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
                nouveau_bo_unpin(chunk->bo);
        }
        list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
                nouveau_bo_unpin(chunk->bo);
        }
        mutex_unlock(&drm->dmem->mutex);
}
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nvif_vmm *vmm = &drm->client.vmm.vmm;
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        WARN_ON(!list_empty(&drm->dmem->chunk_free));
        WARN_ON(!list_empty(&drm->dmem->chunk_full));

        list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
                if (chunk->bo) {
                        nouveau_bo_unpin(chunk->bo);
                        nouveau_bo_ref(NULL, &chunk->bo);
                }
                nvif_vmm_put(vmm, &chunk->vma);
                list_del(&chunk->list);
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}
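/*
 * Copy npages pages from src_addr to dst_addr with the copy engine: both are
 * virtual addresses in the client VMM, pitch and line length are one page and
 * npages is the line count.
 */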
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    u64 dst_addr, u64 src_addr)
{
        struct nouveau_channel *chan = drm->dmem->migrate.chan;
        int ret;

        ret = RING_SPACE(chan, 10);
        if (ret)
                return ret;

        BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
        OUT_RING  (chan, upper_32_bits(src_addr));
        OUT_RING  (chan, lower_32_bits(src_addr));
        OUT_RING  (chan, upper_32_bits(dst_addr));
        OUT_RING  (chan, lower_32_bits(dst_addr));
        OUT_RING  (chan, PAGE_SIZE);
        OUT_RING  (chan, PAGE_SIZE);
        OUT_RING  (chan, PAGE_SIZE);
        OUT_RING  (chan, npages);
        BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
        return 0;
}
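/*
 * Select the copy implementation matching the bound copy-engine class
 * (Pascal and later).
 */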
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case  VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}
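/*
 * Register all of VRAM with HMM as device private memory and carve it up
 * into chunks; only called on Pascal and newer boards.
 */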
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        struct device *device = drm->dev->dev;
        unsigned long i, size;
        int ret;

        /* This only makes sense on PASCAL or newer. */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;
        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunk_free);
        INIT_LIST_HEAD(&drm->dmem->chunk_full);
        INIT_LIST_HEAD(&drm->dmem->chunk_empty);
        size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);

        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret) {
                kfree(drm->dmem);
                drm->dmem = NULL;
                return;
        }

        /*
         * FIXME: we need some kind of policy to decide how much VRAM we
         * want to register with HMM. For now just register everything and
         * later, if we want to do things like over-commit, we can revisit
         * this.
         */
        drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
                                           device, size);
        if (drm->dmem->devmem == NULL) {
                kfree(drm->dmem);
                drm->dmem = NULL;
                return;
        }

        for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
                struct nouveau_dmem_chunk *chunk;
                struct page *page;
                unsigned long j;

                chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
                if (chunk == NULL) {
                        nouveau_dmem_fini(drm);
                        return;
                }
                chunk->drm = drm;
                chunk->pfn_first = drm->dmem->devmem->pfn_first;
                chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
                list_add_tail(&chunk->list, &drm->dmem->chunk_empty);

                page = pfn_to_page(chunk->pfn_first);
                for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
                        hmm_devmem_page_set_drvdata(page, (long)chunk);
        }
        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
}
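/*
 * alloc_and_copy() step when migrating anonymous memory to VRAM: allocate a
 * device page for every migrating system page, map the source pages for the
 * copy engine and copy them into the chunk backing store.
 */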
static void
nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
                                    const unsigned long *src_pfns,
                                    unsigned long *dst_pfns,
                                    unsigned long start,
                                    unsigned long end,
                                    void *private)
{
        struct nouveau_migrate *migrate = private;
        struct nouveau_drm *drm = migrate->drm;
        unsigned long addr, i, c, npages = 0;
        nouveau_migrate_copy_t copy;
        int ret;

        /* First allocate new memory */
        for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
                struct page *dpage, *spage;

                dst_pfns[i] = 0;
                spage = migrate_pfn_to_page(src_pfns[i]);
                if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
                        continue;
                dpage = nouveau_dmem_page_alloc_locked(drm);
                if (!dpage)
                        continue;
                dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
                              MIGRATE_PFN_LOCKED |
                              MIGRATE_PFN_DEVICE;
                npages++;
        }
        if (!npages)
                return;

        /* Create scatter list FIXME: get rid of scatter list */
        ret = nouveau_migrate_hmem_init(drm, &migrate->hmem, npages, src_pfns);
        if (ret)
                goto error;

        /* Copy things over */
        copy = drm->dmem->migrate.copy_func;
        for (addr = start, i = c = 0; addr < end; addr += PAGE_SIZE, i++) {
                struct nouveau_dmem_chunk *chunk;
                struct page *spage, *dpage;
                u64 src_addr, dst_addr;

                dpage = migrate_pfn_to_page(dst_pfns[i]);
                if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
                        continue;
                chunk = (void *)hmm_devmem_page_get_drvdata(dpage);
                dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
                dst_addr = (dst_addr << PAGE_SHIFT) + chunk->vma.addr;
                spage = migrate_pfn_to_page(src_pfns[i]);
                if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
                        nouveau_dmem_page_free_locked(drm, dpage);
                        dst_pfns[i] = MIGRATE_PFN_ERROR;
                        continue;
                }
                src_addr = migrate->hmem.vma.addr + (c << PAGE_SHIFT);
                c++;
                ret = copy(drm, 1, dst_addr, src_addr);
                if (ret) {
                        nouveau_dmem_page_free_locked(drm, dpage);
                        dst_pfns[i] = MIGRATE_PFN_ERROR;
                        continue;
                }
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &migrate->fence);
        return;

error:
        for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
                struct page *page;

                if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
                        continue;
                page = migrate_pfn_to_page(dst_pfns[i]);
                dst_pfns[i] = MIGRATE_PFN_ERROR;
                if (page == NULL)
                        continue;
                __free_page(page);
        }
}
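/*
 * finalize_and_map() step of a migration to VRAM: wait for the copy fence,
 * then tear down the temporary copy-engine mapping of the source pages.
 */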
static void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
                                                  const unsigned long *src_pfns,
                                                  const unsigned long *dst_pfns,
                                                  unsigned long start,
                                                  unsigned long end,
                                                  void *private)
{
        struct nouveau_migrate *migrate = private;
        struct nouveau_drm *drm = migrate->drm;

        if (migrate->fence) {
                nouveau_fence_wait(migrate->fence, true, false);
                nouveau_fence_unref(&migrate->fence);
        } else {
                /*
                 * FIXME: wait for the channel to be idle before finalizing
                 * the hmem object below (nouveau_migrate_hmem_fini()) ?
                 */
        }
        nouveau_migrate_hmem_fini(drm, &migrate->hmem);
        /*
         * FIXME optimization: update the GPU page table to point to the
         * newly migrated memory.
         */
}
static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
        .alloc_and_copy = nouveau_dmem_migrate_alloc_and_copy,
        .finalize_and_map = nouveau_dmem_migrate_finalize_and_map,
};
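/*
 * Migrate an address range of the current process to device memory, working
 * through it in batches of at most SG_MAX_SINGLE_ALLOC pages per
 * migrate_vma() call.
 */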
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long *src_pfns, *dst_pfns, npages;
        struct nouveau_migrate migrate = {0};
        unsigned long i, c, max;
        int ret = 0;

        npages = (end - start) >> PAGE_SHIFT;
        max = min(SG_MAX_SINGLE_ALLOC, npages);
        src_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
        if (src_pfns == NULL)
                return -ENOMEM;
        dst_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
        if (dst_pfns == NULL) {
                kfree(src_pfns);
                return -ENOMEM;
        }

        migrate.drm = drm;
        migrate.vma = vma;
        migrate.npages = npages;
        for (i = 0; i < npages; i += c) {
                unsigned long next;

                c = min(SG_MAX_SINGLE_ALLOC, npages - i);
                next = start + (c << PAGE_SHIFT);
                ret = migrate_vma(&nouveau_dmem_migrate_ops, vma, start,
                                  next, src_pfns, dst_pfns, &migrate);
                if (ret)
                        goto out;
                start = next;
        }

out:
        kfree(dst_pfns);
        kfree(src_pfns);
        return ret;
}
static inline bool
nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
{
        if (!is_device_private_page(page))
                return false;
        if (drm->dmem->devmem != page->pgmap->data)
                return false;
        return true;
}
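/*
 * Convert the HMM pfns of device private pages in the range into VRAM bus
 * addresses the GPU page table code can consume; pfns pointing at regular
 * system memory are left untouched.
 */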
void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                         struct hmm_range *range)
{
        unsigned long i, npages;

        npages = (range->end - range->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; ++i) {
                struct nouveau_dmem_chunk *chunk;
                struct page *page;
                u64 addr;

                page = hmm_pfn_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;
                if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE]))
                        continue;
                if (!nouveau_dmem_page(drm, page)) {
                        WARN(1, "Some unknown device memory !\n");
                        range->pfns[i] = range->values[HMM_PFN_ERROR];
                        continue;
                }

                chunk = (void *)hmm_devmem_page_get_drvdata(page);
                addr = page_to_pfn(page) - chunk->pfn_first;
                addr = (addr + chunk->bo->bo.mem.start) << PAGE_SHIFT;
                range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
                range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
        }
}