 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 *
 * Heterogeneous Memory Management (HMM)
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page table of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table
 * update happens first, and then the update is mirrored over to the device
 * page table. This does not cause any issue, because the CPU page table
 * cannot start pointing to a new page until the device page table is
 * invalidated.
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also
 * provides some API calls to help with taking a snapshot of the CPU page
 * table, and to synchronize with any updates that might happen concurrently.
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can
 * always cause the device memory to be migrated (copied/moved) back to
 * regular memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
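 *
 * As a rough illustration only (a sketch, not code from this file; callback
 * and helper names other than migrate_vma() are hypothetical), a driver
 * migrating a range to its own memory follows a pattern like:
 *
 *	static const struct migrate_vma_ops my_migrate_ops = {
 *		.alloc_and_copy   = my_alloc_device_pages_and_dma_copy,
 *		.finalize_and_map = my_update_device_page_table,
 *	};
 *
 *	// src[] and dst[] each have one entry per page in [start, end)
 *	ret = migrate_vma(&my_migrate_ops, vma, start, end, src, dst, private);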
#include <linux/kconfig.h>
#include <asm/pgtable.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
 * hmm_pfn_flag_e - HMM flag enums
 *
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array; if the driver's valid bit for an entry
 * is bit 3, i.e., (entry & (1 << 3)) is true when the entry is valid, then
 * the driver must provide an array in hmm_range.flags with
 * hmm_range.flags[HMM_PFN_VALID] == 1 << 3. The same logic applies to all
 * flags. This is the same idea as vm_page_prot in vma, except that this is
 * per device driver rather than per architecture.
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};
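/*
 * For example (an illustrative sketch only; the my_* names are hypothetical),
 * a driver whose device page table uses bit 3 for valid, bit 2 for write and
 * bit 1 for device private memory would describe its encoding as:
 *
 *	static const uint64_t my_hmm_flags[HMM_PFN_FLAG_MAX] = {
 *		[HMM_PFN_VALID]		 = 1ULL << 3,
 *		[HMM_PFN_WRITE]		 = 1ULL << 2,
 *		[HMM_PFN_DEVICE_PRIVATE] = 1ULL << 1,
 *	};
 *
 * and point hmm_range.flags at my_hmm_flags.
 */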
 * hmm_pfn_value_e - HMM pfn special value
 *
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
 * be mirrored by a device, because the entry will never have HMM_PFN_VALID
 * set and the pfn value is undefined.
 *
 * The driver provides entry values for the none, error and special cases. The
 * driver can alias some of them (i.e., use the same value for error and
 * special, for instance), but it should not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
 *	hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 *	hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 *	hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special.
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};
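/*
 * Continuing the sketch above (hypothetical my_* names), the special values
 * must be distinguishable from any valid HMM pfn, for instance:
 *
 *	static const uint64_t my_hmm_values[HMM_PFN_VALUE_MAX] = {
 *		[HMM_PFN_ERROR]	  = 0x1,
 *		[HMM_PFN_NONE]	  = 0x0,
 *		[HMM_PFN_SPECIAL] = 0x2,
 *	};
 *
 * Error and special could be aliased to the same value, but none must stay
 * distinct from both.
 */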
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @vma: the vm area struct for the range
 * @list: all range locks are kept on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn value for some special case (none, special, error, ...)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array has not changed since it was filled by an HMM function
struct hmm_range {
	struct hmm		*hmm;
	struct vm_area_struct	*vma;
	struct list_head	list;
	unsigned long		start;
	unsigned long		end;
	uint64_t		*pfns;
	const uint64_t		*flags;
	const uint64_t		*values;
	uint8_t			pfn_shift;
	bool			valid;
};
 * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
 * @range: range used to decode HMM pfn value
 * @pfn: HMM pfn value to get corresponding struct page from
 * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
 *
 * If the HMM pfn is valid (i.e., the valid flag is set) then return the
 * struct page matching the pfn value stored in the HMM pfn. Otherwise return
 * NULL.
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
					   uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE] ||
	    pfn == range->values[HMM_PFN_ERROR] ||
	    pfn == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(pfn >> range->pfn_shift);
}
 * hmm_pfn_to_pfn() - return the pfn value stored in an HMM pfn
 * @range: range used to decode HMM pfn value
 * @pfn: HMM pfn value to extract the pfn from
 * Returns: pfn value if HMM pfn is valid, -1UL otherwise
static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
					   uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE] ||
	    pfn == range->values[HMM_PFN_ERROR] ||
	    pfn == range->values[HMM_PFN_SPECIAL])
		return -1UL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return -1UL;
	return (pfn >> range->pfn_shift);
}
 * hmm_pfn_from_page() - create a valid HMM pfn value from a struct page
 * @range: range used to encode HMM pfn value
 * @page: struct page pointer for which to create the HMM pfn
 * Returns: valid HMM pfn for the page
static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
					 struct page *page)
{
	return (page_to_pfn(page) << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}
 * hmm_pfn_from_pfn() - create a valid HMM pfn value from a pfn
 * @range: range used to encode HMM pfn value
 * @pfn: pfn value for which to create the HMM pfn
 * Returns: valid HMM pfn for the pfn
static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
					unsigned long pfn)
{
	return (pfn << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}
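/*
 * A typical (sketch only) use of these helpers after a snapshot is to walk
 * the pfns array and convert each entry back to a struct page:
 *
 *	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
 *
 *	for (i = 0; i < npages; i++) {
 *		struct page *page = hmm_pfn_to_page(range, range->pfns[i]);
 *
 *		if (!page)
 *			continue;	// none, error or special entry
 *		// Program the device page table for this address using page,
 *		// honoring range->flags[HMM_PFN_WRITE] for write permission.
 *	}
 */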
#if IS_ENABLED(CONFIG_HMM_MIRROR)

 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can
 * either directly use mmu_notifier APIs or they can use the hmm_mirror API.
 * Device drivers can decide to register one mirror per device per process, or
 * just one mirror per process for a group of devices. The pattern is:
 *	int device_bind_address_space(..., struct mm_struct *mm, ...)
 *	{
 *		struct device_address_space *das;
 *
 *		// Device driver specific initialization, and allocation of das
 *		// which contains an hmm_mirror struct as one of its fields.
 *
 *		ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
 *		if (ret)
 *			return ret;	// Cleanup on error
 *
 *		// Other device driver specific initialization
 *		return 0;
 *	}
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *	void device_unbind_address_space(struct device_address_space *das)
 *	{
 *		// Device driver specific cleanup
 *
 *		hmm_mirror_unregister(&das->mirror);
 *
 *		// Other device driver specific cleanup; now das can be freed
 *	}
 * enum hmm_update_event - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
enum hmm_update_event {
	HMM_UPDATE_INVALIDATE,
};
 * struct hmm_update - HMM update information for callback
 *
 * @start: virtual start address of the range to update
 * @end: virtual end address of the range to update
 * @event: event triggering the update (what is happening)
 * @blockable: can the callback block/sleep?
struct hmm_update {
	unsigned long start;
	unsigned long end;
	enum hmm_update_event event;
	bool blockable;
};
 * struct hmm_mirror_ops - HMM mirror device operations callback
 *
 * @update: callback to update range on a device
struct hmm_mirror_ops {
	/* release() - release hmm_mirror
	 *
	 * @mirror: pointer to struct hmm_mirror
	 *
	 * This is called when the mm_struct is being released. The callback
	 * should make sure no references to the mirror occur after the
	 * callback returns.
	 */
	void (*release)(struct hmm_mirror *mirror);
	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct hmm_update)
	 * Returns: -EAGAIN if update.blockable is false and the callback
	 *          needs to block, 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform on the range.
	 *
	 * The device driver must not return from this callback until the device
	 * page tables are completely updated (TLBs flushed, etc); this is a
	 * synchronization point with the CPU page table update.
	 */
	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
					  const struct hmm_update *update);
};
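/*
 * A minimal sketch of a driver callback (my_* names are hypothetical, not
 * part of this API):
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *						 const struct hmm_update *update)
 *	{
 *		struct my_device *mdev = container_of(mirror, struct my_device,
 *						      mirror);
 *
 *		if (!update->blockable && my_invalidate_needs_sleep(mdev))
 *			return -EAGAIN;
 *
 *		// Remove or write protect device PTEs covering
 *		// [update->start, update->end) and flush device TLBs before
 *		// returning.
 *		my_invalidate_range(mdev, update->start, update->end);
 *		return 0;
 *	}
 */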
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
struct hmm_mirror {
	struct hmm			*hmm;
	const struct hmm_mirror_ops	*ops;
	struct list_head		list;
};
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);
 * To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device
 * driver lock that serializes device page table updates, then call
 * hmm_vma_range_done(), to check if the snapshot is still valid. The same
 * device driver page table update lock must also be used in the
 * hmm_mirror_ops.sync_cpu_device_pagetables() callback, so that CPU page
 * table invalidation serializes on it.
 *
 * YOU MUST CALL hmm_vma_range_done() ONCE AND ONLY ONCE EACH TIME YOU CALL
 * hmm_vma_get_pfns() WITHOUT ERROR!
 *
 * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID!
int hmm_vma_get_pfns(struct hmm_range *range);
bool hmm_vma_range_done(struct hmm_range *range);
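/*
 * Illustrative pattern (a sketch; driver_lock/driver_unlock and the device
 * page table update are driver specific):
 *
 *	range.start = ...; range.end = ...; range.pfns = ...;
 *	ret = hmm_vma_get_pfns(&range);
 *	if (ret)
 *		return ret;
 *	driver_lock(...);	// same lock as in sync_cpu_device_pagetables()
 *	if (!hmm_vma_range_done(&range)) {
 *		driver_unlock(...);
 *		goto again;	// snapshot was invalidated, retry
 *	}
 *	// Use range.pfns to update the device page table.
 *	driver_unlock(...);
 */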
 * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will
 * not migrate any device memory back to system memory. The HMM pfn array will
 * be updated with the fault result and current snapshot of the CPU page table
 * for the range.
 *
 * The mmap_sem must be taken in read mode before entering and it might be
 * dropped by the function if the block argument is false. In that case, the
 * function returns -EAGAIN.
 *
 * Return value does not reflect if the fault was successful for every single
 * address or not. Therefore, the caller must inspect the HMM pfn array to
 * determine fault status for each address.
 *
 * Trying to fault inside an invalid vma will result in -EINVAL.
 *
 * See the function description in mm/hmm.c for further documentation.
int hmm_vma_fault(struct hmm_range *range, bool block);
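/*
 * Sketch of the expected calling pattern (retry policy is driver specific):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_vma_fault(&range, block);
 *	if (ret == -EAGAIN) {
 *		// Only possible when block == false: mmap_sem was dropped,
 *		// take it again and retry.
 *	}
 *	// On success, inspect range.pfns[] for the per-address fault status
 *	// and then follow the same hmm_vma_range_done() pattern as above.
 */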
/* Below are for HMM internal use only! Not to be used by device drivers! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
	mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr);
 * struct hmm_devmem_ops - callback for ZONE_DEVICE memory events
 *
 * @free: called when the refcount on a page reaches 1, meaning the page is
 *        no longer used
 * @fault: called when there is a page fault to unaddressable memory
 *
 * Both callbacks happen from the page_free() and page_fault() callbacks of
 * struct dev_pagemap, respectively. See include/linux/memremap.h for more
 * details on those.
 *
 * The hmm_devmem_ops callbacks are just here to provide a coherent and unique
 * API to device drivers; device drivers should not register their own
 * page_free() or page_fault() but rely on the hmm_devmem_ops callbacks
 * instead.
struct hmm_devmem_ops {
	/*
	 * free() - free a device page
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @page: pointer to struct page being freed
	 *
	 * The callback occurs whenever a device page's refcount reaches 1,
	 * which means that no one is holding any reference on the page
	 * anymore (ZONE_DEVICE pages have an elevated refcount of 1 by
	 * default, so that they are not released to the general page
	 * allocator).
	 *
	 * Note that the callback has exclusive ownership of the page (as no
	 * one is holding any reference).
	 */
	void (*free)(struct hmm_devmem *devmem, struct page *page);
	/*
	 * fault() - CPU page fault or get user page (GUP)
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @vma: virtual memory area containing the virtual address
	 * @addr: virtual address that faulted or for which there is a GUP
	 * @page: pointer to struct page backing virtual address (unreliable)
	 * @flags: FAULT_FLAG_* (see include/linux/mm.h)
	 * @pmdp: page middle directory
	 * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
	 *          otherwise
	 *
	 * The callback occurs whenever there is a CPU page fault or GUP on a
	 * virtual address. This means that the device driver must migrate the
	 * page back to regular memory (CPU accessible).
	 *
	 * The device driver is free to migrate more than one page from the
	 * fault() callback as an optimization. However, if the device decides
	 * to migrate more than one page it must always prioritize the
	 * faulting address over the others.
	 *
	 * The struct page pointer is only given as a hint to allow quick
	 * lookup of internal device driver data. A concurrent migration might
	 * have already freed that page and the virtual address might no
	 * longer be backed by it, so it should not be modified by the
	 * callback.
	 *
	 * Note that the mmap semaphore is held in read mode at least when
	 * this callback occurs, hence the vma is valid upon callback entry.
	 */
	vm_fault_t (*fault)(struct hmm_devmem *devmem,
			    struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp);
};
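/*
 * A sketch of a fault() implementation (the my_* helpers are hypothetical);
 * the real work is done with the migration helpers described at the top of
 * this file:
 *
 *	static vm_fault_t my_devmem_fault(struct hmm_devmem *devmem,
 *					  struct vm_area_struct *vma,
 *					  unsigned long addr,
 *					  const struct page *page,
 *					  unsigned int flags,
 *					  pmd_t *pmdp)
 *	{
 *		// Migrate the device page backing addr (and possibly its
 *		// neighbors) back to system memory, e.g. with migrate_vma(),
 *		// prioritizing addr.
 *		if (my_migrate_back_to_ram(devmem, vma, addr))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 */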
 * struct hmm_devmem - track device memory
 *
 * @completion: completion object for device memory
 * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
 * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
 * @resource: IO resource reserved for this chunk of memory
 * @pagemap: device page map for that chunk
 * @device: device to bind resource to
 * @ops: memory operations callback
 * @ref: per CPU refcount
 * @page_fault: callback when the CPU faults on an unaddressable device page
 *
 * This is a helper structure for device drivers that do not wish to implement
 * the gory details related to hotplugging new memory and allocating struct
 * pages.
 *
 * Device drivers can directly use ZONE_DEVICE memory on their own if they
 * wish to do so.
 *
 * The page_fault() callback must migrate the page back, from device memory to
 * system memory, so that the CPU can access it. This might fail for various
 * reasons (device issues, the device has been unplugged, ...). When such an
 * error condition happens, the page_fault() callback must return
 * VM_FAULT_SIGBUS and set the CPU page table entry to "poisoned".
 *
 * Note that because memory cgroup charges are transferred to the device
 * memory, this should never fail due to memory restrictions. However,
 * allocation of a regular system page might still fail because we are out of
 * memory. If that happens, the page_fault() callback must return
 * VM_FAULT_OOM.
 *
 * The page_fault() callback can also try to migrate back multiple pages in
 * one chunk, as an optimization. It must, however, prioritize the faulting
 * address over all the others.
typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
					unsigned long addr,
					const struct page *page,
					unsigned int flags,
					pmd_t *pmdp);
struct hmm_devmem {
	struct completion		completion;
	unsigned long			pfn_first;
	unsigned long			pfn_last;
	struct resource			*resource;
	struct device			*device;
	struct dev_pagemap		pagemap;
	const struct hmm_devmem_ops	*ops;
	struct percpu_ref		ref;
	dev_page_fault_t		page_fault;
};
 * To add (hotplug) device memory, HMM assumes that there is no real resource
 * that reserves a range in the physical address space (this is intended to be
 * used by unaddressable device memory). It will reserve a physical range big
 * enough and allocate struct pages for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct.
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res);
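/*
 * Typical usage at device probe time (a sketch; the size and the my_* ops are
 * hypothetical):
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free	= my_devmem_free,
 *		.fault	= my_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, SZ_1G);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// Device pages are then in [devmem->pfn_first, devmem->pfn_last] and
 *	// can be handed out by the driver's own allocator.
 */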
 * hmm_devmem_page_set_drvdata - set per-page driver data field
 *
 * @page: pointer to struct page
 * @data: driver data value to set
 *
 * Because the page can not be on the lru list, we have an unsigned long that
 * the driver can use to store a per-page field. This is just a simple helper
 * to do that.
static inline void hmm_devmem_page_set_drvdata(struct page *page,
					       unsigned long data)
{
	page->hmm_data = data;
}
 * hmm_devmem_page_get_drvdata - get per-page driver data field
 *
 * @page: pointer to struct page
 * Return: driver data value
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
	return page->hmm_data;
}
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
struct hmm_device {
	struct device		device;
	unsigned int		minor;
};
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
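/*
 * For instance (sketch only; my_* names are hypothetical), a driver managing
 * memory from several boards through one fake device can do:
 *
 *	hmm_device = hmm_device_new(my_driver_private);
 *	if (IS_ERR(hmm_device))
 *		return PTR_ERR(hmm_device);
 *	devmem = hmm_devmem_add(&my_devmem_ops, &hmm_device->device, size);
 *	...
 *	hmm_device_put(hmm_device);
 */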
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */
#endif /* LINUX_HMM_H */