2 * fs/dax.c - Direct Access filesystem code
3 * Copyright (c) 2013-2014 Intel Corporation
4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/sched.h>
29 #include <linux/sched/signal.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
45 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
47 static int __init init_dax_wait_table(void)
51 for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
52 init_waitqueue_head(wait_table + i);
55 fs_initcall(init_dax_wait_table);
57 static int dax_is_pmd_entry(void *entry)
59 return (unsigned long)entry & RADIX_DAX_PMD;
62 static int dax_is_pte_entry(void *entry)
64 return !((unsigned long)entry & RADIX_DAX_PMD);
67 static int dax_is_zero_entry(void *entry)
69 return (unsigned long)entry & RADIX_DAX_HZP;
72 static int dax_is_empty_entry(void *entry)
74 return (unsigned long)entry & RADIX_DAX_EMPTY;
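/*
 * Illustrative sketch (not in the original source): the predicates above test
 * flag bits that dax_radix_locked_entry() packs into an exceptional radix tree
 * entry alongside the sector number. dax_entry_kind() is a hypothetical helper
 * shown only to make the entry taxonomy concrete; it uses nothing but the
 * predicates defined above.
 */
static inline const char *dax_entry_kind(void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return "page cache page";	/* a plain hole page */
	if (dax_is_empty_entry(entry))
		return dax_is_pmd_entry(entry) ? "empty PMD" : "empty PTE";
	if (dax_is_zero_entry(entry))
		return "huge zero page";	/* RADIX_DAX_HZP */
	return dax_is_pmd_entry(entry) ? "PMD block mapping" : "PTE block mapping";
}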
78 * DAX radix tree locking
80 struct exceptional_entry_key {
81 struct address_space *mapping;
85 struct wait_exceptional_entry_queue {
86 wait_queue_entry_t wait;
87 struct exceptional_entry_key key;
90 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
91 pgoff_t index, void *entry, struct exceptional_entry_key *key)
96 * If 'entry' is a PMD, align the 'index' that we use for the wait
97 * queue to the start of that PMD. This ensures that all offsets in
98 * the range covered by the PMD map to the same bit lock.
100 if (dax_is_pmd_entry(entry))
101 index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
103 key->mapping = mapping;
104 key->entry_start = index;
106 hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
107 return wait_table + hash;
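/*
 * Worked example (assuming 4k pages and 2MiB PMDs, so PMD_SHIFT - PAGE_SHIFT
 * is 9): for a PMD entry, indices 0x200-0x3ff all collapse to
 * key->entry_start == 0x200, so a task waiting on index 0x205 and a waker
 * operating on index 0x3f0 hash to the same waitqueue.
 */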
110 static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
111 int sync, void *keyp)
113 struct exceptional_entry_key *key = keyp;
114 struct wait_exceptional_entry_queue *ewait =
115 container_of(wait, struct wait_exceptional_entry_queue, wait);
117 if (key->mapping != ewait->key.mapping ||
118 key->entry_start != ewait->key.entry_start)
120 return autoremove_wake_function(wait, mode, sync, NULL);
124 * We do not necessarily hold the mapping->tree_lock when we call this
125 * function so it is possible that 'entry' is no longer a valid item in the
126 * radix tree. This is okay because all we really need to do is to find the
127 * correct waitqueue where tasks might be waiting for that old 'entry' and wake them.
130 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
131 pgoff_t index, void *entry, bool wake_all)
133 struct exceptional_entry_key key;
134 wait_queue_head_t *wq;
136 wq = dax_entry_waitqueue(mapping, index, entry, &key);
139 * Checking for locked entry and prepare_to_wait_exclusive() happens
140 * under mapping->tree_lock, ditto for entry handling in our callers.
141 * So at this point all tasks that could have seen our entry locked
142 * must be in the waitqueue and the following check will see them.
144 if (waitqueue_active(wq))
145 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
149 * Check whether the given slot is locked. The function must be called with
150 * mapping->tree_lock held
152 static inline int slot_locked(struct address_space *mapping, void **slot)
154 unsigned long entry = (unsigned long)
155 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
156 return entry & RADIX_DAX_ENTRY_LOCK;
160 * Mark the given slot as locked. The function must be called with
161 * mapping->tree_lock held
163 static inline void *lock_slot(struct address_space *mapping, void **slot)
165 unsigned long entry = (unsigned long)
166 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
168 entry |= RADIX_DAX_ENTRY_LOCK;
169 radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
170 return (void *)entry;
174 * Mark the given slot as unlocked. The function must be called with
175 * mapping->tree_lock held
177 static inline void *unlock_slot(struct address_space *mapping, void **slot)
179 unsigned long entry = (unsigned long)
180 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
182 entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
183 radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
184 return (void *)entry;
188 * Look up an entry in the radix tree; if it is an exceptional entry, wait
189 * for it to become unlocked and return it. The caller must call
190 * put_unlocked_mapping_entry() when it decides not to lock the entry, or
191 * put_locked_mapping_entry() when it has locked the entry and later wants to unlock it.
194 * The function must be called with mapping->tree_lock held.
196 static void *get_unlocked_mapping_entry(struct address_space *mapping,
197 pgoff_t index, void ***slotp)
200 struct wait_exceptional_entry_queue ewait;
201 wait_queue_head_t *wq;
203 init_wait(&ewait.wait);
204 ewait.wait.func = wake_exceptional_entry_func;
207 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
209 if (!entry || !radix_tree_exceptional_entry(entry) ||
210 !slot_locked(mapping, slot)) {
216 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
217 prepare_to_wait_exclusive(wq, &ewait.wait,
218 TASK_UNINTERRUPTIBLE);
219 spin_unlock_irq(&mapping->tree_lock);
221 finish_wait(wq, &ewait.wait);
222 spin_lock_irq(&mapping->tree_lock);
226 static void dax_unlock_mapping_entry(struct address_space *mapping,
231 spin_lock_irq(&mapping->tree_lock);
232 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
233 if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
234 !slot_locked(mapping, slot))) {
235 spin_unlock_irq(&mapping->tree_lock);
238 unlock_slot(mapping, slot);
239 spin_unlock_irq(&mapping->tree_lock);
240 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
243 static void put_locked_mapping_entry(struct address_space *mapping,
244 pgoff_t index, void *entry)
246 if (!radix_tree_exceptional_entry(entry)) {
250 dax_unlock_mapping_entry(mapping, index);
255 * Called when we are done with radix tree entry we looked up via
256 * get_unlocked_mapping_entry() and which we didn't lock in the end.
258 static void put_unlocked_mapping_entry(struct address_space *mapping,
259 pgoff_t index, void *entry)
261 if (!radix_tree_exceptional_entry(entry))
264 /* We have to wake up next waiter for the radix tree entry lock */
265 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
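/*
 * Illustrative sketch (not in the original source): the lookup-and-lock
 * pattern built from the helpers above, as used by dax_writeback_one() and
 * dax_pfn_mkwrite() later in this file. example_lock_dax_entry() is a
 * hypothetical function shown only to demonstrate the calling convention.
 */
static void *example_lock_dax_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		/* Decided not to lock it: wake the next waiter and bail. */
		put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		return NULL;
	}
	/* Take the entry lock bit; faults on this index now wait for us. */
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);

	/* The caller releases the entry with put_locked_mapping_entry(). */
	return entry;
}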
269 * Find radix tree entry at given index. If it points to a page, return with
270 * the page locked. If it points to an exceptional entry, return with the
271 * radix tree entry locked. If the radix tree doesn't contain the given index,
272 * create an empty exceptional entry for the index and return with it locked.
274 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
275 * either return that locked entry or will return an error. This error will
276 * happen if there are any 4k entries (either zero pages or DAX entries)
277 * within the 2MiB range that we are requesting.
279 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
280 * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB
281 * insertion will fail if it finds any 4k entries already in the tree, and a
282 * 4k insertion will cause an existing 2MiB entry to be unmapped and
283 * downgraded to 4k entries. This happens for both 2MiB huge zero pages as
284 * well as 2MiB empty entries.
286 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
287 * real storage backing them. We will leave these real 2MiB DAX entries in
288 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
290 * Note: unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
291 * persistent memory the benefit is doubtful, so this can be added later if it turns out to matter.
294 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
295 unsigned long size_flag)
297 bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
301 spin_lock_irq(&mapping->tree_lock);
302 entry = get_unlocked_mapping_entry(mapping, index, &slot);
305 if (size_flag & RADIX_DAX_PMD) {
306 if (!radix_tree_exceptional_entry(entry) ||
307 dax_is_pte_entry(entry)) {
308 put_unlocked_mapping_entry(mapping, index,
310 entry = ERR_PTR(-EEXIST);
313 } else { /* trying to grab a PTE entry */
314 if (radix_tree_exceptional_entry(entry) &&
315 dax_is_pmd_entry(entry) &&
316 (dax_is_zero_entry(entry) ||
317 dax_is_empty_entry(entry))) {
318 pmd_downgrade = true;
323 /* No entry for given index? Make sure radix tree is big enough. */
324 if (!entry || pmd_downgrade) {
329 * Make sure 'entry' remains valid while we drop
330 * mapping->tree_lock.
332 entry = lock_slot(mapping, slot);
335 spin_unlock_irq(&mapping->tree_lock);
337 * Besides huge zero pages, the only other things that get
338 * downgraded are empty entries, which don't need to be unmapped.
341 if (pmd_downgrade && dax_is_zero_entry(entry))
342 unmap_mapping_range(mapping,
343 (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
345 err = radix_tree_preload(
346 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
349 put_locked_mapping_entry(mapping, index, entry);
352 spin_lock_irq(&mapping->tree_lock);
356 * We needed to drop the page_tree lock while calling
357 * radix_tree_preload() and we didn't have an entry to
358 * lock. See if another thread inserted an entry at
359 * our index during this time.
361 entry = __radix_tree_lookup(&mapping->page_tree, index,
364 radix_tree_preload_end();
365 spin_unlock_irq(&mapping->tree_lock);
371 radix_tree_delete(&mapping->page_tree, index);
372 mapping->nrexceptional--;
373 dax_wake_mapping_entry_waiter(mapping, index, entry,
377 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
379 err = __radix_tree_insert(&mapping->page_tree, index,
380 dax_radix_order(entry), entry);
381 radix_tree_preload_end();
383 spin_unlock_irq(&mapping->tree_lock);
385 * Our insertion of a DAX entry failed, most likely
386 * because we were inserting a PMD entry and it
387 * collided with a PTE sized entry at a different
388 * index in the PMD range. We haven't inserted
389 * anything into the radix tree and have no waiters to wake.
394 /* Good, we have inserted empty locked entry into the tree. */
395 mapping->nrexceptional++;
396 spin_unlock_irq(&mapping->tree_lock);
399 /* Normal page in radix tree? */
400 if (!radix_tree_exceptional_entry(entry)) {
401 struct page *page = entry;
404 spin_unlock_irq(&mapping->tree_lock);
406 /* Page got truncated? Retry... */
407 if (unlikely(page->mapping != mapping)) {
414 entry = lock_slot(mapping, slot);
416 spin_unlock_irq(&mapping->tree_lock);
420 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
421 pgoff_t index, bool trunc)
425 struct radix_tree_root *page_tree = &mapping->page_tree;
427 spin_lock_irq(&mapping->tree_lock);
428 entry = get_unlocked_mapping_entry(mapping, index, NULL);
429 if (!entry || !radix_tree_exceptional_entry(entry))
432 (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
433 radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
435 radix_tree_delete(page_tree, index);
436 mapping->nrexceptional--;
439 put_unlocked_mapping_entry(mapping, index, entry);
440 spin_unlock_irq(&mapping->tree_lock);
444 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
445 * entry to get unlocked before deleting it.
447 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
449 int ret = __dax_invalidate_mapping_entry(mapping, index, true);
452 * This gets called from the truncate / punch_hole path. As such, the caller
453 * must hold locks protecting against concurrent modifications of the
454 * radix tree (usually the fs-private i_mmap_sem for writing). Since the
455 * caller has seen an exceptional entry for this index, we'd better find it
456 * at that index as well...
463 * Invalidate exceptional DAX entry if it is clean.
465 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
468 return __dax_invalidate_mapping_entry(mapping, index, false);
471 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
472 sector_t sector, size_t size, struct page *to,
481 rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
485 id = dax_read_lock();
486 rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
491 vto = kmap_atomic(to);
492 copy_user_page(vto, (void __force *)kaddr, vaddr, to);
499 * By this point grab_mapping_entry() has ensured that we have a locked entry
500 * of the appropriate size so we don't have to worry about downgrading PMDs to
501 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
502 * already in the tree, we will skip the insertion and just dirty the PMD as appropriate.
505 static void *dax_insert_mapping_entry(struct address_space *mapping,
506 struct vm_fault *vmf,
507 void *entry, sector_t sector,
510 struct radix_tree_root *page_tree = &mapping->page_tree;
512 bool hole_fill = false;
514 pgoff_t index = vmf->pgoff;
516 if (vmf->flags & FAULT_FLAG_WRITE)
517 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
519 /* Replacing hole page with block mapping? */
520 if (!radix_tree_exceptional_entry(entry)) {
523 * Unmap the page now before we remove it from page cache below.
524 * The page is locked so it cannot be faulted in again.
526 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
528 error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
530 return ERR_PTR(error);
531 } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
532 /* replacing huge zero page with PMD block mapping */
533 unmap_mapping_range(mapping,
534 (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
537 spin_lock_irq(&mapping->tree_lock);
538 new_entry = dax_radix_locked_entry(sector, flags);
541 __delete_from_page_cache(entry, NULL);
542 /* Drop pagecache reference */
544 error = __radix_tree_insert(page_tree, index,
545 dax_radix_order(new_entry), new_entry);
547 new_entry = ERR_PTR(error);
550 mapping->nrexceptional++;
551 } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
553 * Only swap our new entry into the radix tree if the current
554 * entry is a zero page or an empty entry. If a normal PTE or
555 * PMD entry is already in the tree, we leave it alone. This
556 * means that if we are trying to insert a PTE and the
557 * existing entry is a PMD, we will just leave the PMD in the
558 * tree and dirty it if necessary.
560 struct radix_tree_node *node;
564 ret = __radix_tree_lookup(page_tree, index, &node, &slot);
565 WARN_ON_ONCE(ret != entry);
566 __radix_tree_replace(page_tree, node, slot,
567 new_entry, NULL, NULL);
569 if (vmf->flags & FAULT_FLAG_WRITE)
570 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
572 spin_unlock_irq(&mapping->tree_lock);
574 radix_tree_preload_end();
576 * We don't need the hole page any more; it has been replaced with a
577 * locked radix tree entry now.
579 if (mapping->a_ops->freepage)
580 mapping->a_ops->freepage(entry);
587 static inline unsigned long
588 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
590 unsigned long address;
592 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
593 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
597 /* Walk all mappings of a given index of a file and writeprotect them */
598 static void dax_mapping_entry_mkclean(struct address_space *mapping,
599 pgoff_t index, unsigned long pfn)
601 struct vm_area_struct *vma;
602 pte_t pte, *ptep = NULL;
606 i_mmap_lock_read(mapping);
607 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
608 unsigned long address, start, end;
612 if (!(vma->vm_flags & VM_SHARED))
615 address = pgoff_address(index, vma);
618 * Note because we provide start/end to follow_pte_pmd it will
619 * call mmu_notifier_invalidate_range_start() on our behalf
620 * before taking any lock.
622 if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
626 #ifdef CONFIG_FS_DAX_PMD
629 if (pfn != pmd_pfn(*pmdp))
631 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
634 flush_cache_page(vma, address, pfn);
635 pmd = pmdp_huge_clear_flush(vma, address, pmdp);
636 pmd = pmd_wrprotect(pmd);
637 pmd = pmd_mkclean(pmd);
638 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
639 mmu_notifier_invalidate_range(vma->vm_mm, start, end);
644 if (pfn != pte_pfn(*ptep))
646 if (!pte_dirty(*ptep) && !pte_write(*ptep))
649 flush_cache_page(vma, address, pfn);
650 pte = ptep_clear_flush(vma, address, ptep);
651 pte = pte_wrprotect(pte);
652 pte = pte_mkclean(pte);
653 set_pte_at(vma->vm_mm, address, ptep, pte);
654 mmu_notifier_invalidate_range(vma->vm_mm, start, end);
656 pte_unmap_unlock(ptep, ptl);
659 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
661 i_mmap_unlock_read(mapping);
664 static int dax_writeback_one(struct block_device *bdev,
665 struct dax_device *dax_dev, struct address_space *mapping,
666 pgoff_t index, void *entry)
668 struct radix_tree_root *page_tree = &mapping->page_tree;
669 void *entry2, **slot, *kaddr;
677 * A page got tagged dirty in a DAX mapping? Something is seriously wrong.
680 if (WARN_ON(!radix_tree_exceptional_entry(entry)))
683 spin_lock_irq(&mapping->tree_lock);
684 entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
685 /* Entry got punched out / reallocated? */
686 if (!entry2 || !radix_tree_exceptional_entry(entry2))
689 * Entry got reallocated elsewhere? No need to write it back. We have to
690 * compare sectors, as we must not bail out due to a difference in the lock bit or entry type.
693 if (dax_radix_sector(entry2) != dax_radix_sector(entry))
695 if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
696 dax_is_zero_entry(entry))) {
701 /* Another fsync thread may have already written back this entry */
702 if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
704 /* Lock the entry to serialize with page faults */
705 entry = lock_slot(mapping, slot);
707 * We can clear the tag now but we have to be careful so that concurrent
708 * dax_writeback_one() calls for the same index cannot finish before we
709 * actually flush the caches. This is achieved as the calls will look
710 * at the entry only under tree_lock and once they do that they will
711 * see the entry locked and wait for it to unlock.
713 radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
714 spin_unlock_irq(&mapping->tree_lock);
717 * Even if dax_writeback_mapping_range() was given a wbc->range_start
718 * in the middle of a PMD, the 'index' we are given will be aligned to
719 * the start index of the PMD, as will the sector we pull from
720 * 'entry'. This allows us to flush for PMD_SIZE and not have to
721 * worry about partial PMD writebacks.
723 sector = dax_radix_sector(entry);
724 size = PAGE_SIZE << dax_radix_order(entry);
726 id = dax_read_lock();
727 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
732 * dax_direct_access() may sleep, so we cannot hold tree_lock over its invocation.
735 ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
739 if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
744 dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
745 dax_flush(dax_dev, pgoff, kaddr, size);
747 * After we have flushed the cache, we can clear the dirty tag. There
748 * cannot be new dirty data in the pfn after the flush has completed as
749 * the pfn mappings are writeprotected and page faults wait on the mapping entry lock.
752 spin_lock_irq(&mapping->tree_lock);
753 radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
754 spin_unlock_irq(&mapping->tree_lock);
755 trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
758 put_locked_mapping_entry(mapping, index, entry);
762 put_unlocked_mapping_entry(mapping, index, entry2);
763 spin_unlock_irq(&mapping->tree_lock);
768 * Flush the mapping to the persistent domain within the byte range of [start,
769 * end]. This is required by data integrity operations to ensure file data is
770 * on persistent storage prior to completion of the operation.
772 int dax_writeback_mapping_range(struct address_space *mapping,
773 struct block_device *bdev, struct writeback_control *wbc)
775 struct inode *inode = mapping->host;
776 pgoff_t start_index, end_index;
777 pgoff_t indices[PAGEVEC_SIZE];
778 struct dax_device *dax_dev;
783 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
786 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
789 dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
793 start_index = wbc->range_start >> PAGE_SHIFT;
794 end_index = wbc->range_end >> PAGE_SHIFT;
796 trace_dax_writeback_range(inode, start_index, end_index);
798 tag_pages_for_writeback(mapping, start_index, end_index);
800 pagevec_init(&pvec, 0);
802 pvec.nr = find_get_entries_tag(mapping, start_index,
803 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
804 pvec.pages, indices);
809 for (i = 0; i < pvec.nr; i++) {
810 if (indices[i] > end_index) {
815 ret = dax_writeback_one(bdev, dax_dev, mapping,
816 indices[i], pvec.pages[i]);
818 mapping_set_error(mapping, ret);
822 start_index = indices[pvec.nr - 1] + 1;
826 trace_dax_writeback_range_done(inode, start_index, end_index);
827 return (ret < 0 ? ret : 0);
829 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
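/*
 * Illustrative sketch (not in the original source): a filesystem typically
 * invokes dax_writeback_mapping_range() from its ->writepages() method for
 * DAX inodes, roughly as ext2 and xfs do. example_dax_writepages() is a
 * hypothetical wrapper shown only for context.
 */
static int example_dax_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	/* The block device backing the inode is used to look up the dax_device. */
	return dax_writeback_mapping_range(mapping,
			mapping->host->i_sb->s_bdev, wbc);
}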
831 static int dax_insert_mapping(struct address_space *mapping,
832 struct block_device *bdev, struct dax_device *dax_dev,
833 sector_t sector, size_t size, void **entryp,
834 struct vm_area_struct *vma, struct vm_fault *vmf)
836 unsigned long vaddr = vmf->address;
837 void *entry = *entryp;
843 rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
847 id = dax_read_lock();
848 rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
855 ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
860 trace_dax_insert_mapping(mapping->host, vmf, ret);
861 return vm_insert_mixed(vma, vaddr, pfn);
865 * dax_pfn_mkwrite - handle first write to DAX page
866 * @vmf: The description of the fault
868 int dax_pfn_mkwrite(struct vm_fault *vmf)
870 struct file *file = vmf->vma->vm_file;
871 struct address_space *mapping = file->f_mapping;
872 struct inode *inode = mapping->host;
874 pgoff_t index = vmf->pgoff;
876 spin_lock_irq(&mapping->tree_lock);
877 entry = get_unlocked_mapping_entry(mapping, index, &slot);
878 if (!entry || !radix_tree_exceptional_entry(entry)) {
880 put_unlocked_mapping_entry(mapping, index, entry);
881 spin_unlock_irq(&mapping->tree_lock);
882 trace_dax_pfn_mkwrite_no_entry(inode, vmf, VM_FAULT_NOPAGE);
883 return VM_FAULT_NOPAGE;
885 radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
886 entry = lock_slot(mapping, slot);
887 spin_unlock_irq(&mapping->tree_lock);
889 * If we race with somebody updating the PTE and finish_mkwrite_fault()
890 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
891 * the fault in either case.
893 finish_mkwrite_fault(vmf);
894 put_locked_mapping_entry(mapping, index, entry);
895 trace_dax_pfn_mkwrite(inode, vmf, VM_FAULT_NOPAGE);
896 return VM_FAULT_NOPAGE;
898 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
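/*
 * Illustrative sketch (not in the original source): filesystems hook
 * dax_pfn_mkwrite() up through vm_operations_struct ->pfn_mkwrite, typically
 * bracketing it with freeze protection and a timestamp update, roughly as
 * ext4 does. example_dax_pfn_mkwrite() is hypothetical.
 */
static int example_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = dax_pfn_mkwrite(vmf);
	sb_end_pagefault(inode->i_sb);

	return ret;
}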
901 * The user has performed a load from a hole in the file. Allocating
902 * a new page in the file would cause excessive storage usage for
903 * workloads with sparse files. We allocate a page cache page instead.
904 * We'll kick it out of the page cache if it's ever written to,
905 * otherwise it will simply fall out of the page cache under memory
906 * pressure without ever having been dirtied.
908 static int dax_load_hole(struct address_space *mapping, void **entry,
909 struct vm_fault *vmf)
911 struct inode *inode = mapping->host;
915 /* Hole page already exists? Return it... */
916 if (!radix_tree_exceptional_entry(*entry)) {
921 /* This will replace locked radix tree entry with a hole page */
922 page = find_or_create_page(mapping, vmf->pgoff,
923 vmf->gfp_mask | __GFP_ZERO);
931 ret = finish_fault(vmf);
935 /* Grab reference for PTE that is now referencing the page */
937 ret = VM_FAULT_NOPAGE;
940 trace_dax_load_hole(inode, vmf, ret);
944 static bool dax_range_is_aligned(struct block_device *bdev,
945 unsigned int offset, unsigned int length)
947 unsigned short sector_size = bdev_logical_block_size(bdev);
949 if (!IS_ALIGNED(offset, sector_size))
951 if (!IS_ALIGNED(length, sector_size))
957 int __dax_zero_page_range(struct block_device *bdev,
958 struct dax_device *dax_dev, sector_t sector,
959 unsigned int offset, unsigned int size)
961 if (dax_range_is_aligned(bdev, offset, size)) {
962 sector_t start_sector = sector + (offset >> 9);
964 return blkdev_issue_zeroout(bdev, start_sector,
965 size >> 9, GFP_NOFS, 0);
972 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
976 id = dax_read_lock();
977 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
983 memset(kaddr + offset, 0, size);
984 dax_flush(dax_dev, pgoff, kaddr + offset, size);
989 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
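/*
 * Illustrative sketch (not in the original source): a caller zeroing a
 * sub-page range of a DAX extent, roughly what the iomap zeroing path does.
 * The sector arithmetic mirrors dax_iomap_sector() just below;
 * example_dax_zero() is hypothetical.
 */
static int example_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
			(((pos & PAGE_MASK) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
			offset, bytes);
}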
991 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
993 return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
997 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1000 struct block_device *bdev = iomap->bdev;
1001 struct dax_device *dax_dev = iomap->dax_dev;
1002 struct iov_iter *iter = data;
1003 loff_t end = pos + length, done = 0;
1007 if (iov_iter_rw(iter) == READ) {
1008 end = min(end, i_size_read(inode));
1012 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1013 return iov_iter_zero(min(length, end - pos), iter);
1016 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1020 * A write can allocate a block for an area which has a hole page mapped
1021 * into the page tables. We have to tear down these mappings so that data
1022 * written by write(2) is visible in mmap.
1024 if (iomap->flags & IOMAP_F_NEW) {
1025 invalidate_inode_pages2_range(inode->i_mapping,
1027 (end - 1) >> PAGE_SHIFT);
1030 id = dax_read_lock();
1032 unsigned offset = pos & (PAGE_SIZE - 1);
1033 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1034 const sector_t sector = dax_iomap_sector(iomap, pos);
1040 if (fatal_signal_pending(current)) {
1045 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1049 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1056 map_len = PFN_PHYS(map_len);
1059 if (map_len > end - pos)
1060 map_len = end - pos;
1062 if (iov_iter_rw(iter) == WRITE)
1063 map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1066 map_len = copy_to_iter(kaddr, map_len, iter);
1068 ret = map_len ? map_len : -EFAULT;
1076 dax_read_unlock(id);
1078 return done ? done : ret;
1082 * dax_iomap_rw - Perform I/O to a DAX file
1083 * @iocb: The control block for this I/O
1084 * @iter: The addresses to do I/O from or to
1085 * @ops: iomap ops passed from the file system
1087 * This function performs read and write operations to directly mapped
1088 * persistent memory. The caller needs to take care of read/write exclusion
1089 * and evicting any page cache pages in the region under I/O.
1092 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1093 const struct iomap_ops *ops)
1095 struct address_space *mapping = iocb->ki_filp->f_mapping;
1096 struct inode *inode = mapping->host;
1097 loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1100 if (iov_iter_rw(iter) == WRITE) {
1101 lockdep_assert_held_exclusive(&inode->i_rwsem);
1102 flags |= IOMAP_WRITE;
1104 lockdep_assert_held(&inode->i_rwsem);
1107 while (iov_iter_count(iter)) {
1108 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1109 iter, dax_iomap_actor);
1116 iocb->ki_pos += done;
1117 return done ? done : ret;
1119 EXPORT_SYMBOL_GPL(dax_iomap_rw);
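/*
 * Illustrative sketch (not in the original source): a typical ->read_iter()
 * built on dax_iomap_rw(), modelled loosely on ext2/xfs. It assumes the
 * filesystem supplies an iomap_ops table, referred to here as the
 * hypothetical example_iomap_ops.
 */
extern const struct iomap_ops example_iomap_ops;	/* assumed, per-fs */

static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* skip the atime update for zero-length reads */

	/* dax_iomap_rw() asserts i_rwsem is held: shared is enough for reads. */
	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}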
1121 static int dax_fault_return(int error)
1124 return VM_FAULT_NOPAGE;
1125 if (error == -ENOMEM)
1126 return VM_FAULT_OOM;
1127 return VM_FAULT_SIGBUS;
1130 static int dax_iomap_pte_fault(struct vm_fault *vmf,
1131 const struct iomap_ops *ops)
1133 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1134 struct inode *inode = mapping->host;
1135 unsigned long vaddr = vmf->address;
1136 loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1138 struct iomap iomap = { 0 };
1139 unsigned flags = IOMAP_FAULT;
1140 int error, major = 0;
1144 trace_dax_pte_fault(inode, vmf, vmf_ret);
1146 * Check that the offset isn't beyond the end of the file. The caller is supposed
1147 * to hold locks serializing us with truncate / punch hole, so this is a reliable test.
1150 if (pos >= i_size_read(inode)) {
1151 vmf_ret = VM_FAULT_SIGBUS;
1155 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1156 flags |= IOMAP_WRITE;
1158 entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1159 if (IS_ERR(entry)) {
1160 vmf_ret = dax_fault_return(PTR_ERR(entry));
1165 * It is possible, particularly with mixed reads & writes to private
1166 * mappings, that we have raced with a PMD fault that overlaps with
1167 * the PTE we need to set up. If so, just return and the fault will be retried.
1170 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1171 vmf_ret = VM_FAULT_NOPAGE;
1176 * Note that we don't bother to use iomap_apply here: DAX requires
1177 * the filesystem block size to be equal to the page size, which means
1178 * that we never have to deal with more than a single extent here.
1180 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1182 vmf_ret = dax_fault_return(error);
1185 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1186 error = -EIO; /* fs corruption? */
1187 goto error_finish_iomap;
1190 sector = dax_iomap_sector(&iomap, pos);
1192 if (vmf->cow_page) {
1193 switch (iomap.type) {
1195 case IOMAP_UNWRITTEN:
1196 clear_user_highpage(vmf->cow_page, vaddr);
1199 error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1200 sector, PAGE_SIZE, vmf->cow_page, vaddr);
1209 goto error_finish_iomap;
1211 __SetPageUptodate(vmf->cow_page);
1212 vmf_ret = finish_fault(vmf);
1214 vmf_ret = VM_FAULT_DONE_COW;
1218 switch (iomap.type) {
1220 if (iomap.flags & IOMAP_F_NEW) {
1221 count_vm_event(PGMAJFAULT);
1222 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1223 major = VM_FAULT_MAJOR;
1225 error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
1226 sector, PAGE_SIZE, &entry, vmf->vma, vmf);
1227 /* -EBUSY is fine, somebody else faulted on the same PTE */
1228 if (error == -EBUSY)
1231 case IOMAP_UNWRITTEN:
1233 if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1234 vmf_ret = dax_load_hole(mapping, &entry, vmf);
1245 vmf_ret = dax_fault_return(error) | major;
1247 if (ops->iomap_end) {
1248 int copied = PAGE_SIZE;
1250 if (vmf_ret & VM_FAULT_ERROR)
1253 * The fault is done by now and there's no way back (another
1254 * thread may already be happily using the PTE we have installed).
1255 * Just ignore any error from ->iomap_end since we cannot do much with it anyway.
1258 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1261 put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1263 trace_dax_pte_fault_done(inode, vmf, vmf_ret);
1267 #ifdef CONFIG_FS_DAX_PMD
1269 * The 'colour' (i.e. the low bits) of a page offset within a PMD. This comes up
1270 * more often than one might expect in the functions below.
1272 #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
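/*
 * Worked example: with 4k pages and 2MiB PMDs, PG_PMD_COLOUR is 0x1ff. A file
 * offset with page index 0x201 therefore has colour 0x001, and only a virtual
 * address whose PTE slot within its PMD is also 0x001 can be mapped by a PMD
 * for that offset (see the colour check in dax_iomap_pmd_fault() below).
 */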
1274 static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
1275 loff_t pos, void **entryp)
1277 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1278 const sector_t sector = dax_iomap_sector(iomap, pos);
1279 struct dax_device *dax_dev = iomap->dax_dev;
1280 struct block_device *bdev = iomap->bdev;
1281 struct inode *inode = mapping->host;
1282 const size_t size = PMD_SIZE;
1283 void *ret = NULL, *kaddr;
1289 if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
1292 id = dax_read_lock();
1293 length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
1295 goto unlock_fallback;
1296 length = PFN_PHYS(length);
1299 goto unlock_fallback;
1300 if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
1301 goto unlock_fallback;
1302 if (!pfn_t_devmap(pfn))
1303 goto unlock_fallback;
1304 dax_read_unlock(id);
1306 ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
1312 trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
1313 return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1314 pfn, vmf->flags & FAULT_FLAG_WRITE);
1317 dax_read_unlock(id);
1319 trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
1320 return VM_FAULT_FALLBACK;
1323 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1326 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1327 unsigned long pmd_addr = vmf->address & PMD_MASK;
1328 struct inode *inode = mapping->host;
1329 struct page *zero_page;
1334 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1336 if (unlikely(!zero_page))
1339 ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1340 RADIX_DAX_PMD | RADIX_DAX_HZP);
1345 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1346 if (!pmd_none(*(vmf->pmd))) {
1351 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1352 pmd_entry = pmd_mkhuge(pmd_entry);
1353 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1355 trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1356 return VM_FAULT_NOPAGE;
1359 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1360 return VM_FAULT_FALLBACK;
1363 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1364 const struct iomap_ops *ops)
1366 struct vm_area_struct *vma = vmf->vma;
1367 struct address_space *mapping = vma->vm_file->f_mapping;
1368 unsigned long pmd_addr = vmf->address & PMD_MASK;
1369 bool write = vmf->flags & FAULT_FLAG_WRITE;
1370 unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1371 struct inode *inode = mapping->host;
1372 int result = VM_FAULT_FALLBACK;
1373 struct iomap iomap = { 0 };
1374 pgoff_t max_pgoff, pgoff;
1380 * Check whether offset isn't beyond end of file now. Caller is
1381 * supposed to hold locks serializing us with truncate / punch hole so
1382 * this is a reliable test.
1384 pgoff = linear_page_index(vma, pmd_addr);
1385 max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1387 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1390 * Make sure that the faulting address's PMD offset (color) matches
1391 * the PMD offset from the start of the file. This is necessary so
1392 * that a PMD range in the page table overlaps exactly with a PMD
1393 * range in the radix tree.
1395 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1396 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1399 /* Fall back to PTEs if we're going to COW */
1400 if (write && !(vma->vm_flags & VM_SHARED))
1403 /* If the PMD would extend outside the VMA */
1404 if (pmd_addr < vma->vm_start)
1406 if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1409 if (pgoff > max_pgoff) {
1410 result = VM_FAULT_SIGBUS;
1414 /* If the PMD would extend beyond the file size */
1415 if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1419 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1420 * PMD or a HZP entry. If it can't (because a 4k page is already in
1421 * the tree, for instance), it will return -EEXIST and we just fall
1422 * back to 4k entries.
1424 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1429 * It is possible, particularly with mixed reads & writes to private
1430 * mappings, that we have raced with a PTE fault that overlaps with
1431 * the PMD we need to set up. If so, just return and the fault will be retried.
1434 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1435 !pmd_devmap(*vmf->pmd)) {
1441 * Note that we don't use iomap_apply here. We aren't doing I/O, only
1442 * setting up a mapping, so really we're using iomap_begin() as a way
1443 * to look up our filesystem block.
1445 pos = (loff_t)pgoff << PAGE_SHIFT;
1446 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1450 if (iomap.offset + iomap.length < pos + PMD_SIZE)
1453 switch (iomap.type) {
1455 result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
1457 case IOMAP_UNWRITTEN:
1459 if (WARN_ON_ONCE(write))
1461 result = dax_pmd_load_hole(vmf, &iomap, &entry);
1469 if (ops->iomap_end) {
1470 int copied = PMD_SIZE;
1472 if (result == VM_FAULT_FALLBACK)
1475 * The fault is done by now and there's no way back (another
1476 * thread may already be happily using the PMD we have installed).
1477 * Just ignore any error from ->iomap_end since we cannot do much with it anyway.
1480 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1484 put_locked_mapping_entry(mapping, pgoff, entry);
1486 if (result == VM_FAULT_FALLBACK) {
1487 split_huge_pmd(vma, vmf->pmd, vmf->address);
1488 count_vm_event(THP_FAULT_FALLBACK);
1491 trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1495 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1496 const struct iomap_ops *ops)
1498 return VM_FAULT_FALLBACK;
1500 #endif /* CONFIG_FS_DAX_PMD */
1503 * dax_iomap_fault - handle a page fault on a DAX file
1504 * @vmf: The description of the fault
1505 * @ops: iomap ops passed from the file system
1507 * When a page fault occurs, filesystems may call this helper in
1508 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1509 * has done all the necessary locking for the page fault to proceed successfully.
1512 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1513 const struct iomap_ops *ops)
1517 return dax_iomap_pte_fault(vmf, ops);
1519 return dax_iomap_pmd_fault(vmf, ops);
1521 return VM_FAULT_FALLBACK;
1524 EXPORT_SYMBOL_GPL(dax_iomap_fault);
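/*
 * Illustrative sketch (not in the original source): how a filesystem's
 * ->fault/->huge_fault handlers typically forward to dax_iomap_fault(),
 * bracketed with freeze protection and a timestamp update for writes, roughly
 * as ext4 and xfs do. example_dax_huge_fault() is hypothetical and reuses the
 * assumed example_iomap_ops declared above.
 */
static int example_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	int ret;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	ret = dax_iomap_fault(vmf, pe_size, &example_iomap_ops);
	if (write)
		sb_end_pagefault(inode->i_sb);

	return ret;
}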