asedeno.scripts.mit.edu Git - linux.git/commitdiff
dax: Factor out getting of pfn out of iomap
author: Jan Kara <jack@suse.cz>
Wed, 1 Nov 2017 15:36:33 +0000 (16:36 +0100)
committer: Dan Williams <dan.j.williams@intel.com>
Fri, 3 Nov 2017 13:26:23 +0000 (06:26 -0700)
Factor out code to get pfn out of iomap that is shared between PTE and
PMD fault path.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
fs/dax.c

index 0bc42ac294ca9ff5f82bf5eb4db8761ce0204d59..116eef8d6c6915852aa5df30fe9de4cfdcce4d45 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -825,30 +825,53 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
        return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
 }
 
-static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
-                             loff_t pos, void *entry)
+static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
+                        pfn_t *pfnp)
 {
        const sector_t sector = dax_iomap_sector(iomap, pos);
-       struct vm_area_struct *vma = vmf->vma;
-       struct address_space *mapping = vma->vm_file->f_mapping;
-       unsigned long vaddr = vmf->address;
-       void *ret, *kaddr;
        pgoff_t pgoff;
+       void *kaddr;
        int id, rc;
-       pfn_t pfn;
+       long length;
 
-       rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
+       rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
        if (rc)
                return rc;
-
        id = dax_read_lock();
-       rc = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(PAGE_SIZE),
-                              &kaddr, &pfn);
-       if (rc < 0) {
-               dax_read_unlock(id);
-               return rc;
+       length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
+                                  &kaddr, pfnp);
+       if (length < 0) {
+               rc = length;
+               goto out;
        }
+       rc = -EINVAL;
+       if (PFN_PHYS(length) < size)
+               goto out;
+       if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
+               goto out;
+       /* For larger pages we need devmap */
+       if (length > 1 && !pfn_t_devmap(*pfnp))
+               goto out;
+       rc = 0;
+out:
        dax_read_unlock(id);
+       return rc;
+}
+
+static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
+                             loff_t pos, void *entry)
+{
+       const sector_t sector = dax_iomap_sector(iomap, pos);
+       struct vm_area_struct *vma = vmf->vma;
+       struct address_space *mapping = vma->vm_file->f_mapping;
+       unsigned long vaddr = vmf->address;
+       void *ret;
+       int rc;
+       pfn_t pfn;
+
+       rc = dax_iomap_pfn(iomap, pos, PAGE_SIZE, &pfn);
+       if (rc < 0)
+               return rc;
 
        ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
        if (IS_ERR(ret))
@@ -1223,46 +1246,26 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        const sector_t sector = dax_iomap_sector(iomap, pos);
-       struct dax_device *dax_dev = iomap->dax_dev;
-       struct block_device *bdev = iomap->bdev;
        struct inode *inode = mapping->host;
-       const size_t size = PMD_SIZE;
-       void *ret = NULL, *kaddr;
-       long length = 0;
-       pgoff_t pgoff;
+       void *ret = NULL;
        pfn_t pfn = {};
-       int id;
+       int rc;
 
-       if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
+       rc = dax_iomap_pfn(iomap, pos, PMD_SIZE, &pfn);
+       if (rc < 0)
                goto fallback;
 
-       id = dax_read_lock();
-       length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
-       if (length < 0)
-               goto unlock_fallback;
-       length = PFN_PHYS(length);
-
-       if (length < size)
-               goto unlock_fallback;
-       if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
-               goto unlock_fallback;
-       if (!pfn_t_devmap(pfn))
-               goto unlock_fallback;
-       dax_read_unlock(id);
-
        ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
                        RADIX_DAX_PMD);
        if (IS_ERR(ret))
                goto fallback;
 
-       trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
+       trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, ret);
        return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
                        pfn, vmf->flags & FAULT_FLAG_WRITE);
 
-unlock_fallback:
-       dax_read_unlock(id);
 fallback:
-       trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
+       trace_dax_pmd_insert_mapping_fallback(inode, vmf, PMD_SIZE, pfn, ret);
        return VM_FAULT_FALLBACK;
 }