--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -226,8 +226,7 @@ static void dax_region_unregister(void *region)
}
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, int target_node, unsigned int align,
- unsigned long long pfn_flags)
+ struct resource *res, int target_node, unsigned int align)
{
struct dax_region *dax_region;
@@ -251,7 +250,6 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
dev_set_drvdata(parent, dax_region);
memcpy(&dax_region->res, res, sizeof(*res));
- dax_region->pfn_flags = pfn_flags;
kref_init(&dax_region->kref);
dax_region->id = region_id;
dax_region->align = align;
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -10,8 +10,7 @@ struct dax_device;
struct dax_region;
void dax_region_put(struct dax_region *dax_region);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, int target_node, unsigned int align,
- unsigned long long flags);
+ struct resource *res, int target_node, unsigned int align);
enum dev_dax_subsys {
DEV_DAX_BUS,
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -23,7 +23,6 @@ void dax_bus_exit(void);
* @dev: parent device backing this region
* @align: allocation and mapping alignment for child dax devices
* @res: physical address range of the region
- * @pfn_flags: identify whether the pfns are paged back or not
*/
struct dax_region {
int id;
@@ -32,7 +31,6 @@ struct dax_region {
struct device *dev;
unsigned int align;
struct resource res;
- unsigned long long pfn_flags;
};
/**
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -41,14 +41,6 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return -EINVAL;
}
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
- && (vma->vm_flags & VM_DONTCOPY) == 0) {
- dev_info_ratelimited(dev,
- "%s: %s: fail, dax range requires MADV_DONTFORK\n",
- current->comm, func);
- return -EINVAL;
- }
-
if (!vma_is_dax(vma)) {
dev_info_ratelimited(dev,
"%s: %s: fail, vma is not DAX capable\n",
@@ -102,7 +94,7 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}
@@ -127,12 +119,6 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- /* dax pmd mappings require pfn_t_devmap() */
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
- dev_dbg(dev, "region lacks devmap flags\n");
- return VM_FAULT_SIGBUS;
- }
-
if (fault_size < dax_region->align)
return VM_FAULT_SIGBUS;
else if (fault_size > dax_region->align)
@@ -150,7 +136,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
@@ -177,12 +163,6 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- /* dax pud mappings require pfn_t_devmap() */
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
- dev_dbg(dev, "region lacks devmap flags\n");
- return VM_FAULT_SIGBUS;
- }
-
if (fault_size < dax_region->align)
return VM_FAULT_SIGBUS;
else if (fault_size > dax_region->align)
@@ -200,7 +180,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -22,7 +22,7 @@ static int dax_hmem_probe(struct platform_device *pdev)
memcpy(&pgmap.res, res, sizeof(*res));
dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
- PMD_SIZE, PFN_DEV|PFN_MAP);
+ PMD_SIZE);
if (!dax_region)
return -ENOMEM;
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -53,8 +53,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
memcpy(&res, &pgmap.res, sizeof(res));
res.start += offset;
dax_region = alloc_dax_region(dev, region_id, &res,
- nd_region->target_node, le32_to_cpu(pfn_sb->align),
- PFN_DEV|PFN_MAP);
+ nd_region->target_node, le32_to_cpu(pfn_sb->align));
if (!dax_region)
return ERR_PTR(-ENOMEM);
All callers specify the same flags to alloc_dax_region(), so there is
no need to allow for anything other than PFN_DEV|PFN_MAP, or carry a
->pfn_flags around on the region. Device-dax instances are always page
backed.

Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/dax/bus.c         |  4 +---
 drivers/dax/bus.h         |  3 +--
 drivers/dax/dax-private.h |  2 --
 drivers/dax/device.c      | 26 +++-----------------------
 drivers/dax/hmem/hmem.c   |  2 +-
 drivers/dax/pmem/core.c   |  3 +--
 6 files changed, 7 insertions(+), 33 deletions(-)
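
For reference, a minimal standalone sketch of why the deleted devmap
checks were dead code. The constants and helpers below are mirrored
from include/linux/pfn_t.h purely for illustration (the bit positions
assume a 64-bit build and PAGE_SHIFT assumes 4K pages); this is a
model of the encoding, not the kernel implementation:

/*
 * Standalone model of the pfn_t flag encoding (mirrored from
 * include/linux/pfn_t.h; values assume BITS_PER_LONG_LONG == 64 and
 * PAGE_SHIFT == 12). It shows that once every dax_region is created
 * with PFN_DEV|PFN_MAP, a pfn_t_devmap()-style test on the region's
 * pfns can never fail.
 */
#include <stdint.h>
#include <stdio.h>

#define PFN_DEV		(1ULL << 61)	/* BITS_PER_LONG_LONG - 3 */
#define PFN_MAP		(1ULL << 60)	/* BITS_PER_LONG_LONG - 4 */
#define PAGE_SHIFT	12

typedef struct { uint64_t val; } pfn_t;

/* mirrors phys_to_pfn_t(): pfn in the low bits, flags in the high bits */
static pfn_t phys_to_pfn_t(uint64_t phys, uint64_t flags)
{
	return (pfn_t){ .val = (phys >> PAGE_SHIFT) | flags };
}

/* mirrors pfn_t_devmap(): true only when both PFN_DEV and PFN_MAP are set */
static int pfn_t_devmap(pfn_t pfn)
{
	return (pfn.val & (PFN_DEV | PFN_MAP)) == (PFN_DEV | PFN_MAP);
}

int main(void)
{
	/* every alloc_dax_region() caller passed PFN_DEV|PFN_MAP ... */
	pfn_t pfn = phys_to_pfn_t(0x100000000ULL, PFN_DEV | PFN_MAP);

	/* ... so the "region lacks devmap flags" branch never ran */
	printf("devmap: %d\n", pfn_t_devmap(pfn));	/* always prints 1 */
	return 0;
}

With every region hard-coded to PFN_DEV|PFN_MAP, the
(pfn_flags & (PFN_DEV|PFN_MAP)) tests in check_vma() and in the
pmd/pud fault handlers reduce to constants, which is what lets the
patch drop them together with ->pfn_flags itself.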