/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/magic.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"

static dev_t dax_devt;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");

/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region for a memory range
 * @ida: instance id allocator
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are paged back or not
 */
struct dax_region {
	int id;
	struct ida ida;
	void *base;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct resource res;
	unsigned long pfn_flags;
};

/**
 * struct dax_dev - subdivision of a dax region
 * @region - parent region
 * @inode - backing inode, used to revoke established mappings on removal
 * @dev - device backing the character device
 * @cdev - core chardev data
 * @alive - !alive + rcu grace period == no new mappings can be established
 * @id - child id in the region
 * @num_resources - number of physical address extents in this device
 * @res - array of physical address ranges
 */
struct dax_dev {
	struct dax_region *region;
	struct inode *inode;
	struct device dev;
	struct cdev cdev;
	bool alive;
	int id;
	int num_resources;
	struct resource res[0];
};

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	dax_region = dev_get_drvdata(dev);
	if (dax_region)
		rc = sprintf(buf, "%d\n", dax_region->id);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	dax_region = dev_get_drvdata(dev);
	if (dax_region)
		rc = sprintf(buf, "%llu\n", (unsigned long long)
				resource_size(&dax_region->res));
	device_unlock(dev);

	return rc;
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	dax_region = dev_get_drvdata(dev);
	if (dax_region)
		rc = sprintf(buf, "%u\n", dax_region->align);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(align);

static struct attribute *dax_region_attributes[] = {
	&dev_attr_region_size.attr,
	&dev_attr_align.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(dax_cache, inode);
}

static void dax_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	return inode->i_cdev == data;
}

static int dax_set(struct inode *inode, void *data)
{
	inode->i_cdev = data;
	return 0;
}

static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, cdev);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		inode->i_rdev = devt;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return inode;
}

static void init_once(void *inode)
{
	inode_init_once(inode);
}

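/*
 * Device-dax keeps a private pseudo-filesystem so that each character
 * device can own an inode and address_space: unregister_dax_dev() relies
 * on that mapping to call unmap_mapping_range() and revoke all
 * established mappings when the device goes away.
 */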
static int dax_inode_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_inode_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_type);
	kmem_cache_destroy(dax_cache);
}

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;
	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

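/*
 * Example (illustrative sketch, not taken from a real driver): a
 * device-dax provider such as dax_pmem is expected to pair
 * alloc_dax_region() with devm_create_dax_dev() in its probe path; the
 * resource, alignment, and pfn_flags below are hypothetical:
 *
 *	struct dax_region *dax_region;
 *	struct dax_dev *dax_dev;
 *
 *	dax_region = alloc_dax_region(dev, region_id, &res, SZ_2M,
 *			addr, PFN_DEV|PFN_MAP);
 *	if (!dax_region)
 *		return -ENOMEM;
 *
 *	dax_dev = devm_create_dax_dev(dax_region, &res, 1);
 *	dax_region_put(dax_region);
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 */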
static struct dax_dev *to_dax_dev(struct device *dev)
{
	return container_of(dev, struct dax_dev, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++)
		size += resource_size(&dax_dev->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dax_device_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dax_device_attribute_group = {
	.attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dax_device_attribute_group,
	NULL,
};

static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dax_dev->region;
	struct device *dev = &dax_dev->dev;
	unsigned long mask;

	if (!dax_dev->alive)
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	phys_addr_t phys;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++) {
		res = &dax_dev->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dax_dev->num_resources) {
		res = &dax_dev->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

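/*
 * Worked example (hypothetical layout): with res[0] spanning
 * 0x100000000-0x13fffffff (1G, 0x40000 pages) and res[1] spanning
 * 0x200000000-0x23fffffff, a fault at pgoff 0x40001 first overshoots
 * res[0] (phys lands past res[0].end), so the loop subtracts res[0]'s
 * 0x40000 pages and retries against res[1], yielding phys 0x200001000.
 * The trailing check rejects a result whose final byte
 * (phys + size - 1) would cross the extent's end.
 */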
static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;

	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;

	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;

	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}
#else
static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

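/*
 * Fault entry point for all mapping sizes. The rcu read lock is held
 * across the size-specific handlers so that unregister_dax_dev() can
 * pair "alive = false; synchronize_rcu()" with in-flight faults: any
 * fault that observed alive == true completes before teardown proceeds.
 */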
static int dax_dev_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int rc;
	struct file *filp = vmf->vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
			? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end);

	rcu_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		rc = __dax_dev_pte_fault(dax_dev, vmf);
		break;
	case PE_SIZE_PMD:
		rc = __dax_dev_pmd_fault(dax_dev, vmf);
		break;
	case PE_SIZE_PUD:
		rc = __dax_dev_pud_fault(dax_dev, vmf);
		break;
	default:
		/* set rc rather than return: do not leak the rcu read lock */
		rc = VM_FAULT_FALLBACK;
	}
	rcu_read_unlock();

	return rc;
}

static int dax_dev_fault(struct vm_fault *vmf)
{
	return dax_dev_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct dax_dev_vm_ops = {
	.fault = dax_dev_fault,
	.huge_fault = dax_dev_huge_fault,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dax_dev *dax_dev = filp->private_data;
	int rc;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);

	rc = check_vma(dax_dev, vma, __func__);
	if (rc)
		return rc;

	vma->vm_ops = &dax_dev_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

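/*
 * Userspace sketch (hypothetical device node and size; error handling
 * omitted): device-dax mappings must be MAP_SHARED and sized/aligned to
 * the region alignment, otherwise check_vma() rejects them:
 *
 *	int fd = open("/dev/dax0.0", O_RDWR);
 *	void *p = mmap(NULL, 1UL << 21, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */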
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dax_dev || addr)
		goto out;

	dax_region = dax_dev->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

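/*
 * Worked example (hypothetical numbers): for a 2M-aligned region, a 4M
 * request at file offset 0 asks the core allocator for len + align = 6M
 * of address space. If that search returns 0x7f0000100000, then
 * (off - addr_align) & (align - 1) == 0x100000 is added, producing the
 * 2M-aligned address 0x7f0000200000 with the full 4M still fitting in
 * the over-allocated range.
 */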
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev;

	dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	inode->i_mapping = dax_dev->inode->i_mapping;
	inode->i_mapping->host = dax_dev->inode;
	filp->f_mapping = inode->i_mapping;
	filp->private_data = dax_dev;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
};

static void dax_dev_release(struct device *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct dax_region *dax_region = dax_dev->region;

	ida_simple_remove(&dax_region->ida, dax_dev->id);
	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
	dax_region_put(dax_region);
	iput(dax_dev->inode);
	kfree(dax_dev);
}

static void unregister_dax_dev(void *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct cdev *cdev = &dax_dev->cdev;

	dev_dbg(dev, "%s\n", __func__);

	/*
	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
	 * ensuring that any fault handlers that might have seen
	 * dax_dev->alive == true, have completed. Any fault handlers
	 * that start after synchronize_rcu() has started will abort
	 * upon seeing dax_dev->alive == false.
	 */
	dax_dev->alive = false;
	synchronize_rcu();
	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
	cdev_del(cdev);
	device_unregister(dev);
}

struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
		struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_dev *dax_dev;
	int rc = 0, minor, i;
	struct device *dev;
	struct cdev *cdev;
	dev_t dev_t;

	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
	if (!dax_dev)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dax_dev->res[i].start = res[i].start;
		dax_dev->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
	if (dax_dev->id < 0) {
		rc = dax_dev->id;
		goto err_id;
	}

	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_minor;
	}

	dev_t = MKDEV(MAJOR(dax_devt), minor);
	dev = &dax_dev->dev;
	dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
	if (!dax_dev->inode) {
		rc = -ENOMEM;
		goto err_inode;
	}

	/* device_initialize() so cdev can reference kobj parent */
	device_initialize(dev);

	cdev = &dax_dev->cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;
	cdev->kobj.parent = &dev->kobj;
	rc = cdev_add(&dax_dev->cdev, dev_t, 1);
	if (rc)
		goto err_cdev;

	/* from here on we're committed to teardown via dax_dev_release() */
	dax_dev->num_resources = count;
	dax_dev->alive = true;
	dax_dev->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = dev_t;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dax_dev_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
	if (rc)
		return ERR_PTR(rc);

	return dax_dev;

 err_cdev:
	iput(dax_dev->inode);
 err_inode:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
	kfree(dax_dev);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);

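/*
 * Note: together with dax_class, the dev_set_name() above means each
 * successfully added instance surfaces to userspace as a character
 * device node named /dev/daxX.Y (region id, instance id), subject to
 * the distribution's udev policy.
 */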
static int __init dax_init(void)
{
	int rc;

	rc = dax_inode_init();
	if (rc)
		return rc;

	nr_dax = max(nr_dax, 256);
	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
	if (rc)
		goto err_chrdev;

	dax_class = class_create(THIS_MODULE, "dax");
	if (IS_ERR(dax_class)) {
		rc = PTR_ERR(dax_class);
		goto err_class;
	}

	return 0;

 err_class:
	unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
	dax_inode_exit();
	return rc;
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
	unregister_chrdev_region(dax_devt, nr_dax);
	ida_destroy(&dax_minor_ida);
	dax_inode_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);