/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};
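
/*
 * Media errors ("poison") are tracked per namespace in the badblocks
 * instance above, in units of 512-byte sectors.
 */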
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = disk_to_dev(pmem->pmem_disk);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}
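
/*
 * Reads from a known-bad range fail with -EIO.  Writes always hit the
 * media first; if the range was poisoned, the poison is cleared and the
 * data is written a second time to the now-valid media.
 */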
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}
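
/*
 * Segments are processed synchronously in the submission path; for
 * writes, a single wmb_pmem() after the loop makes all preceding
 * memcpy_to_pmem() calls durable before the bio is completed.
 */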
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}
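
/*
 * ->direct_access() is the DAX entry point: it returns a kernel virtual
 * address and pfn for the given sector, plus the number of bytes that
 * remain addressable from that offset.
 */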
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};
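
/*
 * Allocations and mappings below are device-managed (devm) and torn
 * down automatically on driver unbind.  pmem_should_map_pages() decides
 * whether the region is mapped with a struct page memmap via
 * devm_memremap_pages() (reflected in PFN_MAP) or with a plain
 * devm_memremap().
 */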
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	int nid = dev_to_node(dev);
	struct resource bb_res;
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	bb_res.start = nsio->res.start + pmem->data_offset;
	bb_res.end = nsio->res.end;
	if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
			&bb_res);
	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}
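
/*
 * A 'pfn' instance reserves namespace capacity for metadata: the info
 * block below is written at a 4K offset, and in PFN_MODE_PMEM the
 * struct page memmap is also carved out of pmem ahead of the data area
 * described by ->dataoff.
 */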
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = 0, end_trunc = 0;
	resource_size_t start, size;
	struct nd_namespace_io *nsio;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));

	/*
	 * Check if pmem collides with 'System RAM' when section aligned and
	 * trim it accordingly
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
	size = resource_size(&nsio->res);
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
			IORES_DESC_NONE) == REGION_MIXED) {

		start = nsio->res.start;
		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
	}

	start = nsio->res.start;
	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
			IORES_DESC_NONE) == REGION_MIXED) {
		size = resource_size(&nsio->res);
		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
	}

	if (start_pad + end_trunc)
		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
				dev_name(&ndns->dev), start_pad + end_trunc);

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	start += start_pad;
	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
			- start;
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
	else
		goto err;
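
	/*
	 * Worked example with hypothetical numbers: a section-aligned
	 * 4 GiB namespace in PFN_MODE_PMEM with 2 MiB alignment and no
	 * padding gives npfns = (4G - 8K) / 4K = 1048574, so the memmap
	 * needs 8K + 64 * 1048574 bytes and the data area starts at
	 * offset = ALIGN(that, 2M) = 66 MiB into the namespace.
	 */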

	if (offset + start_pad + end_trunc >= pmem->size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		goto err;
	}

	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(1);
	pfn_sb->start_pad = cpu_to_le32(start_pad);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

/*
 * We hotplug memory at section granularity, pad the reserved area from
 * the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return PFN_SECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K);
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}
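
/*
 * In PFN_MODE_PMEM the vmem_altmap below directs devm_memremap_pages()
 * to allocate the struct page memmap from the pmem capacity reserved by
 * nd_pfn_init() instead of from regular system memory.
 */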
static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
	int rc;
	struct resource res;
	struct request_queue *q;
	struct pmem_device *pmem;
	struct vmem_altmap *altmap;
	struct device *dev = &nd_pfn->dev;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	pmem = dev_get_drvdata(dev);
	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
	pmem->pfn_pad = start_pad + end_trunc;
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (pmem->data_offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = &__altmap;
		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	q = pmem->pmem_queue;
	memcpy(&res, &nsio->res, sizeof(res));
	res.start += start_pad;
	res.end -= end_trunc;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;
	/* we need a valid pfn_sb before we can init a vmem_altmap */
	return __nvdimm_namespace_attach_pfn(nd_pfn);
}
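
/*
 * Probe resolves the namespace "personality".  If a btt or pfn info
 * block is detected on a raw namespace, probe returns -ENXIO and the
 * device is expected to come back as btt-pmem or pfn-pmem respectively
 * (see the comment in the probe body below).
 */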
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}
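
/*
 * On an NVDIMM_REVALIDATE_POISON event, rebuild the badblocks list for
 * the data area only, skipping any pfn metadata and padding at the
 * edges of the namespace.
 */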
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_namespace_common *ndns = pmem->ndns;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct resource res = {
		.start = nsio->res.start + pmem->data_offset,
		.end = nsio->res.end,
	};

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		res.start += __le32_to_cpu(pfn_sb->start_pad);
		res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}

	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");