/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
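/*
 * Worked example with hypothetical addresses (not from the source): if a
 * DIMM publishes two flush hints at 0x10000400 and 0x10000800, both land
 * in the page at 0x10000000, so the j-loop above finds the shared pfn and
 * the second slot reuses the first slot's mapping:
 *
 *	slot 0 = devm_nvdimm_ioremap(0x10000000) + 0x400
 *	slot 1 = ((unsigned long) slot 0 & PAGE_MASK) + 0x800
 *
 * i.e. each backing page is ioremapped at most once per DIMM.
 */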
int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This should prevent the
	 * unnecessary flushing of buffers that have already been flushed
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}
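/*
 * Sizing sketch with hypothetical values (not from the source): for a
 * region with two DIMM mappings, each advertising 4 flush hints,
 * nd_region_activate() computes:
 *
 *	flush_data_size = sizeof(void *) * (1 + 2 * 1 + 2 * 4)	(11 slots)
 *	num_flush = min_not_zero(4, 4) = 4
 *	ndrd->hints_shift = ilog2(4) = 2
 *
 * so nvdimm_map_flush() then maps 1 << 2 == 4 hints per DIMM.
 */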
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);
struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
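/*
 * For example (assuming the uapi type values in include/uapi/linux/ndctl.h
 * and its "nd:t%d" modalias format): a namespace device on a label-less
 * pmem region ends up with MODALIAS=nd:t4 (ND_DEVICE_NAMESPACE_IO), while
 * an aliased region yields nd:t5 (ND_DEVICE_NAMESPACE_PMEM).
 */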
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	nvdimm_flush(nd_region);

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);
static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return badblocks_show(&nd_region->bb, buf, 0);
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);
static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
		return 0;

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}
struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}
void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
			&& probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
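/*
 * Example output (hypothetical values): reading mapping0 for a region
 * whose first mapping is "nmem0" at DPA 0 with a 256MiB span returns:
 *
 *	nmem0,0,268435456
 */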
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)
/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}
static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
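/*
 * Usage sketch (not a caller from this file): a BTT- or BLK-style I/O
 * path brackets its access to a per-lane resource like so:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	... touch the data window / log slot for 'lane' ...
 *
 *	nd_region_release_lane(nd_region, lane);
 *
 * Preemption stays disabled across the hold, and when there are fewer
 * lanes than cpus the lane's spinlock serializes the sharers.
 */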
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache().  The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
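/*
 * Caller sketch (hypothetical names): a pmem block driver persists a
 * write by streaming the payload around the cpu cache and then kicking
 * the write-pending-queue flush:
 *
 *	memcpy_flushcache(pmem_dst, src, len);
 *	nvdimm_flush(nd_region);
 *
 * where pmem_dst/src/len belong to the caller, not this file.
 */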
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
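/*
 * One plausible caller policy (a sketch, not mandated here): flush both
 * when hints exist (1) and when capability is unknown (-ENXIO), skipping
 * only the explicit "no flush needed" case:
 *
 *	if (nvdimm_has_flush(nd_region) != 0)
 *		nvdimm_flush(nd_region);
 */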
void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}