/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static struct workqueue_struct *nfit_wq;
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];
const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
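/*
 * xlat_status() translates a raw _DSM status word into an errno.  As
 * the checks below show, the low 16 bits carry the command status
 * (non-zero means the command itself failed) while the upper 16 bits
 * carry command-specific extended status, e.g. the set of supported
 * ARS scan types for ND_CMD_ARS_CAP.
 */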
static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		/* Command failed */
		if (clear_err->status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	return 0;
}
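/*
 * acpi_nfit_ctl() marshals an ND_CMD_* request into the ACPI _DSM
 * calling convention: the input fields already packed into 'buf' are
 * wrapped in a single-element ACPI package holding one buffer object,
 * the _DSM is evaluated against either the DIMM or the bus UUID, and
 * the returned buffer is copied back into 'buf' just past the input
 * envelope.  The xlat_status()-translated result lands in 'cmd_rc'.
 */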
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
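/*
 * The add_* helpers below share a pattern that makes NFIT re-parsing
 * (e.g. after a _FIT update) idempotent: each incoming table is first
 * compared against the 'prev' list populated by the last parse; a
 * match is simply moved back onto the live acpi_desc list, and only
 * genuinely new tables get a fresh devm allocation.
 */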
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}
static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}
static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}
static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}
static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}
static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}
static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}
static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}
static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);
static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};
static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}
static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}
static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}
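/*
 * The interleave-set cookie is what libnvdimm uses to detect dimm
 * shuffling across boots: the per-dimm (region_offset, serial_number)
 * tuples are sorted by region offset so the result is order-invariant,
 * then folded into a single fletcher64 checksum.
 */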
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
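/*
 * Translate a linear offset within a BLK aperture or control region
 * into the interleaved system-physical layout: the offset is split
 * into (line number, sub-line offset), the line number is split into
 * (table skip count, line index), and the interleave description
 * table then supplies the per-line offset.
 */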
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
static void wmb_blk(struct nfit_blk *nfit_blk)
{

	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}
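/*
 * Per the bit fields below, a block-control-window command is a single
 * 64-bit write: bits 0-47 encode the target DPA in cache-line units,
 * bits 48-55 the transfer length in cache lines, and bit 56 the
 * read(0)/write(1) direction.
 */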
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
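/*
 * BLK apertures and control regions may interleave within one SPA
 * range, so the mappings below are shared and kref-counted.  All
 * lookup and teardown runs under spa_map_mutex, as the WARN_ONs in
 * these helpers assert.
 */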
static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}
static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}
static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}
/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: nfit bus descriptor that provided the spa table entry
 * @spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}
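/*
 * Enabling a BLK region stitches together everything gathered at init
 * time: map the block-data-window aperture, map the dimm-control
 * region, validate the interleave geometry of both, query the DIMM
 * flags _DSM, and finally map the optional write-flush hint address.
 */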
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}
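/*
 * Address Range Scrub plumbing: ars_get_cap() sizes the status buffer
 * and reports supported scan types, ars_start()/ars_continue() kick
 * off or resume a scan, and ars_get_status() retrieves the current
 * results.  Each helper returns the xlat_status()-translated result
 * delivered through the cmd_rc channel of acpi_nfit_ctl().
 */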
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status)
{
	int rc;
	u32 i;

	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}

	return 0;
}
static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
	if (ret) {
		remove_resource(res);
		return ret;
	}

	return 0;
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;

	return 0;
}
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc->nvdimm_bus,
				acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}
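/*
 * The directed scrub below polls at one second intervals for up to
 * scrub_timeout seconds while firmware reports busy, and tolerates up
 * to scrub_overflow_abort result-buffer overflows before giving up on
 * a range.
 */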
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (nfit_spa->ars_done || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0)
				continue;
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			break;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = ars_start + init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (nfit_spa->nd_region)
			nfit_spa->ars_done = 1;
		else
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	mutex_unlock(&acpi_desc->init_mutex);
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
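/*
 * acpi_nfit_init() supports being called again as the NFIT is updated
 * at hotplug time: the live table lists are cut over to 'prev',
 * add_table() moves still-present entries back, and anything left on
 * 'prev' afterwards indicates an (unsupported) deletion.
 */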
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}

static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	return wait_for_completion_interruptible(&flush.cmp);
}
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);
	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENOMEM;

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj;
		/*
		 * Adjust for the acpi_object header of the _FIT
		 */
		obj = buf.pointer;
		if (obj->type == ACPI_TYPE_BUFFER) {
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				 __func__, (int) obj->type);
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	acpi_desc->cancel = 1;
	flush_workqueue(nfit_wq);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			goto out_unlock;
		acpi_nfit_desc_init(acpi_desc, &adev->dev);
		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			goto out_unlock;
	}
	/*
	 * Finish previous registration before considering new
	 * regions.
	 */
	flush_workqueue(nfit_wq);

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		}
	} else
		/* Bad _FIT, restore old nfit */
		dev_err(dev, "Invalid _FIT\n");

 out_unlock:
	device_unlock(dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_flush_address) != 16);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");