/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
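/*
 * The _DSM status word returned by platform firmware packs a command
 * status in the low 16 bits and command-specific extended status /
 * flags in the upper 16 bits.  The xlat_*_status() helpers below
 * translate that encoding into generic errno / nd_cmd results for
 * libnvdimm.
 */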
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return 0;
}
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
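/*
 * Illustrative sketch (not part of this file): a caller issuing a
 * bus-scope ARS capability query routes it through this entry point as
 *
 *	struct nd_cmd_ars_cap cmd = {
 *		.address = spa->address,
 *		.length = spa->length,
 *	};
 *	int cmd_rc, rc;
 *
 *	rc = acpi_nfit_ctl(nd_desc, NULL, ND_CMD_ARS_CAP, &cmd,
 *			sizeof(cmd), &cmd_rc);
 *
 * where 'rc' reports transport errors and 'cmd_rc' carries the
 * translated firmware status from xlat_status().
 */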
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
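/*
 * The add_* helpers below share a pattern: if an identical table was
 * seen in the previous NFIT (the 'prev' lists populated on rescan),
 * the existing entry is moved over to the current list; otherwise a
 * new devm-allocated copy of the table is appended.  Returning false
 * indicates an allocation failure or a malformed table.
 */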
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

/*
 * An implementation may provide a truncated control region if no block
 * windows are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}
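/*
 * Interleave and flush-hint tables are variable length: the structs
 * declare a single-element trailing array, so the effective size is
 * the base struct plus (count - 1) additional entries.  The sizeof_*
 * helpers below return 0 for tables too short to be valid.
 */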
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
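/*
 * add_table() returns a pointer just past the current table, so a
 * caller can walk the NFIT payload table-by-table until it returns
 * NULL (end of payload) or an ERR_PTR (malformed table / allocation
 * failure).
 */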
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			int i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				/* each hint is an 8-byte flush register */
				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);
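/*
 * Illustrative usage (the path assumes the bus-level 'nfit' attribute
 * group under an nvdimm bus device):
 *
 *	# echo 1 > /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub
 *
 * switches to full-scrub-on-exception mode; writing '0' restores the
 * default badblocks-only behavior.
 */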
/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time.  Userspace can wait on this using
 * select/poll etc.  A '+' at the end indicates an ARS is in progress.
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);
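/*
 * Illustrative usage: reading 'scrub' returns e.g. "2+" while a third
 * scrub is in flight, and writing '1' (the only accepted value) kicks
 * off an on-demand rescan via acpi_nfit_ars_rescan().
 */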
static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);
static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev))
		return 0;
	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
		ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			break;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = i;
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}
static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm)
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	/*
	 * Compare explicitly rather than subtracting: a u64 difference
	 * truncated to 'int' can report the wrong ordering.
	 */
	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}
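/*
 * The interleave-set cookie computed below is a fletcher64 checksum
 * over the per-dimm (region_offset, serial_number) tuples sorted by
 * region offset, so a namespace can detect when its backing dimms have
 * been re-shuffled or replaced.  The 'compat' pass preserves cookies
 * minted with the old memcmp()-based sort order.
 */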
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* support namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
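/*
 * Worked example (illustrative numbers): with line_size = 256,
 * num_lines = 2 and table_size = 512, an aperture offset of 1000
 * yields line_no = 3, sub_line_offset = 232, table_skip_count = 1 and
 * line_index = 1, so the translated offset is base_offset +
 * line_offset[1] * 256 + 512 + 232.
 */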
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
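/*
 * acpi_nfit_blk_single_io() performs one lane-sized transfer: program
 * the block window command register for the target dpa, copy through
 * the aperture (one interleave line at a time when interleaving is
 * active), flush, and then read back the window status register to
 * surface any media error as -EIO.
 */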
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
			nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
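/*
 * ARS (Address Range Scrub) flow: ars_get_cap() sizes the job,
 * ars_start() kicks off a scrub of one SPA range, ars_get_status()
 * polls for results, and ars_continue() restarts the scrub from the
 * platform-provided restart address when a status payload overflows.
 */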
1974 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
1978 struct nd_cmd_ars_start ars_start;
1979 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1980 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1982 memset(&ars_start, 0, sizeof(ars_start));
1983 ars_start.address = spa->address;
1984 ars_start.length = spa->length;
1985 if (nfit_spa_type(spa) == NFIT_SPA_PM)
1986 ars_start.type = ND_ARS_PERSISTENT;
1987 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
1988 ars_start.type = ND_ARS_VOLATILE;
1992 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1993 sizeof(ars_start), &cmd_rc);
2000 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2003 struct nd_cmd_ars_start ars_start;
2004 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2005 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2007 memset(&ars_start, 0, sizeof(ars_start));
2008 ars_start.address = ars_status->restart_address;
2009 ars_start.length = ars_status->restart_length;
2010 ars_start.type = ars_status->type;
2011 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2012 sizeof(ars_start), &cmd_rc);
2018 static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2020 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2021 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2024 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2025 acpi_desc->ars_status_size, &cmd_rc);
2031 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
2032 struct nd_cmd_ars_status *ars_status)
2034 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2039 * First record starts at 44 byte offset from the start of the
2042 if (ars_status->out_length < 44)
2044 for (i = 0; i < ars_status->num_records; i++) {
2045 /* only process full records */
2046 if (ars_status->out_length
2047 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2049 rc = nvdimm_bus_add_poison(nvdimm_bus,
2050 ars_status->records[i].err_address,
2051 ars_status->records[i].length);
2055 if (i < ars_status->num_records)
2056 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2061 static void acpi_nfit_remove_resource(void *data)
2063 struct resource *res = data;
2065 remove_resource(res);
2068 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2069 struct nd_region_desc *ndr_desc)
2071 struct resource *res, *nd_res = ndr_desc->res;
2074 /* No operation if the region is already registered as PMEM */
2075 is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2076 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2077 if (is_pmem == REGION_INTERSECTS)
2080 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2084 res->name = "Persistent Memory";
2085 res->start = nd_res->start;
2086 res->end = nd_res->end;
2087 res->flags = IORESOURCE_MEM;
2088 res->desc = IORES_DESC_PERSISTENT_MEMORY;
2090 ret = insert_resource(&iomem_resource, res);
2094 ret = devm_add_action_or_reset(acpi_desc->dev,
2095 acpi_nfit_remove_resource,
2103 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2104 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
2105 struct acpi_nfit_memory_map *memdev,
2106 struct nfit_spa *nfit_spa)
2108 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2109 memdev->device_handle);
2110 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2111 struct nd_blk_region_desc *ndbr_desc;
2112 struct nfit_mem *nfit_mem;
2116 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2117 spa->range_index, memdev->device_handle);
2121 mapping->nvdimm = nvdimm;
2122 switch (nfit_spa_type(spa)) {
2124 case NFIT_SPA_VOLATILE:
2125 mapping->start = memdev->address;
2126 mapping->size = memdev->region_size;
2129 nfit_mem = nvdimm_provider_data(nvdimm);
2130 if (!nfit_mem || !nfit_mem->bdw) {
2131 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2132 spa->range_index, nvdimm_name(nvdimm));
2134 mapping->size = nfit_mem->bdw->capacity;
2135 mapping->start = nfit_mem->bdw->start_address;
2136 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2140 ndr_desc->mapping = mapping;
2141 ndr_desc->num_mappings = blk_valid;
2142 ndbr_desc = to_blk_region_desc(ndr_desc);
2143 ndbr_desc->enable = acpi_nfit_blk_region_enable;
2144 ndbr_desc->do_io = acpi_desc->blk_do_io;
2145 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2147 if (!nfit_spa->nd_region)
2155 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2157 return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2158 nfit_spa_type(spa) == NFIT_SPA_VCD ||
2159 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2160 nfit_spa_type(spa) == NFIT_SPA_PCD);
2163 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2164 struct nfit_spa *nfit_spa)
2166 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2167 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2168 struct nd_blk_region_desc ndbr_desc;
2169 struct nd_region_desc *ndr_desc;
2170 struct nfit_memdev *nfit_memdev;
2171 struct nvdimm_bus *nvdimm_bus;
2172 struct resource res;
2175 if (nfit_spa->nd_region)
2178 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2179 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
2184 memset(&res, 0, sizeof(res));
2185 memset(&mappings, 0, sizeof(mappings));
2186 memset(&ndbr_desc, 0, sizeof(ndbr_desc));
2187 res.start = spa->address;
2188 res.end = res.start + spa->length - 1;
2189 ndr_desc = &ndbr_desc.ndr_desc;
2190 ndr_desc->res = &res;
2191 ndr_desc->provider_data = nfit_spa;
2192 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2193 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2194 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2195 spa->proximity_domain);
2197 ndr_desc->numa_node = NUMA_NO_NODE;
2199 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2200 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
2201 struct nd_mapping_desc *mapping;
2203 if (memdev->range_index != spa->range_index)
2205 if (count >= ND_MAX_MAPPINGS) {
2206 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2207 spa->range_index, ND_MAX_MAPPINGS);
2210 mapping = &mappings[count++];
2211 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
2217 ndr_desc->mapping = mappings;
2218 ndr_desc->num_mappings = count;
2219 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2223 nvdimm_bus = acpi_desc->nvdimm_bus;
2224 if (nfit_spa_type(spa) == NFIT_SPA_PM) {
2225 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
2227 dev_warn(acpi_desc->dev,
2228 "failed to insert pmem resource to iomem: %d\n",
2233 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2235 if (!nfit_spa->nd_region)
2237 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
2238 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2240 if (!nfit_spa->nd_region)
2242 } else if (nfit_spa_is_virtual(spa)) {
2243 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2245 if (!nfit_spa->nd_region)
2251 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2252 nfit_spa->spa->range_index);
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}
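/*
 * The ars_status buffer is devm-allocated and reused across scrubs.  A
 * hypothetical caller sizing for a 64KiB worst case would do,
 * illustratively:
 *
 *	if (ars_status_alloc(acpi_desc, SZ_64K))
 *		return -ENOMEM;
 *
 * Repeat calls with an equal or smaller max simply re-zero the existing
 * allocation instead of reallocating.
 */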
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}
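/*
 * Callers treat acpi_nfit_query_poison() results like raw ARS status:
 * -ENOTTY means the range has no ARS capability, -EBUSY means a scrub
 * is still in flight, and -ENOSPC means some records were returned but
 * more are pending, so the scrub should be resumed via ars_continue().
 */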
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (!nfit_spa->ars_required || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}
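/*
 * Note that the loop above drops init_mutex around its one second
 * sleep so that acpi_nfit_init() can keep appending ranges while a
 * long scrub is in flight; deletion is excluded by the still-active
 * workqueue, per the comment in the -EBUSY branch.
 */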
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1.  If we're called for a 'rescan', we harmlessly pass through
	 * the first phase, but really only care about running phase 2, where
	 * regions can be notified of new poison.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (!nfit_spa->nd_region) {
			nfit_spa->ars_required = 1;
			acpi_nfit_register_region(acpi_desc, nfit_spa);
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
	mutex_unlock(&acpi_desc->init_mutex);
}
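/*
 * Each pass through acpi_nfit_scrub() bumps scrub_count and pokes the
 * 'scrub' sysfs attribute, so userspace can poll() that attribute to
 * learn of scrub completion.  Illustratively, scrub_show() reports
 * something like "1" when idle and "1+" while the work item is busy.
 */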
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}
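/*
 * Only BLK/DCR ranges are registered synchronously here; all other
 * ranges are deferred to the 'nfit' workqueue, where acpi_nfit_scrub()
 * gates their registration on ARS results.
 */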
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}
static void acpi_nfit_destruct(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown.
	 */
	mutex_lock(&acpi_desc_lock);
	acpi_desc->cancel = 1;
	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
	if (acpi_desc->scrub_count_state)
		sysfs_put(acpi_desc->scrub_count_state);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	acpi_desc->nvdimm_bus = NULL;
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);
}
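/*
 * acpi_nfit_destruct() runs as a devm action (registered via
 * devm_add_action_or_reset() in acpi_nfit_init() below), so bus
 * teardown is sequenced automatically with the rest of the device's
 * devres-managed state.
 */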
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
			acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
			acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
			acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
			acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
			acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
			acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
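/*
 * Typical usage (illustrative sketch, buffer names are the caller's
 * own): a probe path such as acpi_nfit_add() below, or a test harness,
 * stands up a descriptor and hands acpi_nfit_init() the raw payload:
 *
 *	acpi_nfit_desc_init(acpi_desc, dev);
 *	rc = acpi_nfit_init(acpi_desc, nfit_buf, nfit_size);
 *
 * The function may be called again with an updated payload; previously
 * seen sub-tables are parked on 'prev' and reconciled against the new
 * ones.
 */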
struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;
	int rc;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);

	rc = wait_for_completion_interruptible(&flush.cmp);
	cancel_work_sync(&flush.work);
	return rc;
}
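/*
 * The on-stack work / completion pair is the usual trick for flushing
 * a workqueue interruptibly: once flush.work runs, everything queued
 * to the single-threaded 'nfit' workqueue ahead of it (i.e. any
 * in-flight scrub) has finished, while the waiting process remains
 * interruptible from userspace.
 */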
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	if (acpi_desc->cancel)
		return 0;

	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct acpi_nfit_system_address *spa = nfit_spa->spa;

		if (nfit_spa_type(spa) != NFIT_SPA_PM)
			continue;

		nfit_spa->ars_required = 1;
	}
	queue_work(nfit_wq, &acpi_desc->work);
	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
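/*
 * Only PM ranges are flagged for re-scrub.  The -EBUSY returns here
 * and in acpi_nfit_clear_to_send() above keep a kernel-initiated ARS
 * from being trampled by an overlapping ND_CMD_ARS_START from
 * userspace, and vice versa.
 */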
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
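/*
 * acpi_nfit_desc_init() is exported alongside acpi_nfit_init() so that
 * callers outside this driver (e.g. a unit-test harness providing its
 * own device) can stand up a bus; all list heads, the init_mutex, and
 * the scrub work item must be initialized before the first
 * acpi_nfit_init() call.
 */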
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
					__func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));
	return rc;
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_destruct */
	return 0;
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	if (event != NFIT_NOTIFY_UPDATE)
		return;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();

	return acpi_bus_register_driver(&acpi_nfit_driver);
}
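/*
 * Init ordering: the 'nfit' workqueue must exist before the driver
 * registers, since probe queues scrub work, and registering the MCE
 * notifier first is safe because nfit_handle_mce() walks the (still
 * empty) acpi_descs list under acpi_desc_lock.
 */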
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}
module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");