/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the bus and dimm handle and return whether this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (test_bit(NDD_ALIASING, &nvdimm->flags))
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}

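/*
 * Sanity-check a dimm before issuing label-area commands: the driver
 * data must exist and the bus must implement get_config_data.
 */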
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        int cmd_rc = 0;

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}

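/*
 * Cache the dimm's label storage area in ndd->data, reading it from the
 * bus provider in max_xfer-sized ND_CMD_GET_CONFIG_DATA chunks.
 */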
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;

        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                ndd->data = vmalloc(ndd->nsarea.config_size);

        if (!ndd->data)
                return -ENOMEM;

        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd), NULL);
                if (rc || cmd->status) {
                        rc = -ENXIO;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
        kfree(cmd);

        return rc;
}

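/*
 * Write @len bytes from @buf to the label storage area starting at
 * @offset, splitting the transfer into max_xfer-sized
 * ND_CMD_SET_CONFIG_DATA commands.
 */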
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        int rc = validate_dimm(ndd);
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (!ndd->data)
                return -ENXIO;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;
                u32 *status;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4-bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
                status = ((void *) cmd) + cmd_size - sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
                if (rc || *status) {
                        rc = rc ? rc : -ENXIO;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}

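/* mark this dimm's capacity as aliased (shared between PMEM and BLK access) */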
void nvdimm_set_aliasing(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

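/*
 * Final kref release: drop all remaining DPA reservations, free the
 * cached label data, and release the reference on the dimm device.
 */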
void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "%s\n", __func__);

        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

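/* 'commands' sysfs attribute: space-separated list of commands this dimm supports */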
static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing; userspace should
         * quiesce probing if it wants a static answer
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

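/*
 * Allocate and register a dimm device on @nvdimm_bus: assign an id from
 * dimm_ida, record the provider's flags, command mask, and flush
 * resources, and hand the "nmem%d" device to the async registration path.
 */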
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

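/*
 * device_for_each_child() callback: for each PMEM region that includes
 * the dimm in info->nd_mapping, walk that dimm's pmem allocations and
 * either trim the candidate free range in info->res or deduct the
 * aliased capacity from info->available.
 */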
int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to free space that we are
         * looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping.
         */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "pmem", 4) != 0)
                        continue;
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        new = max(blk_start, min(map_end + 1, res->end + 1));
                        if (new != blk_start) {
                                blk_start = new;
                                goto retry;
                        }
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start;

        return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
                info.available -= resource_size(res);
        }

        return info.available;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

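/*
 * Reserve @n bytes of dimm-physical-address space starting at @start,
 * naming the reservation after @label_id so allocations can later be
 * summed per label.
 */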
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;
        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}