2 * dcssblk.c -- the S/390 block driver for dcss memory
4 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
7 #define KMSG_COMPONENT "dcssblk"
8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/ctype.h>
13 #include <linux/errno.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/blkdev.h>
17 #include <linux/completion.h>
18 #include <linux/interrupt.h>
19 #include <linux/platform_device.h>
20 #include <linux/pfn_t.h>
21 #include <linux/dax.h>
22 #include <asm/extmem.h>
/* Driver-wide constants: device name, minors per gendisk, module-parameter
 * buffer length, and the segment-name (bus id) size. */
25 #define DCSSBLK_NAME "dcssblk"
26 #define DCSSBLK_MINORS_PER_DISK 1
27 #define DCSSBLK_PARM_LEN 400
28 #define DCSS_BUS_ID_SIZE 20

/* Forward declarations for the block-device and DAX entry points below. */
30 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
31 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
32 static blk_qc_t dcssblk_make_request(struct request_queue *q,
34 static long dcssblk_blk_direct_access(struct block_device *bdev, sector_t secnum,
35 void **kaddr, pfn_t *pfn, long size);
36 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
37 long nr_pages, void **kaddr, pfn_t *pfn);

/* Buffer for the "segments=" module parameter; parsed by dcssblk_check_params(). */
39 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";

/* Block major number, assigned dynamically via register_blkdev(0, ...). */
41 static int dcssblk_major;
/* Block-device operations table wired to the handlers declared above. */
42 static const struct block_device_operations dcssblk_devops = {
45 .release = dcssblk_release,
46 .direct_access = dcssblk_blk_direct_access,
/* DAX operations table; direct_access resolves DCSS memory addresses. */
49 static const struct dax_operations dcssblk_dax_ops = {
50 .direct_access = dcssblk_dax_direct_access,
/*
 * Per-device state: one dcssblk_dev_info represents one block device backed
 * by one or more contiguous DCSS segments (kept on seg_list).
 * NOTE(review): this view is incomplete — several fields (dev, gd, start,
 * end, use_count, num_of_segments, segment_type, lh) are referenced elsewhere
 * in the file but not visible here.
 */
53 struct dcssblk_dev_info {
56 char segment_name[DCSS_BUS_ID_SIZE];
62 unsigned char save_pending;
63 unsigned char is_shared;
64 struct request_queue *dcssblk_queue;
66 struct list_head seg_list;
67 struct dax_device *dax_dev;
/* NOTE(review): the field below belongs to struct segment_info (its
 * declaration lines are not visible in this excerpt). */
72 char segment_name[DCSS_BUS_ID_SIZE];
/* sysfs "add"/"remove" attributes on the dcssblk root device; write-only
 * (S_IWUSR) since they trigger actions rather than expose state. */
78 static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
80 static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
83 static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
84 static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);

/* Root device under which all dcssblk devices are registered. */
86 static struct device *dcssblk_root_dev;

/* Global device list; dcssblk_devices_sem serializes all list access. */
88 static LIST_HEAD(dcssblk_devices);
89 static struct rw_semaphore dcssblk_devices_sem;
92 * release function for segment device.
/* Device ->release callback: frees every segment_info queued on the device's
 * seg_list and drops the module reference taken when the device was added.
 * NOTE(review): the kfree of entry and of dev_info is in lines not visible
 * here — confirm against the full source. */
95 dcssblk_release_segment(struct device *dev)
97 struct dcssblk_dev_info *dev_info;
98 struct segment_info *entry, *temp;
100 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
101 list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
102 list_del(&entry->lh);
/* Balance the try_module_get() done in dcssblk_add_store(). */
106 module_put(THIS_MODULE);
110 * get a minor number. needs to be called with
111 * down_write(&dcssblk_devices_sem) and the
112 * device needs to be enqueued before the semaphore is
/* Linear scan over all possible minors; for each candidate, walk the device
 * list to see whether it is already taken. O(minors * devices) but the device
 * count is small in practice. */
116 dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
119 struct dcssblk_dev_info *entry;
121 if (dev_info == NULL)
123 for (minor = 0; minor < (1<<MINORBITS); minor++) {
125 // test if minor available
126 list_for_each_entry(entry, &dcssblk_devices, lh)
127 if (minor == entry->gd->first_minor)
129 if (!found) break; // got unused minor
/* NOTE(review): the error path for "no free minor" is not visible here. */
133 dev_info->gd->first_minor = minor;
138 * get the struct dcssblk_dev_info from dcssblk_devices
139 * for the given name.
140 * down_read(&dcssblk_devices_sem) must be held.
/* Returns the matching device, or (per the callers' NULL checks) NULL when
 * no device carries the given segment name. */
142 static struct dcssblk_dev_info *
143 dcssblk_get_device_by_name(char *name)
145 struct dcssblk_dev_info *entry;
147 list_for_each_entry(entry, &dcssblk_devices, lh) {
148 if (!strcmp(name, entry->segment_name)) {
156 * get the struct segment_info from seg_list
157 * for the given name.
158 * down_read(&dcssblk_devices_sem) must be held.
/* Nested walk: every device's segment list is searched, so a segment name is
 * found regardless of which device it belongs to. */
160 static struct segment_info *
161 dcssblk_get_segment_by_name(char *name)
163 struct dcssblk_dev_info *dev_info;
164 struct segment_info *entry;
166 list_for_each_entry(dev_info, &dcssblk_devices, lh) {
167 list_for_each_entry(entry, &dev_info->seg_list, lh) {
168 if (!strcmp(name, entry->segment_name))
176 * get the highest address of the multi-segment block.
/* Simple max-scan over the device's segments; returns the largest end
 * address. NOTE(review): initialization of highest_addr is not visible in
 * this excerpt. */
179 dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
181 unsigned long highest_addr;
182 struct segment_info *entry;
185 list_for_each_entry(entry, &dev_info->seg_list, lh) {
186 if (highest_addr < entry->end)
187 highest_addr = entry->end;
193 * get the lowest address of the multi-segment block.
/* Min-scan over the device's segments. A set_first flag (declaration not
 * visible here) seeds lowest_addr from the first entry so that 0 is never
 * mistaken for a valid minimum. */
196 dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
199 unsigned long lowest_addr;
200 struct segment_info *entry;
204 list_for_each_entry(entry, &dev_info->seg_list, lh) {
205 if (set_first == 0) {
206 lowest_addr = entry->start;
209 if (lowest_addr > entry->start)
210 lowest_addr = entry->start;
217 * Check continuity of segments.
/* Validates that a multi-segment device is usable: copies the segments into a
 * temporary array, sorts them by start address (O(n^2) exchange sort — fine
 * for the small segment counts involved), then checks that consecutive
 * segments are address-adjacent and have compatible types. */
220 dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
223 struct segment_info *sort_list, *entry, temp;
/* A single segment is trivially continuous. */
225 if (dev_info->num_of_segments <= 1)
229 sizeof(struct segment_info) * dev_info->num_of_segments,
231 if (sort_list == NULL)
/* Snapshot the list into the flat array for sorting. */
234 list_for_each_entry(entry, &dev_info->seg_list, lh) {
235 memcpy(&sort_list[i], entry, sizeof(struct segment_info));
/* Exchange sort by ascending start address. */
240 for (i = 0; i < dev_info->num_of_segments; i++)
241 for (j = 0; j < dev_info->num_of_segments; j++)
242 if (sort_list[j].start > sort_list[i].start) {
243 memcpy(&temp, &sort_list[i],
244 sizeof(struct segment_info));
245 memcpy(&sort_list[i], &sort_list[j],
246 sizeof(struct segment_info));
247 memcpy(&sort_list[j], &temp,
248 sizeof(struct segment_info));
251 /* check continuity */
252 for (i = 0; i < dev_info->num_of_segments - 1; i++) {
253 if ((sort_list[i].end + 1) != sort_list[i+1].start) {
254 pr_err("Adjacent DCSSs %s and %s are not "
255 "contiguous\n", sort_list[i].segment_name,
256 sort_list[i+1].segment_name);
260 /* EN and EW are allowed in a block device */
/* Mixed types are accepted only when both sides are exclusive and
 * neither is exclusive-read-only (SEG_TYPE_ER). */
261 if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
262 if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
263 (sort_list[i].segment_type == SEG_TYPE_ER) ||
264 !(sort_list[i+1].segment_type &
265 SEGMENT_EXCLUSIVE) ||
266 (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
267 pr_err("DCSS %s and DCSS %s have "
268 "incompatible types\n",
269 sort_list[i].segment_name,
270 sort_list[i+1].segment_name);
/* Loads the named DCSS segment in shared mode and returns its segment_info
 * through *seg_info. If the segment is already attached to some device, the
 * existing entry is reused (callers treat that as "already loaded"). The
 * segment type is returned by segment_load() via rc and cached in the
 * segment_info. */
286 dcssblk_load_segment(char *name, struct segment_info **seg_info)
290 /* already loaded? */
291 down_read(&dcssblk_devices_sem);
292 *seg_info = dcssblk_get_segment_by_name(name);
293 up_read(&dcssblk_devices_sem);
294 if (*seg_info != NULL)
297 /* get a struct segment_info */
298 *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
299 if (*seg_info == NULL)
302 strcpy((*seg_info)->segment_name, name);
304 /* load the segment */
305 rc = segment_load(name, SEGMENT_SHARED,
306 &(*seg_info)->start, &(*seg_info)->end);
/* segment_load() returns a negative errno on failure; report it.
 * NOTE(review): the kfree of *seg_info on this path is not visible here. */
308 segment_warning(rc, (*seg_info)->segment_name);
311 INIT_LIST_HEAD(&(*seg_info)->lh);
/* On success rc carries the segment type (SEG_TYPE_*). */
312 (*seg_info)->segment_type = rc;
318 * device attribute for switching shared/nonshared (exclusive)
319 * operation (show + store)
/* sysfs "shared" show: prints "1" for shared mode, "0" for exclusive. */
322 dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
324 struct dcssblk_dev_info *dev_info;
326 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
327 return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
/* sysfs "shared" store: reloads every segment of the device in shared ("1")
 * or exclusive ("0") mode via segment_modify_shared(). Refused while the
 * device is open (use_count != 0). If a reload fails mid-way the device is
 * torn down entirely, since its segments are in an inconsistent state. */
331 dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
333 struct dcssblk_dev_info *dev_info;
334 struct segment_info *entry, *temp;
/* Accept only a single-character value ("0"/"1") plus optional newline. */
337 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
339 down_write(&dcssblk_devices_sem);
340 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
341 if (atomic_read(&dev_info->use_count)) {
345 if (inbuf[0] == '1') {
346 /* reload segments in shared mode */
347 list_for_each_entry(entry, &dev_info->seg_list, lh) {
348 rc = segment_modify_shared(entry->segment_name,
/* -EINVAL here would mean we passed a bad request type — a driver bug. */
351 BUG_ON(rc == -EINVAL);
356 dev_info->is_shared = 1;
357 switch (dev_info->segment_type) {
/* Shared mode is read-only for these segment types. */
361 set_disk_ro(dev_info->gd, 1);
363 } else if (inbuf[0] == '0') {
364 /* reload segments in exclusive mode */
/* SC segments can never be writable, so exclusive mode is refused. */
365 if (dev_info->segment_type == SEG_TYPE_SC) {
366 pr_err("DCSS %s is of type SC and cannot be "
367 "loaded as exclusive-writable\n",
368 dev_info->segment_name);
372 list_for_each_entry(entry, &dev_info->seg_list, lh) {
373 rc = segment_modify_shared(entry->segment_name,
376 BUG_ON(rc == -EINVAL);
381 dev_info->is_shared = 0;
382 set_disk_ro(dev_info->gd, 0);
/* Failure path: a segment reload failed, so remove the whole device. */
391 pr_err("DCSS device %s is removed after a failed access mode "
392 "change\n", dev_info->segment_name);
394 list_for_each_entry(entry, &dev_info->seg_list, lh) {
396 segment_unload(entry->segment_name);
398 list_del(&dev_info->lh);
/* Tear down DAX first, then gendisk/queue (mirrors dcssblk_remove_store). */
400 kill_dax(dev_info->dax_dev);
401 put_dax(dev_info->dax_dev);
402 del_gendisk(dev_info->gd);
403 blk_cleanup_queue(dev_info->dcssblk_queue);
404 dev_info->gd->queue = NULL;
405 put_disk(dev_info->gd);
/* Drop the semaphore before unregistering to avoid deadlocking with
 * sysfs attribute removal. */
406 up_write(&dcssblk_devices_sem);
408 if (device_remove_file_self(dev, attr)) {
409 device_unregister(dev);
414 up_write(&dcssblk_devices_sem);
417 static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
418 dcssblk_shared_store);
421 * device attribute for save operation on current copy
422 * of the segment. If the segment is busy, saving will
423 * become pending until it gets released, which can be
424 * undone by storing a non-true value to this entry.
/* sysfs "save" show: prints "1" when a save is pending, "0" otherwise. */
428 dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
430 struct dcssblk_dev_info *dev_info;
432 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
433 return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
/* sysfs "save" store: "1" saves all of the device's DCSSs immediately when
 * idle, or marks the save pending (executed later in dcssblk_release());
 * "0" cancels a pending save. */
437 dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
439 struct dcssblk_dev_info *dev_info;
440 struct segment_info *entry;
/* Accept only a single-character value plus optional newline. */
442 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
444 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
446 down_write(&dcssblk_devices_sem);
447 if (inbuf[0] == '1') {
448 if (atomic_read(&dev_info->use_count) == 0) {
449 // device is idle => we save immediately
450 pr_info("All DCSSs that map to device %s are "
451 "saved\n", dev_info->segment_name);
452 list_for_each_entry(entry, &dev_info->seg_list, lh) {
/* SN/EN segments are not savable — warn and skip them. */
453 if (entry->segment_type == SEG_TYPE_EN ||
454 entry->segment_type == SEG_TYPE_SN)
455 pr_warn("DCSS %s is of type SN or EN"
456 " and cannot be saved\n",
457 entry->segment_name);
459 segment_save(entry->segment_name);
462 // device is busy => we save it when it becomes
463 // idle in dcssblk_release
464 pr_info("Device %s is in use, its DCSSs will be "
465 "saved when it becomes idle\n",
466 dev_info->segment_name);
467 dev_info->save_pending = 1;
469 } else if (inbuf[0] == '0') {
470 if (dev_info->save_pending) {
471 // device is busy & the user wants to undo his save
473 dev_info->save_pending = 0;
474 pr_info("A pending save request for device %s "
475 "has been canceled\n",
476 dev_info->segment_name);
479 up_write(&dcssblk_devices_sem);
482 up_write(&dcssblk_devices_sem);
485 static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
489 * device attribute for showing all segments in a device
/* sysfs "seglist" show: concatenates the names of all segments backing this
 * device into buf, advancing i by each name's length (separator lines are
 * not visible in this excerpt). */
492 dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
497 struct dcssblk_dev_info *dev_info;
498 struct segment_info *entry;
500 down_read(&dcssblk_devices_sem);
501 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
504 list_for_each_entry(entry, &dev_info->seg_list, lh) {
505 strcpy(&buf[i], entry->segment_name);
506 i += strlen(entry->segment_name);
510 up_read(&dcssblk_devices_sem);
513 static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
/* Per-device sysfs attributes (shared/save/seglist), registered as the
 * device's default attribute group via dev.groups in dcssblk_add_store(). */
515 static struct attribute *dcssblk_dev_attrs[] = {
516 &dev_attr_shared.attr,
518 &dev_attr_seglist.attr,
521 static struct attribute_group dcssblk_dev_attr_group = {
522 .attrs = dcssblk_dev_attrs,
524 static const struct attribute_group *dcssblk_dev_attr_groups[] = {
525 &dcssblk_dev_attr_group,
530 * device attribute for adding devices
/* sysfs "add" store on the root device: parses a colon-separated list of
 * segment names from buf, loads each segment, validates contiguity, then
 * allocates and registers the gendisk, request queue, DAX device and the
 * child struct device. Error paths unwind in reverse order of setup. */
533 dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
535 int rc, i, j, num_of_segments;
536 struct dcssblk_dev_info *dev_info;
537 struct segment_info *seg_info, *temp;
539 unsigned long seg_byte_size;
/* Only the dcssblk root device accepts "add" writes. */
543 if (dev != dcssblk_root_dev) {
547 if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
552 local_buf = kmalloc(count + 1, GFP_KERNEL);
553 if (local_buf == NULL) {
/* Parse loop: i marks the start of each name, j scans to its end;
 * names are upper-cased (DCSS names are case-insensitive) and limited
 * to 8 characters. */
562 for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
563 for (j = i; j < count &&
566 (buf[j] != '\n'); j++) {
567 local_buf[j-i] = toupper(buf[j]);
569 local_buf[j-i] = '\0';
570 if (((j - i) == 0) || ((j - i) > 8)) {
575 rc = dcssblk_load_segment(local_buf, &seg_info);
579 * get a struct dcssblk_dev_info
/* The first segment creates the device structure; later segments are
 * only appended to its seg_list. */
581 if (num_of_segments == 0) {
582 dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
584 if (dev_info == NULL) {
588 strcpy(dev_info->segment_name, local_buf);
589 dev_info->segment_type = seg_info->segment_type;
590 INIT_LIST_HEAD(&dev_info->seg_list);
592 list_add_tail(&seg_info->lh, &dev_info->seg_list);
596 if ((buf[j] == '\0') || (buf[j] == '\n'))
600 /* no trailing colon at the end of the input */
601 if ((i > 0) && (buf[i-1] == ':')) {
/* Keep the full (multi-name) spec in local_buf for log messages. */
605 strlcpy(local_buf, buf, i + 1);
606 dev_info->num_of_segments = num_of_segments;
607 rc = dcssblk_is_continuous(dev_info);
/* Device address range spans from the lowest to the highest segment. */
611 dev_info->start = dcssblk_find_lowest_addr(dev_info);
612 dev_info->end = dcssblk_find_highest_addr(dev_info);
614 dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
615 dev_info->dev.release = dcssblk_release_segment;
616 dev_info->dev.groups = dcssblk_dev_attr_groups;
617 INIT_LIST_HEAD(&dev_info->lh);
618 dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
619 if (dev_info->gd == NULL) {
623 dev_info->gd->major = dcssblk_major;
624 dev_info->gd->fops = &dcssblk_devops;
625 dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
626 dev_info->gd->queue = dev_info->dcssblk_queue;
627 dev_info->gd->private_data = dev_info;
628 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
629 blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
/* Advertise DAX capability on the queue. */
630 queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
632 seg_byte_size = (dev_info->end - dev_info->start + 1);
633 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
634 pr_info("Loaded %s with total size %lu bytes and capacity %lu "
635 "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
/* Segments are loaded shared by default (see dcssblk_load_segment). */
637 dev_info->save_pending = 0;
638 dev_info->is_shared = 1;
639 dev_info->dev.parent = dcssblk_root_dev;
642 *get minor, add to list
644 down_write(&dcssblk_devices_sem);
/* Reject duplicates: the name must not already be attached elsewhere. */
645 if (dcssblk_get_segment_by_name(local_buf)) {
649 rc = dcssblk_assign_free_minor(dev_info);
652 sprintf(dev_info->gd->disk_name, "dcssblk%d",
653 dev_info->gd->first_minor);
654 list_add_tail(&dev_info->lh, &dcssblk_devices);
/* Pin the module for the lifetime of the device (released in
 * dcssblk_release_segment). */
656 if (!try_module_get(THIS_MODULE)) {
661 * register the device
663 rc = device_register(&dev_info->dev);
667 dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
669 if (!dev_info->dax_dev) {
674 get_device(&dev_info->dev);
675 device_add_disk(&dev_info->dev, dev_info->gd);
/* Initial read-only state depends on the segment type. */
677 switch (dev_info->segment_type) {
681 set_disk_ro(dev_info->gd,1);
684 set_disk_ro(dev_info->gd,0);
687 up_write(&dcssblk_devices_sem);
/* Error unwind paths below (labels not visible in this excerpt). */
692 list_del(&dev_info->lh);
693 blk_cleanup_queue(dev_info->dcssblk_queue);
694 dev_info->gd->queue = NULL;
695 put_disk(dev_info->gd);
696 list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
697 segment_unload(seg_info->segment_name);
699 put_device(&dev_info->dev);
700 up_write(&dcssblk_devices_sem);
703 list_del(&dev_info->lh);
705 blk_cleanup_queue(dev_info->dcssblk_queue);
706 dev_info->gd->queue = NULL;
707 put_disk(dev_info->gd);
708 up_write(&dcssblk_devices_sem);
710 if (dev_info == NULL)
712 list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
713 list_del(&seg_info->lh);
714 segment_unload(seg_info->segment_name);
725 * device attribute for removing devices
/* sysfs "remove" store on the root device: looks up the named device,
 * refuses removal while open, then tears down DAX, gendisk, queue and the
 * device's segments before unregistering the struct device. */
728 dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
730 struct dcssblk_dev_info *dev_info;
731 struct segment_info *entry;
/* Only the dcssblk root device accepts "remove" writes. */
735 if (dev != dcssblk_root_dev) {
738 local_buf = kmalloc(count + 1, GFP_KERNEL);
739 if (local_buf == NULL) {
/* Upper-case the name; DCSS names are case-insensitive, max 8 chars. */
745 for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) {
746 local_buf[i] = toupper(buf[i]);
749 if ((i == 0) || (i > 8)) {
754 down_write(&dcssblk_devices_sem);
755 dev_info = dcssblk_get_device_by_name(local_buf);
756 if (dev_info == NULL) {
757 up_write(&dcssblk_devices_sem);
758 pr_warn("Device %s cannot be removed because it is not a known device\n",
/* A device that is still open must not be removed. */
763 if (atomic_read(&dev_info->use_count) != 0) {
764 up_write(&dcssblk_devices_sem);
765 pr_warn("Device %s cannot be removed while it is in use\n",
771 list_del(&dev_info->lh);
772 kill_dax(dev_info->dax_dev);
773 put_dax(dev_info->dax_dev);
774 del_gendisk(dev_info->gd);
775 blk_cleanup_queue(dev_info->dcssblk_queue);
776 dev_info->gd->queue = NULL;
777 put_disk(dev_info->gd);
779 /* unload all related segments */
780 list_for_each_entry(entry, &dev_info->seg_list, lh)
781 segment_unload(entry->segment_name);
783 up_write(&dcssblk_devices_sem);
/* Final puts: device_unregister drops the sysfs ref, put_device the
 * reference taken in dcssblk_add_store(). */
785 device_unregister(&dev_info->dev);
786 put_device(&dev_info->dev);
/* Block-device ->open: bumps the device use count (checked by remove/save/
 * shared handlers) and fixes the block size at 4 KiB. */
795 dcssblk_open(struct block_device *bdev, fmode_t mode)
797 struct dcssblk_dev_info *dev_info;
800 dev_info = bdev->bd_disk->private_data;
801 if (NULL == dev_info) {
805 atomic_inc(&dev_info->use_count);
806 bdev->bd_block_size = 4096;
/* Block-device ->release: drops the use count; when the device becomes idle
 * and a save was requested via the "save" attribute, performs the deferred
 * segment_save() for every savable segment. */
813 dcssblk_release(struct gendisk *disk, fmode_t mode)
815 struct dcssblk_dev_info *dev_info = disk->private_data;
816 struct segment_info *entry;
822 down_write(&dcssblk_devices_sem);
823 if (atomic_dec_and_test(&dev_info->use_count)
824 && (dev_info->save_pending)) {
825 pr_info("Device %s has become idle and is being saved "
826 "now\n", dev_info->segment_name);
827 list_for_each_entry(entry, &dev_info->seg_list, lh) {
/* SN/EN segments are not savable — warn and skip them. */
828 if (entry->segment_type == SEG_TYPE_EN ||
829 entry->segment_type == SEG_TYPE_SN)
830 pr_warn("DCSS %s is of type SN or EN and cannot"
831 " be saved\n", entry->segment_name);
833 segment_save(entry->segment_name);
835 dev_info->save_pending = 0;
837 up_write(&dcssblk_devices_sem);
/* bio-based I/O path: DCSS memory is directly addressable, so each bio
 * segment is serviced with a memcpy between the page and the segment's
 * memory. Requests must be page-aligned (4 KiB) and within capacity. */
841 dcssblk_make_request(struct request_queue *q, struct bio *bio)
843 struct dcssblk_dev_info *dev_info;
845 struct bvec_iter iter;
847 unsigned long page_addr;
848 unsigned long source_addr;
849 unsigned long bytes_done;
851 blk_queue_split(q, &bio, q->bio_split);
854 dev_info = bio->bi_bdev->bd_disk->private_data;
855 if (dev_info == NULL)
/* Sector must be on a 4 KiB boundary (8 * 512) and size a 4 KiB multiple. */
857 if ((bio->bi_iter.bi_sector & 7) != 0 ||
858 (bio->bi_iter.bi_size & 4095) != 0)
859 /* Request is not page-aligned. */
861 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
862 /* Request beyond end of DCSS segment. */
865 /* verify data transfer direction */
866 if (dev_info->is_shared) {
867 switch (dev_info->segment_type) {
871 /* cannot write to these segments */
872 if (bio_data_dir(bio) == WRITE) {
873 pr_warn("Writing to %s failed because it is a read-only device\n",
874 dev_name(&dev_info->dev));
/* Convert the 512-byte sector index to a 4 KiB page index. */
880 index = (bio->bi_iter.bi_sector >> 3);
881 bio_for_each_segment(bvec, bio, iter) {
882 page_addr = (unsigned long)
883 page_address(bvec.bv_page) + bvec.bv_offset;
884 source_addr = dev_info->start + (index<<12) + bytes_done;
/* Each bvec must itself be page-aligned for the flat memcpy. */
885 if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
888 if (bio_data_dir(bio) == READ) {
889 memcpy((void*)page_addr, (void*)source_addr,
892 memcpy((void*)source_addr, (void*)page_addr,
895 bytes_done += bvec.bv_len;
898 return BLK_QC_T_NONE;
901 return BLK_QC_T_NONE;
/* Common DAX helper: translates a page offset into a kernel virtual address
 * and pfn inside the DCSS range, and returns how many pages remain from that
 * offset to the end of the device. */
905 __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
906 long nr_pages, void **kaddr, pfn_t *pfn)
908 resource_size_t offset = pgoff * PAGE_SIZE;
909 unsigned long dev_sz;
911 dev_sz = dev_info->end - dev_info->start + 1;
/* DCSS memory is identity-mapped, so start+offset is directly usable. */
912 *kaddr = (void *) dev_info->start + offset;
913 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);
915 return (dev_sz - offset) / PAGE_SIZE;
/* Legacy block-layer direct_access entry: converts the 512-byte sector and
 * byte size into page units for the common helper, and scales the page count
 * result back to bytes. */
919 dcssblk_blk_direct_access(struct block_device *bdev, sector_t secnum,
920 void **kaddr, pfn_t *pfn, long size)
922 struct dcssblk_dev_info *dev_info;
924 dev_info = bdev->bd_disk->private_data;
927 return __dcssblk_direct_access(dev_info, PHYS_PFN(secnum * 512),
928 PHYS_PFN(size), kaddr, pfn) * PAGE_SIZE;
/* dax_operations ->direct_access: thin wrapper retrieving the per-device
 * state stored as the dax_device's private data by alloc_dax(). */
932 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
933 long nr_pages, void **kaddr, pfn_t *pfn)
935 struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
937 return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
/* Parses the "segments=" module parameter at init time: each comma-separated
 * set is fed to dcssblk_add_store(); a trailing "(local)" switches the new
 * device to exclusive mode via dcssblk_shared_store("0"). */
941 dcssblk_check_params(void)
944 char buf[DCSSBLK_PARM_LEN + 1];
945 struct dcssblk_dev_info *dev_info;
947 for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
/* j scans one set, stopping at ',' (next set), '(' (option) or NUL. */
949 for (j = i; (j < DCSSBLK_PARM_LEN) &&
950 (dcssblk_segments[j] != ',') &&
951 (dcssblk_segments[j] != '\0') &&
952 (dcssblk_segments[j] != '('); j++)
954 buf[j-i] = dcssblk_segments[j];
957 rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
958 if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
/* The device name is the first segment name, upper-cased. */
959 for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
960 buf[k] = toupper(buf[k]);
962 if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
963 down_read(&dcssblk_devices_sem);
964 dev_info = dcssblk_get_device_by_name(buf);
965 up_read(&dcssblk_devices_sem);
967 dcssblk_shared_store(&dev_info->dev,
/* Skip the rest of this set up to the next comma or end of string. */
971 while ((dcssblk_segments[j] != ',') &&
972 (dcssblk_segments[j] != '\0'))
976 if (dcssblk_segments[j] == '\0')
/* PM ->freeze: suspend is only allowed when every device's segments are in a
 * suspend-safe configuration; otherwise the suspend is refused with an error
 * naming the offending device. NOTE(review): the per-type rules are partly
 * in lines not visible here. */
985 static int dcssblk_freeze(struct device *dev)
987 struct dcssblk_dev_info *dev_info;
990 list_for_each_entry(dev_info, &dcssblk_devices, lh) {
991 switch (dev_info->segment_type) {
995 if (!dev_info->is_shared)
1006 pr_err("Suspending the system failed because DCSS device %s "
1008 dev_info->segment_name);
/* PM ->restore: reloads every segment after resume and verifies that each
 * came back at its pre-suspend address range; any mismatch is fatal because
 * the block device's mappings would be invalid (panics below). */
1012 static int dcssblk_restore(struct device *dev)
1014 struct dcssblk_dev_info *dev_info;
1015 struct segment_info *entry;
1016 unsigned long start, end;
1019 list_for_each_entry(dev_info, &dcssblk_devices, lh) {
1020 list_for_each_entry(entry, &dev_info->seg_list, lh) {
1021 segment_unload(entry->segment_name);
1022 rc = segment_load(entry->segment_name, SEGMENT_SHARED,
1025 // TODO in_use check ?
1026 segment_warning(rc, entry->segment_name);
1029 if (start != entry->start || end != entry->end) {
1030 pr_err("The address range of DCSS %s changed "
1031 "while the system was suspended\n",
1032 entry->segment_name);
/* No recovery possible: mappings into the old range may exist. */
1039 panic("fatal dcssblk resume error\n");
/* PM ->thaw: nothing to undo after a failed freeze (body not visible here). */
1042 static int dcssblk_thaw(struct device *dev)
/* Power-management hooks and the platform driver/device used solely to
 * participate in system suspend/resume. */
1047 static const struct dev_pm_ops dcssblk_pm_ops = {
1048 .freeze = dcssblk_freeze,
1049 .thaw = dcssblk_thaw,
1050 .restore = dcssblk_restore,
1053 static struct platform_driver dcssblk_pdrv = {
1056 .pm = &dcssblk_pm_ops,
1060 static struct platform_device *dcssblk_pdev;
1064 * The init/exit functions.
/* Module exit: unwinds dcssblk_init() registrations in reverse order. */
1069 platform_device_unregister(dcssblk_pdev);
1070 platform_driver_unregister(&dcssblk_pdrv);
1071 root_device_unregister(dcssblk_root_dev);
1072 unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
/* Module init: registers the platform driver/device (for PM), the root
 * device with its add/remove attributes, and the block major; finally parses
 * the "segments=" parameter. Labels for the error unwind below are not
 * visible in this excerpt. */
1080 rc = platform_driver_register(&dcssblk_pdrv);
1084 dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL,
1086 if (IS_ERR(dcssblk_pdev)) {
1087 rc = PTR_ERR(dcssblk_pdev);
1091 dcssblk_root_dev = root_device_register("dcssblk");
1092 if (IS_ERR(dcssblk_root_dev)) {
1093 rc = PTR_ERR(dcssblk_root_dev);
1096 rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
1099 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
/* register_blkdev(0, ...) returns a dynamically assigned major. */
1102 rc = register_blkdev(0, DCSSBLK_NAME);
1106 init_rwsem(&dcssblk_devices_sem);
/* Auto-add devices named on the module command line. */
1108 dcssblk_check_params();
/* Error unwind (reverse order of registration). */
1112 root_device_unregister(dcssblk_root_dev);
1114 platform_device_unregister(dcssblk_pdev);
1116 platform_driver_unregister(&dcssblk_pdrv);
/* Module registration and the user-visible description of the "segments="
 * parameter syntax. */
1120 module_init(dcssblk_init);
1121 module_exit(dcssblk_exit);
1123 module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
1124 MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
1125 "comma-separated list, names in each set separated "
1126 "by commas are separated by colons, each set contains "
1127 "names of contiguous segments and each name max. 8 chars.\n"
1128 "Adding \"(local)\" to the end of each set equals echoing 0 "
1129 "to /sys/devices/dcssblk/<device name>/shared after loading "
1130 "the contiguous segments - \n"
1131 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
1133 MODULE_LICENSE("GPL");