/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */
13 #include <linux/kernel_stat.h>
14 #include <linux/init.h>
15 #include <linux/bootmem.h>
16 #include <linux/err.h>
17 #include <linux/virtio.h>
18 #include <linux/virtio_config.h>
19 #include <linux/slab.h>
20 #include <linux/interrupt.h>
21 #include <linux/virtio_ring.h>
22 #include <linux/pfn.h>
23 #include <linux/async.h>
24 #include <linux/wait.h>
25 #include <linux/list.h>
26 #include <linux/bitops.h>
27 #include <linux/module.h>
29 #include <linux/kvm_para.h>
30 #include <asm/setup.h>
33 #include <asm/ccwdev.h>
/*
 * virtio related functions
 */
39 struct vq_config_block {
#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
47 struct virtio_ccw_device {
48 struct virtio_device vdev;
50 __u8 config[VIRTIO_CCW_CONFIG_SIZE];
51 struct ccw_device *cdev;
54 wait_queue_head_t wait_q;
56 struct list_head virtqueues;
57 unsigned long indicators;
58 unsigned long indicators2;
59 struct vq_config_block *config_block;
62 struct vq_info_block {
69 struct virtio_feature_desc {
74 struct virtio_ccw_vq_info {
78 struct vq_info_block *info_block;
79 struct list_head node;
/* Ring alignment and the diagnose subcode used to kick the host. */
#define KVM_VIRTIO_CCW_RING_ALIGN 4096

#define KVM_S390_VIRTIO_CCW_NOTIFY 3

/* Channel command codes understood by the virtio-ccw host. */
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32

/*
 * Flags tracking which channel program is currently in flight
 * (vcdev->curr_io). They live in the upper 16 bits of the intparm;
 * the lower 16 bits carry the queue index for SET_VQ.
 */
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
109 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
111 return container_of(vdev, struct virtio_ccw_device, vdev);
114 static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
119 spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
123 ret = vcdev->curr_io & flag;
124 spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
128 static int ccw_io_helper(struct virtio_ccw_device *vcdev,
129 struct ccw1 *ccw, __u32 intparm)
133 int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
136 spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
137 ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
139 vcdev->curr_io |= flag;
140 spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
142 } while (ret == -EBUSY);
143 wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
144 return ret ? ret : vcdev->err;
147 static inline long do_kvm_notify(struct subchannel_id schid,
148 unsigned long queue_index)
150 register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
151 register struct subchannel_id __schid asm("2") = schid;
152 register unsigned long __index asm("3") = queue_index;
153 register long __rc asm("2");
155 asm volatile ("diag 2,4,0x500\n"
156 : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index)
161 static void virtio_ccw_kvm_notify(struct virtqueue *vq)
163 struct virtio_ccw_vq_info *info = vq->priv;
164 struct virtio_ccw_device *vcdev;
165 struct subchannel_id schid;
167 vcdev = to_vc_device(info->vq->vdev);
168 ccw_device_get_schid(vcdev->cdev, &schid);
169 do_kvm_notify(schid, virtqueue_get_queue_index(vq));
172 static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
173 struct ccw1 *ccw, int index)
175 vcdev->config_block->index = index;
176 ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
178 ccw->count = sizeof(struct vq_config_block);
179 ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
180 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
181 return vcdev->config_block->num;
184 static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
186 struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
187 struct virtio_ccw_vq_info *info = vq->priv;
191 unsigned int index = virtqueue_get_queue_index(vq);
193 /* Remove from our list. */
194 spin_lock_irqsave(&vcdev->lock, flags);
195 list_del(&info->node);
196 spin_unlock_irqrestore(&vcdev->lock, flags);
198 /* Release from host. */
199 info->info_block->queue = 0;
200 info->info_block->align = 0;
201 info->info_block->index = index;
202 info->info_block->num = 0;
203 ccw->cmd_code = CCW_CMD_SET_VQ;
205 ccw->count = sizeof(*info->info_block);
206 ccw->cda = (__u32)(unsigned long)(info->info_block);
207 ret = ccw_io_helper(vcdev, ccw,
208 VIRTIO_CCW_DOING_SET_VQ | index);
210 * -ENODEV isn't considered an error: The device is gone anyway.
211 * This may happen on device detach.
213 if (ret && (ret != -ENODEV))
214 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
217 vring_del_virtqueue(vq);
218 size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
219 free_pages_exact(info->queue, size);
220 kfree(info->info_block);
224 static void virtio_ccw_del_vqs(struct virtio_device *vdev)
226 struct virtqueue *vq, *n;
229 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
234 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
235 virtio_ccw_del_vq(vq, ccw);
240 static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
241 int i, vq_callback_t *callback,
245 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
247 struct virtqueue *vq = NULL;
248 struct virtio_ccw_vq_info *info;
249 unsigned long size = 0; /* silence the compiler */
252 /* Allocate queue. */
253 info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
255 dev_warn(&vcdev->cdev->dev, "no info\n");
259 info->info_block = kzalloc(sizeof(*info->info_block),
260 GFP_DMA | GFP_KERNEL);
261 if (!info->info_block) {
262 dev_warn(&vcdev->cdev->dev, "no info block\n");
266 info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
267 size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
268 info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
269 if (info->queue == NULL) {
270 dev_warn(&vcdev->cdev->dev, "no queue\n");
275 vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
276 true, info->queue, virtio_ccw_kvm_notify,
279 /* For now, we fail if we can't get the requested size. */
280 dev_warn(&vcdev->cdev->dev, "no vq\n");
285 /* Register it with the host. */
286 info->info_block->queue = (__u64)info->queue;
287 info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
288 info->info_block->index = i;
289 info->info_block->num = info->num;
290 ccw->cmd_code = CCW_CMD_SET_VQ;
292 ccw->count = sizeof(*info->info_block);
293 ccw->cda = (__u32)(unsigned long)(info->info_block);
294 err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
296 dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
303 /* Save it to our list. */
304 spin_lock_irqsave(&vcdev->lock, flags);
305 list_add(&info->node, &vcdev->virtqueues);
306 spin_unlock_irqrestore(&vcdev->lock, flags);
312 vring_del_virtqueue(vq);
315 free_pages_exact(info->queue, size);
316 kfree(info->info_block);
322 static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
323 struct virtqueue *vqs[],
324 vq_callback_t *callbacks[],
327 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
328 unsigned long *indicatorp = NULL;
332 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
336 for (i = 0; i < nvqs; ++i) {
337 vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
339 if (IS_ERR(vqs[i])) {
340 ret = PTR_ERR(vqs[i]);
346 /* We need a data area under 2G to communicate. */
347 indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
350 *indicatorp = (unsigned long) &vcdev->indicators;
351 /* Register queue indicators with host. */
352 vcdev->indicators = 0;
353 ccw->cmd_code = CCW_CMD_SET_IND;
355 ccw->count = sizeof(vcdev->indicators);
356 ccw->cda = (__u32)(unsigned long) indicatorp;
357 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
360 /* Register indicators2 with host for config changes */
361 *indicatorp = (unsigned long) &vcdev->indicators2;
362 vcdev->indicators2 = 0;
363 ccw->cmd_code = CCW_CMD_SET_CONF_IND;
365 ccw->count = sizeof(vcdev->indicators2);
366 ccw->cda = (__u32)(unsigned long) indicatorp;
367 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
377 virtio_ccw_del_vqs(vdev);
381 static void virtio_ccw_reset(struct virtio_device *vdev)
383 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
386 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
390 /* Zero status bits. */
393 /* Send a reset ccw on device. */
394 ccw->cmd_code = CCW_CMD_VDEV_RESET;
398 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
402 static u32 virtio_ccw_get_features(struct virtio_device *vdev)
404 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
405 struct virtio_feature_desc *features;
409 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
413 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
418 /* Read the feature bits from the host. */
419 /* TODO: Features > 32 bits */
421 ccw->cmd_code = CCW_CMD_READ_FEAT;
423 ccw->count = sizeof(*features);
424 ccw->cda = (__u32)(unsigned long)features;
425 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
431 rc = le32_to_cpu(features->features);
439 static void virtio_ccw_finalize_features(struct virtio_device *vdev)
441 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
442 struct virtio_feature_desc *features;
446 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
450 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
454 /* Give virtio_ring a chance to accept features. */
455 vring_transport_features(vdev);
457 for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
459 int highbits = i % 2 ? 32 : 0;
461 features->features = cpu_to_le32(vdev->features[i / 2]
463 /* Write the feature bits to the host. */
464 ccw->cmd_code = CCW_CMD_WRITE_FEAT;
466 ccw->count = sizeof(*features);
467 ccw->cda = (__u32)(unsigned long)features;
468 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
475 static void virtio_ccw_get_config(struct virtio_device *vdev,
476 unsigned int offset, void *buf, unsigned len)
478 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
483 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
487 config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
491 /* Read the config area from the host. */
492 ccw->cmd_code = CCW_CMD_READ_CONF;
494 ccw->count = offset + len;
495 ccw->cda = (__u32)(unsigned long)config_area;
496 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
500 memcpy(vcdev->config, config_area, sizeof(vcdev->config));
501 memcpy(buf, &vcdev->config[offset], len);
508 static void virtio_ccw_set_config(struct virtio_device *vdev,
509 unsigned int offset, const void *buf,
512 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
516 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
520 config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
524 memcpy(&vcdev->config[offset], buf, len);
525 /* Write the config area to the host. */
526 memcpy(config_area, vcdev->config, sizeof(vcdev->config));
527 ccw->cmd_code = CCW_CMD_WRITE_CONF;
529 ccw->count = offset + len;
530 ccw->cda = (__u32)(unsigned long)config_area;
531 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
538 static u8 virtio_ccw_get_status(struct virtio_device *vdev)
540 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
542 return *vcdev->status;
545 static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
547 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
550 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
554 /* Write the status to the host. */
555 *vcdev->status = status;
556 ccw->cmd_code = CCW_CMD_WRITE_STATUS;
558 ccw->count = sizeof(status);
559 ccw->cda = (__u32)(unsigned long)vcdev->status;
560 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
564 static struct virtio_config_ops virtio_ccw_config_ops = {
565 .get_features = virtio_ccw_get_features,
566 .finalize_features = virtio_ccw_finalize_features,
567 .get = virtio_ccw_get_config,
568 .set = virtio_ccw_set_config,
569 .get_status = virtio_ccw_get_status,
570 .set_status = virtio_ccw_set_status,
571 .reset = virtio_ccw_reset,
572 .find_vqs = virtio_ccw_find_vqs,
573 .del_vqs = virtio_ccw_del_vqs,
/*
 * ccw bus driver related functions
 */
581 static void virtio_ccw_release_dev(struct device *_d)
583 struct virtio_device *dev = container_of(_d, struct virtio_device,
585 struct virtio_ccw_device *vcdev = to_vc_device(dev);
587 kfree(vcdev->status);
588 kfree(vcdev->config_block);
592 static int irb_is_error(struct irb *irb)
594 if (scsw_cstat(&irb->scsw) != 0)
596 if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
598 if (scsw_cc(&irb->scsw) != 0)
603 static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
606 struct virtio_ccw_vq_info *info;
608 struct virtqueue *vq;
611 spin_lock_irqsave(&vcdev->lock, flags);
612 list_for_each_entry(info, &vcdev->virtqueues, node) {
613 if (virtqueue_get_queue_index(info->vq) == index) {
618 spin_unlock_irqrestore(&vcdev->lock, flags);
622 static void virtio_ccw_int_handler(struct ccw_device *cdev,
623 unsigned long intparm,
626 __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
627 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
629 struct virtqueue *vq;
630 struct virtio_driver *drv;
632 /* Check if it's a notification from the host. */
633 if ((intparm == 0) &&
634 (scsw_stctl(&irb->scsw) ==
635 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
638 if (irb_is_error(irb))
639 vcdev->err = -EIO; /* XXX - use real error */
640 if (vcdev->curr_io & activity) {
642 case VIRTIO_CCW_DOING_READ_FEAT:
643 case VIRTIO_CCW_DOING_WRITE_FEAT:
644 case VIRTIO_CCW_DOING_READ_CONFIG:
645 case VIRTIO_CCW_DOING_WRITE_CONFIG:
646 case VIRTIO_CCW_DOING_WRITE_STATUS:
647 case VIRTIO_CCW_DOING_SET_VQ:
648 case VIRTIO_CCW_DOING_SET_IND:
649 case VIRTIO_CCW_DOING_SET_CONF_IND:
650 case VIRTIO_CCW_DOING_RESET:
651 case VIRTIO_CCW_DOING_READ_VQ_CONF:
652 vcdev->curr_io &= ~activity;
653 wake_up(&vcdev->wait_q);
656 /* don't know what to do... */
657 dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
663 for_each_set_bit(i, &vcdev->indicators,
664 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
665 /* The bit clear must happen before the vring kick. */
666 clear_bit(i, &vcdev->indicators);
668 vq = virtio_ccw_vq_by_ind(vcdev, i);
669 vring_interrupt(0, vq);
671 if (test_bit(0, &vcdev->indicators2)) {
672 drv = container_of(vcdev->vdev.dev.driver,
673 struct virtio_driver, driver);
675 if (drv && drv->config_changed)
676 drv->config_changed(&vcdev->vdev);
677 clear_bit(0, &vcdev->indicators2);
682 * We usually want to autoonline all devices, but give the admin
683 * a way to exempt devices from this.
685 #define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
687 static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
689 static char *no_auto = "";
691 module_param(no_auto, charp, 0444);
692 MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
694 static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
696 struct ccw_dev_id id;
698 ccw_device_get_id(cdev, &id);
699 if (test_bit(id.devno, devs_no_auto[id.ssid]))
704 static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
706 struct ccw_device *cdev = data;
709 ret = ccw_device_set_online(cdev);
711 dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
714 static int virtio_ccw_probe(struct ccw_device *cdev)
716 cdev->handler = virtio_ccw_int_handler;
718 if (virtio_ccw_check_autoonline(cdev))
719 async_schedule(virtio_ccw_auto_online, cdev);
723 static void virtio_ccw_remove(struct ccw_device *cdev)
725 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
728 unregister_virtio_device(&vcdev->vdev);
729 dev_set_drvdata(&cdev->dev, NULL);
731 cdev->handler = NULL;
734 static int virtio_ccw_offline(struct ccw_device *cdev)
736 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
738 unregister_virtio_device(&vcdev->vdev);
739 dev_set_drvdata(&cdev->dev, NULL);
744 static int virtio_ccw_online(struct ccw_device *cdev)
747 struct virtio_ccw_device *vcdev;
749 vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
751 dev_warn(&cdev->dev, "Could not get memory for virtio\n");
755 vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
756 GFP_DMA | GFP_KERNEL);
757 if (!vcdev->config_block) {
761 vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
762 if (!vcdev->status) {
767 vcdev->vdev.dev.parent = &cdev->dev;
768 vcdev->vdev.dev.release = virtio_ccw_release_dev;
769 vcdev->vdev.config = &virtio_ccw_config_ops;
771 init_waitqueue_head(&vcdev->wait_q);
772 INIT_LIST_HEAD(&vcdev->virtqueues);
773 spin_lock_init(&vcdev->lock);
775 dev_set_drvdata(&cdev->dev, vcdev);
776 vcdev->vdev.id.vendor = cdev->id.cu_type;
777 vcdev->vdev.id.device = cdev->id.cu_model;
778 ret = register_virtio_device(&vcdev->vdev);
780 dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
786 dev_set_drvdata(&cdev->dev, NULL);
787 put_device(&vcdev->vdev.dev);
791 kfree(vcdev->status);
792 kfree(vcdev->config_block);
/* CIO notify callback; no special event handling needed (yet). */
static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
	/* TODO: Check whether we need special handling here. */
	return 0;
}
804 static struct ccw_device_id virtio_ids[] = {
805 { CCW_DEVICE(0x3832, 0) },
808 MODULE_DEVICE_TABLE(ccw, virtio_ids);
810 static struct ccw_driver virtio_ccw_driver = {
812 .owner = THIS_MODULE,
813 .name = "virtio_ccw",
816 .probe = virtio_ccw_probe,
817 .remove = virtio_ccw_remove,
818 .set_offline = virtio_ccw_offline,
819 .set_online = virtio_ccw_online,
820 .notify = virtio_ccw_cio_notify,
821 .int_class = IRQIO_VIR,
824 static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
825 int max_digit, int max_val)
832 while (diff <= max_digit) {
833 int value = hex_to_bin(**cp);
837 *val = *val * 16 + value;
842 if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
848 static int __init parse_busid(char *str, unsigned int *cssid,
849 unsigned int *ssid, unsigned int *devno)
860 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
861 if (ret || (str_work[0] != '.'))
864 ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
865 if (ret || (str_work[0] != '.'))
868 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
869 if (ret || (str_work[0] != '\0'))
877 static void __init no_auto_parse(void)
879 unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
884 while ((parm = strsep(&str, ","))) {
885 rc = parse_busid(strsep(&parm, "-"), &from_cssid,
890 rc = parse_busid(parm, &to_cssid,
892 if ((from_ssid > to_ssid) ||
893 ((from_ssid == to_ssid) && (from > to)))
896 to_cssid = from_cssid;
902 while ((from_ssid < to_ssid) ||
903 ((from_ssid == to_ssid) && (from <= to))) {
904 set_bit(from, devs_no_auto[from_ssid]);
906 if (from > __MAX_SUBCHANNEL) {
914 static int __init virtio_ccw_init(void)
916 /* parse no_auto string before we do anything further */
918 return ccw_driver_register(&virtio_ccw_driver);
920 module_init(virtio_ccw_init);
922 static void __exit virtio_ccw_exit(void)
924 ccw_driver_unregister(&virtio_ccw_driver);
926 module_exit(virtio_ccw_exit);