/*
 * Driver for s390 chsc subchannels
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/kernel_stat.h>

#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"

static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;

#define CHSC_MSG(imp, args...) do {					\
		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
	} while (0)

#define CHSC_LOG(imp, txt) do {					\
		debug_text_event(chsc_debug_log_id, imp , txt);	\
	} while (0)

static void CHSC_LOG_HEX(int level, void *data, int length)
{
	while (length > 0) {
		debug_event(chsc_debug_log_id, level, data, length);
		length -= chsc_debug_log_id->buf_size;
		data += chsc_debug_log_id->buf_size;
	}
}

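/*
 * Note: the two debug areas registered in chsc_init_dbfs() below are
 * exposed by the s390 debug feature under /sys/kernel/debug/s390dbf/,
 * as chsc_msg (sprintf view of CHSC_MSG() records) and chsc_log
 * (text/hex views of CHSC_LOG() and CHSC_LOG_HEX() records).
 */
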
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");

static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	struct chsc_request *request = private->request;
	struct irb *irb = (struct irb *)&S390_lowcore.irb;

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;

	/* Copy irb to provided request and set done. */
	if (!request) {
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);
}

static int chsc_subchannel_probe(struct subchannel *sch)
{
	struct chsc_private *private;
	int ret;

	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
		 sch->schid.ssid, sch->schid.sch_no);
	sch->isc = CHSC_SCH_ISC;
	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	dev_set_drvdata(&sch->dev, private);
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
			 sch->schid.ssid, sch->schid.sch_no, ret);
		dev_set_drvdata(&sch->dev, NULL);
		kfree(private);
	} else {
		if (dev_get_uevent_suppress(&sch->dev)) {
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
	}
	return ret;
}

static int chsc_subchannel_remove(struct subchannel *sch)
{
	struct chsc_private *private;

	cio_disable_subchannel(sch);
	private = dev_get_drvdata(&sch->dev);
	dev_set_drvdata(&sch->dev, NULL);
	if (private->request) {
		complete(&private->request->completion);
		put_device(&sch->dev);
	}
	kfree(private);
	return 0;
}

static void chsc_subchannel_shutdown(struct subchannel *sch)
{
	cio_disable_subchannel(sch);
}

static int chsc_subchannel_prepare(struct subchannel *sch)
{
	int cc;
	struct schib schib;
	/*
	 * Don't allow suspend while the subchannel is not idle
	 * since we don't have a way to clear the subchannel and
	 * cannot disable it with a request running.
	 */
	cc = stsch_err(sch->schid, &schib);
	if (!cc && scsw_stctl(&schib.scsw))
		return -EAGAIN;
	return 0;
}

static int chsc_subchannel_freeze(struct subchannel *sch)
{
	return cio_disable_subchannel(sch);
}

static int chsc_subchannel_restore(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}

static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);

static struct css_driver chsc_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "chsc_subchannel",
	},
	.subchannel_type = chsc_subchannel_ids,
	.irq = chsc_subchannel_irq,
	.probe = chsc_subchannel_probe,
	.remove = chsc_subchannel_remove,
	.shutdown = chsc_subchannel_shutdown,
	.prepare = chsc_subchannel_prepare,
	.freeze = chsc_subchannel_freeze,
	.thaw = chsc_subchannel_restore,
	.restore = chsc_subchannel_restore,
};

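/*
 * Both the thaw and restore resume paths reuse chsc_subchannel_restore():
 * re-enabling the subchannel is all that is needed here, since prepare
 * already refuses to suspend while a request is in flight.
 */
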
169 static int __init chsc_init_dbfs(void)
171 chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
173 if (!chsc_debug_msg_id)
175 debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
176 debug_set_level(chsc_debug_msg_id, 2);
177 chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
178 if (!chsc_debug_log_id)
180 debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
181 debug_set_level(chsc_debug_log_id, 2);
184 if (chsc_debug_msg_id)
185 debug_unregister(chsc_debug_msg_id);
static void chsc_remove_dbfs(void)
{
	debug_unregister(chsc_debug_log_id);
	debug_unregister(chsc_debug_msg_id);
}

static int __init chsc_init_sch_driver(void)
{
	return css_driver_register(&chsc_subchannel_driver);
}

static void chsc_cleanup_sch_driver(void)
{
	css_driver_unregister(&chsc_subchannel_driver);
}

static DEFINE_SPINLOCK(chsc_lock);

static int chsc_subchannel_match_next_free(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);

	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}

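/*
 * driver_find_device() returns the matched device with a reference
 * held; chsc_async() drops that reference again via put_device() once
 * it is done with the subchannel.
 */
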
static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
	struct device *dev;

	dev = driver_find_device(&chsc_subchannel_driver.drv,
				 sch ? &sch->dev : NULL, NULL,
				 chsc_subchannel_match_next_free);
	return dev ? to_subchannel(dev) : NULL;
}

/**
 * chsc_async() - try to start a chsc request asynchronously
 * @chsc_area: request to be started
 * @request: request structure to associate
 *
 * Tries to start a chsc request on one of the existing chsc subchannels.
 * Returns:
 *  %0 if the request was performed synchronously
 *  %-EINPROGRESS if the request was successfully started
 *  %-EBUSY if all chsc subchannels are busy
 *  %-ENODEV if no chsc subchannels are available
 * Context:
 *  interrupts disabled, chsc_lock held
 */
static int chsc_async(struct chsc_async_area *chsc_area,
		      struct chsc_request *request)
{
	int cc;
	struct chsc_private *private;
	struct subchannel *sch = NULL;
	int ret = -ENODEV;
	char dbf[10];

	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
	while ((sch = chsc_get_next_subchannel(sch))) {
		spin_lock(sch->lock);
		private = dev_get_drvdata(&sch->dev);
		if (private->request) {
			spin_unlock(sch->lock);
			ret = -EBUSY;
			continue;
		}
		chsc_area->header.sid = sch->schid;
		CHSC_LOG(2, "schid");
		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
		cc = chsc(chsc_area);
		sprintf(dbf, "cc:%d", cc);
		CHSC_LOG(2, dbf);
		switch (cc) {
		case 0:
			ret = 0;
			break;
		case 1:
			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
			ret = -EINPROGRESS;
			private->request = request;
			break;
		case 2:
			ret = -EBUSY;
			break;
		default:
			ret = -ENODEV;
		}
		spin_unlock(sch->lock);
		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
			 sch->schid.ssid, sch->schid.sch_no, cc);
		if (ret == -EINPROGRESS)
			return -EINPROGRESS;
		put_device(&sch->dev);
		if (ret == 0)
			return 0;
	}
	return ret;
}

static void chsc_log_command(struct chsc_async_area *chsc_area)
{
	char dbf[10];

	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
	CHSC_LOG(0, dbf);
	CHSC_LOG_HEX(0, chsc_area, 32);
}

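/*
 * Condense the IRB that chsc_subchannel_irq() stored in the request
 * into an errno. A channel-chain check marks the command as "backed
 * up"; only in that case are the remaining subchannel-status bits
 * evaluated as errors.
 */
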
static int chsc_examine_irb(struct chsc_request *request)
{
	int backed_up;

	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
		return -EIO;
	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
	if (scsw_cstat(&request->irb.scsw) == 0)
		return 0;
	if (!backed_up)
		return 0;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
		return -EIO;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
		return -EPERM;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
		return -EAGAIN;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
		return -EAGAIN;
	return -EIO;
}

static int chsc_ioctl_start(void __user *user_area)
{
	struct chsc_request *request;
	struct chsc_async_area *chsc_area;
	int ret;
	char dbf[10];

	if (!css_general_characteristics.dynio)
		/* It makes no sense to try. */
		return -EOPNOTSUPP;
	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!chsc_area)
		return -ENOMEM;
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request) {
		ret = -ENOMEM;
		goto out_free;
	}
	init_completion(&request->completion);
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	/* copy area back to user */
	if (!ret)
		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
			ret = -EFAULT;
out_free:
	sprintf(dbf, "ret:%d", ret);
	CHSC_LOG(0, dbf);
	kfree(request);
	free_page((unsigned long)chsc_area);
	return ret;
}

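/*
 * Illustrative userspace call sequence for CHSC_START (a sketch, not
 * part of this driver; the ioctl number and struct chsc_async_area
 * come from <asm/chsc.h>, the device node from the misc device below):
 *
 *	struct chsc_async_area *area;	// one zeroed, page-sized buffer
 *	int fd = open("/dev/chsc", O_RDWR);
 *	area->header.length = ...;	// length of the request block
 *	area->header.code = ...;	// CHSC command code to execute
 *	if (ioctl(fd, CHSC_START, area) == 0)
 *		...;			// area now holds the response block
 */
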
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}

static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0028;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}

static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}

static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}

static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}
	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}

static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_scpd *scpd_area;
	struct chsc_cpd_info *chpd;
	int ret;

	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area || !chpd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       scpd_area);
	if (ret)
		goto out_free;
	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	free_page((unsigned long)scpd_area);
	return ret;
}

static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}

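/*
 * One ioctl entry point serves both native and compat callers: for a
 * 32-bit task the user pointer is widened with compat_ptr() before
 * dispatch, which is why .compat_ioctl in chsc_fops below can point to
 * chsc_ioctl() as well.
 */
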
static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};

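/*
 * With MISC_DYNAMIC_MINOR the minor number is assigned at registration
 * time; the device node (typically /dev/chsc) is created from the name
 * above by udev.
 */
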
static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}

static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}

static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}

static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}

module_init(chsc_sch_init);
module_exit(chsc_sch_exit);