/*
 * S/390 common I/O routines -- low level i/o calls
 *
 * Copyright IBM Corp. 1999, 2008
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <linux/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "chp.h"

debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;

DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb);
EXPORT_PER_CPU_SYMBOL(cio_irb);

/*
 * Function: cio_debug_init
 * Initializes three debug logs for common I/O:
 * - cio_msg logs generic cio messages
 * - cio_trace logs the calling of different functions
 * - cio_crw logs machine check related cio messages
 */
static int __init cio_debug_init(void)
{
	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
	if (!cio_debug_msg_id)
		goto out_unregister;
	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
	debug_set_level(cio_debug_msg_id, 2);
	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
	if (!cio_debug_trace_id)
		goto out_unregister;
	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(cio_debug_trace_id, 2);
	cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
	if (!cio_debug_crw_id)
		goto out_unregister;
	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
	debug_set_level(cio_debug_crw_id, 4);
	return 0;

out_unregister:
	if (cio_debug_msg_id)
		debug_unregister(cio_debug_msg_id);
	if (cio_debug_trace_id)
		debug_unregister(cio_debug_trace_id);
	if (cio_debug_crw_id)
		debug_unregister(cio_debug_crw_id);
	return -1;
}

arch_initcall (cio_debug_init);

int cio_set_options(struct subchannel *sch, int flags)
{
	struct io_subchannel_private *priv = to_io_private(sch);

	priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
	priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
	priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
	return 0;
}

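/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * selecting I/O options before starting a channel program. The flags map
 * onto ORB bits in cio_start_key(): DOIO_ALLOW_SUSPEND sets orb->cmd.spnd,
 * DOIO_DENY_PREFETCH clears orb->cmd.pfch, and DOIO_SUPPRESS_INTER
 * (together with suspend) sets orb->cmd.ssic. The locking shown is an
 * assumption of the example; "sch" is an already validated subchannel.
 *
 *	spin_lock_irq(sch->lock);
 *	cio_set_options(sch, DOIO_ALLOW_SUSPEND | DOIO_SUPPRESS_INTER);
 *	spin_unlock_irq(sch->lock);
 */
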
static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
		      sch->schid.sch_no);

	if (cio_update_schib(sch))
		return -ENODEV;

	sprintf(dbf_text, "no%s", dev_name(&sch->dev));
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}

int
cio_start_key (struct subchannel *sch,	/* subchannel structure */
	       struct ccw1 * cpa,	/* logical channel prog addr */
	       __u8 lpm,		/* logical path mask */
	       __u8 key)		/* storage key */
{
	struct io_subchannel_private *priv = to_io_private(sch);
	union orb *orb = &priv->orb;
	int ccode;

	CIO_TRACE_EVENT(5, "stIO");
	CIO_TRACE_EVENT(5, dev_name(&sch->dev));

	memset(orb, 0, sizeof(union orb));
	/* sch is always under 2G. */
	orb->cmd.intparm = (u32)(addr_t)sch;
	orb->cmd.fmt = 1;

	orb->cmd.pfch = priv->options.prefetch == 0;
	orb->cmd.spnd = priv->options.suspend;
	orb->cmd.ssic = priv->options.suspend && priv->options.inter;
	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
	/*
	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
	 */
	orb->cmd.c64 = 1;
	orb->cmd.i2k = 0;
	orb->cmd.key = key >> 4;
	/* issue "Start Subchannel" */
	orb->cmd.cpa = (__u32) __pa(cpa);
	ccode = ssch(sch->schid, orb);

	/* process condition code */
	CIO_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	default:
		return ccode;
	}
}

int
cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}

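/*
 * Illustrative sketch (editor's addition): starting a minimal one-CCW
 * channel program on an already enabled subchannel. The NO-OPERATION
 * command code (0x03) and the surrounding locking are assumptions of the
 * example; device drivers normally go through ccw_device_start() rather
 * than calling cio_start() directly.
 *
 *	struct ccw1 nop_ccw = {
 *		.cmd_code = 0x03,
 *		.flags = CCW_FLAG_SLI,
 *		.count = 0,
 *		.cda = 0,
 *	};
 *	int rc;
 *
 *	spin_lock_irq(sch->lock);
 *	rc = cio_start(sch, &nop_ccw, 0);
 *	spin_unlock_irq(sch->lock);
 *
 * A zero lpm selects all paths in sch->lpm; completion is reported through
 * the subchannel's interrupt handler (sch->driver->irq).
 */
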
/*
 * resume suspended I/O operation
 */
int
cio_resume (struct subchannel *sch)
{
	int ccode;

	CIO_TRACE_EVENT(4, "resIO");
	CIO_TRACE_EVENT(4, dev_name(&sch->dev));

	ccode = rsch (sch->schid);

	CIO_HEX_EVENT(4, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
		return 0;
	case 1:
		return -EBUSY;
	case 2:
		return -EINVAL;
	default:
		/*
		 * useless to wait for request completion
		 *  as device is no longer operational !
		 */
		return -ENODEV;
	}
}

/*
 * halt I/O operation
 */
int
cio_halt(struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "haltIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * Issue "Halt subchannel" and process condition code
	 */
	ccode = hsch (sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:	/* device not operational */
		return -ENODEV;
	}
}

/*
 * Clear I/O operation
 */
int
cio_clear(struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "clearIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * Issue "Clear subchannel" and process condition code
	 */
	ccode = csch (sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
		return 0;
	default:	/* device not operational */
		return -ENODEV;
	}
}

/*
 * Function: cio_cancel
 * Issues a "Cancel Subchannel" on the specified subchannel
 * Note: We don't need any fancy intparms and flags here
 *	 since xsch is executed synchronously.
 * Only for common I/O internal use as for now.
 */
int
cio_cancel (struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "cancelIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	ccode = xsch (sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:		/* success */
		/* Update information in scsw. */
		if (cio_update_schib(sch))
			return -ENODEV;
		return 0;
	case 1:		/* status pending */
		return -EBUSY;
	case 2:		/* not applicable */
		return -EINVAL;
	default:	/* not oper */
		return -ENODEV;
	}
}

static void cio_apply_config(struct subchannel *sch, struct schib *schib)
{
	schib->pmcw.intparm = sch->config.intparm;
	schib->pmcw.mbi = sch->config.mbi;
	schib->pmcw.isc = sch->config.isc;
	schib->pmcw.ena = sch->config.ena;
	schib->pmcw.mme = sch->config.mme;
	schib->pmcw.mp = sch->config.mp;
	schib->pmcw.csense = sch->config.csense;
	schib->pmcw.mbfc = sch->config.mbfc;
	if (sch->config.mbfc)
		schib->mba = sch->config.mba;
}

static int cio_check_config(struct subchannel *sch, struct schib *schib)
{
	return (schib->pmcw.intparm == sch->config.intparm) &&
		(schib->pmcw.mbi == sch->config.mbi) &&
		(schib->pmcw.isc == sch->config.isc) &&
		(schib->pmcw.ena == sch->config.ena) &&
		(schib->pmcw.mme == sch->config.mme) &&
		(schib->pmcw.mp == sch->config.mp) &&
		(schib->pmcw.csense == sch->config.csense) &&
		(schib->pmcw.mbfc == sch->config.mbfc) &&
		(!sch->config.mbfc || (schib->mba == sch->config.mba));
}

/*
 * cio_commit_config - apply configuration to the subchannel
 */
int cio_commit_config(struct subchannel *sch)
{
	int ccode, retry, ret = 0;
	struct schib schib;
	struct irb irb;

	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	for (retry = 0; retry < 5; retry++) {
		/* copy desired changes to local schib */
		cio_apply_config(sch, &schib);
		ccode = msch_err(sch->schid, &schib);
		if (ccode < 0) /* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			if (stsch_err(sch->schid, &schib) ||
			    !css_sch_is_valid(&schib))
				return -ENODEV;
			if (cio_check_config(sch, &schib)) {
				/* commit changes from local schib */
				memcpy(&sch->schib, &schib, sizeof(schib));
				return 0;
			}
			ret = -EAGAIN;
			break;
		case 1: /* status pending */
			ret = -EBUSY;
			if (tsch(sch->schid, &irb))
				return ret;
			break;
		case 2: /* busy */
			udelay(100); /* allow for recovery */
			ret = -EBUSY;
			break;
		case 3: /* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cio_commit_config);

/**
 * cio_update_schib - Perform stsch and update schib if subchannel is valid.
 * @sch: subchannel on which to perform stsch
 * Return zero on success, -ENODEV otherwise.
 */
int cio_update_schib(struct subchannel *sch)
{
	struct schib schib;

	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	memcpy(&sch->schib, &schib, sizeof(schib));
	return 0;
}
EXPORT_SYMBOL_GPL(cio_update_schib);

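/*
 * Illustrative sketch (editor's addition): the usual pattern for changing
 * subchannel characteristics is to set the desired values in sch->config
 * and let cio_commit_config() retry msch until the hardware reflects them;
 * cio_update_schib() can then refresh the cached schib. Turning on
 * measurement-mode updates might look roughly like this (the field values
 * and the "mb_index" variable are assumptions of the example):
 *
 *	sch->config.mme = 1;
 *	sch->config.mbi = mb_index;
 *	if (cio_commit_config(sch))
 *		return -EIO;
 *	if (cio_update_schib(sch))
 *		return -ENODEV;
 */
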
/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	int ret;

	CIO_TRACE_EVENT(2, "ensch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	if (cio_update_schib(sch))
		return -ENODEV;

	sch->config.ena = 1;
	sch->config.isc = sch->isc;
	sch->config.intparm = intparm;

	ret = cio_commit_config(sch);
	if (ret == -EIO) {
		/*
		 * Got a program check in msch. Try without
		 * the concurrent sense bit the next time.
		 */
		sch->config.csense = 0;
		ret = cio_commit_config(sch);
	}
	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);

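/*
 * Illustrative sketch (editor's addition): enabling a subchannel before
 * use and disabling it again when done. Passing the subchannel's own
 * address as intparm mirrors what cio_probe_console() below does; error
 * handling and the "out_err" label are assumptions of the example.
 *
 *	if (cio_enable_subchannel(sch, (u32)(addr_t)sch))
 *		goto out_err;
 *	... start and process I/O on the subchannel ...
 *	cio_disable_subchannel(sch);
 */
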
/**
 * cio_disable_subchannel - disable a subchannel.
 * @sch: subchannel to disable
 */
int cio_disable_subchannel(struct subchannel *sch)
{
	int ret;

	CIO_TRACE_EVENT(2, "dissch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return 0;
	if (cio_update_schib(sch))
		return -ENODEV;

	sch->config.ena = 0;
	ret = cio_commit_config(sch);

	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);

static int cio_check_devno_blacklisted(struct subchannel *sch)
{
	if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux. So we simply
		 * say that there is no device and return ENODEV.
		 */
		CIO_MSG_EVENT(6, "Blacklisted device detected "
			      "at devno %04X, subchannel set %x\n",
			      sch->schib.pmcw.dev, sch->schid.ssid);
		return -ENODEV;
	}
	return 0;
}

/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf(dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT(4, dbf_txt);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch_err(schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	sch->st = sch->schib.pmcw.st;
	sch->schid = schid;

	switch (sch->st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(&sch->schib))
			err = -ENODEV;
		else
			err = cio_check_devno_blacklisted(sch);
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      sch->schid.ssid, sch->schid.sch_no, sch->st);
	return 0;
out:
	return err;
}

/*
 * do_cio_interrupt() handles all normal I/O device IRQ's
 */
static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	set_cpu_flag(CIF_NOHZ_DELAY);
	tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
	irb = this_cpu_ptr(&cio_irb);
	sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
	if (!sch) {
		/* Clear pending interrupt condition. */
		inc_irq_stat(IRQIO_CIO);
		tsch(tpi_info->schid, irb);
		return IRQ_HANDLED;
	}
	spin_lock(sch->lock);
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) == 0) {
		/* Keep subchannel information word up to date. */
		memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw));
		/* Call interrupt handler if there is one. */
		if (sch->driver && sch->driver->irq)
			sch->driver->irq(sch);
		else
			inc_irq_stat(IRQIO_CIO);
	} else
		inc_irq_stat(IRQIO_CIO);
	spin_unlock(sch->lock);

	return IRQ_HANDLED;
}

static struct irqaction io_interrupt = {
	.name	 = "IO",
	.handler = do_cio_interrupt,
};

void __init init_cio_interrupts(void)
{
	irq_set_chip_and_handler(IO_INTERRUPT,
				 &dummy_irq_chip, handle_percpu_irq);
	setup_irq(IO_INTERRUPT, &io_interrupt);
}

#ifdef CONFIG_CCW_CONSOLE
static struct subchannel *console_sch;
static struct lock_class_key console_sch_key;

/*
 * Use cio_tsch to update the subchannel status and call the interrupt handler
 * if status had been pending. Called with the subchannel's lock held.
 */
void cio_tsch(struct subchannel *sch)
{
	struct irb *irb;
	int irq_context;

	irb = this_cpu_ptr(&cio_irb);
	/* Store interrupt response block to lowcore. */
	if (tsch(sch->schid, irb) != 0)
		/* Not status pending or not operational. */
		return;
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	/* Call interrupt handler with updated status. */
	irq_context = in_interrupt();
	if (!irq_context) {
		local_bh_disable();
		irq_enter();
	}
	kstat_incr_irq_this_cpu(IO_INTERRUPT);
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		inc_irq_stat(IRQIO_CIO);
	if (!irq_context) {
		irq_exit();
		_local_bh_enable();
	}
}

static int cio_test_for_console(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib) != 0)
		return -ENXIO;
	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
	    (schib.pmcw.dev == console_devno)) {
		console_irq = schid.sch_no;
		return 1; /* found */
	}
	return 0;
}

static int cio_get_console_sch_no(void)
{
	struct subchannel_id schid;
	struct schib schib;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch_err(schid, &schib) != 0 ||
		    (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
			return -1;
		console_devno = schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
	}
	return console_irq;
}

struct subchannel *cio_probe_console(void)
{
	struct subchannel_id schid;
	struct subchannel *sch;
	int sch_no, ret;

	sch_no = cio_get_console_sch_no();
	if (sch_no == -1) {
		pr_warning("No CCW console was found\n");
		return ERR_PTR(-ENODEV);
	}
	init_subchannel_id(&schid);
	schid.sch_no = sch_no;
	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return sch;

	lockdep_set_class(sch->lock, &console_sch_key);
	isc_register(CONSOLE_ISC);
	sch->config.isc = CONSOLE_ISC;
	sch->config.intparm = (u32)(addr_t)sch;
	ret = cio_commit_config(sch);
	if (ret) {
		isc_unregister(CONSOLE_ISC);
		put_device(&sch->dev);
		return ERR_PTR(ret);
	}
	console_sch = sch;
	return sch;
}

int cio_is_console(struct subchannel_id schid)
{
	if (!console_sch)
		return 0;
	return schid_equal(&schid, &console_sch->schid);
}

void cio_register_early_subchannels(void)
{
	int ret;

	if (!console_sch)
		return;

	ret = css_register_subchannel(console_sch);
	if (ret)
		put_device(&console_sch->dev);
}
#endif /* CONFIG_CCW_CONSOLE */

static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
	int retry, cc;

	cc = 0;
	for (retry=0;retry<3;retry++) {
		schib->pmcw.ena = 0;
		cc = msch_err(schid, schib);
		if (cc)
			return (cc==3?-ENODEV:-EBUSY);
		if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
			return -ENODEV;
		if (!schib->pmcw.ena)
			return 0;
	}
	return -EBUSY; /* uhm... */
}

static int
__clear_io_subchannel_easy(struct subchannel_id schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry=0;retry<20;retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			tsch(ti.schid, this_cpu_ptr(&cio_irb));
			if (schid_equal(&ti.schid, &schid))
				return 0;
		}
		udelay_simple(100);
	}
	return -EBUSY;
}

static void __clear_chsc_subchannel_easy(void)
{
	/* It seems we can only wait for a bit here :/ */
	udelay_simple(100);
}

static int pgm_check_occured;

static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occured = 1;
}

static int stsch_reset(struct subchannel_id schid, struct schib *addr)
{
	int rc;

	pgm_check_occured = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch_err(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occured. */
	barrier();

	if (pgm_check_occured)
		return -EIO;
	else
		return rc;
}

static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;
	switch(__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		switch (schib.pmcw.st) {
		case SUBCHANNEL_TYPE_IO:
			if (__clear_io_subchannel_easy(schid))
				goto out; /* give up... */
			break;
		case SUBCHANNEL_TYPE_CHSC:
			__clear_chsc_subchannel_easy();
			break;
		default:
			/* No default clear strategy */
			break;
		}
		stsch_err(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
out:
	return 0;
}

static atomic_t chpid_reset_count;

static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	union mci mci;

	/* Check for pending channel report word. */
	mci.val = S390_lowcore.mcck_interruption_code;
	if (!mci.cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to RCHP. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}

#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy, NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_tod_clock_fast() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}

static struct reset_call css_reset_call = {
	.fn = css_reset,
};

static int __init init_css_reset_call(void)
{
	atomic_set(&chpid_reset_count, 0);
	register_reset_call(&css_reset_call);
	return 0;
}

arch_initcall(init_css_reset_call);

struct sch_match_id {
	struct subchannel_id schid;
	struct ccw_dev_id devid;
	int rc;
};

static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct sch_match_id *match_id = data;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
	    (schib.pmcw.dev == match_id->devid.devno) &&
	    (schid.ssid == match_id->devid.ssid)) {
		match_id->schid = schid;
		match_id->rc = 0;
		return 1;
	}
	return 0;
}

static int reipl_find_schid(struct ccw_dev_id *devid,
			    struct subchannel_id *schid)
{
	struct sch_match_id match_id;

	match_id.devid = *devid;
	match_id.rc = -ENODEV;
	for_each_subchannel(__reipl_subchannel_match, &match_id);
	if (match_id.rc == 0)
		*schid = match_id.schid;
	return match_id.rc;
}

extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id uninitialized_var(schid);

	s390_reset_system(NULL, NULL, NULL);
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL Device not found\n");
	do_reipl_asm(*((__u32*)&schid));
}

int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
	struct subchannel_id schid;
	struct schib schib;

	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
	if (!schid.one)
		return -ENODEV;
	if (stsch_err(schid, &schib))
		return -ENODEV;
	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
		return -ENODEV;
	if (!schib.pmcw.dnv)
		return -ENODEV;
	iplinfo->devno = schib.pmcw.dev;
	iplinfo->is_qdio = schib.pmcw.qf;
	return 0;
}

/**
 * cio_tm_start_key - perform start function
 * @sch: subchannel on which to perform the start function
 * @tcw: transport-command word to be started
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given subchannel. Return zero on success, non-zero
 * otherwise.
 */
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
	int cc;
	union orb *orb = &to_io_private(sch)->orb;

	memset(orb, 0, sizeof(union orb));
	orb->tm.intparm = (u32) (addr_t) sch;
	orb->tm.key = key >> 4;
	orb->tm.b = 1;
	orb->tm.lpm = lpm ? lpm : sch->lpm;
	orb->tm.tcw = (u32) (addr_t) tcw;
	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		return 0;
	case 1:
	case 2:
		return -EBUSY;
	default:
		return cio_start_handle_notoper(sch, lpm);
	}
}

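/*
 * Illustrative sketch (editor's addition): a transport-mode start takes a
 * tcw instead of a ccw chain. The fcx helpers used below (tcw_init(),
 * tccb_init(), tcw_set_tccb(), tcw_finalize()) are assumed to come from
 * asm/fcx.h; drivers normally use ccw_device_tm_start() rather than
 * calling cio_tm_start_key() directly.
 *
 *	tcw_init(tcw, 1, 0);
 *	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
 *	tcw_set_tccb(tcw, tccb);
 *	... add DCWs and data to the tccb ...
 *	tcw_finalize(tcw, num_tidaws);
 *	rc = cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
 */
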
/**
 * cio_tm_intrg - perform interrogate function
 * @sch - subchannel on which to perform the interrogate function
 *
 * If the specified subchannel is running in transport-mode, perform the
 * interrogate function. Return zero on success, non-zero otherwise.
 */
int cio_tm_intrg(struct subchannel *sch)
{
	int cc;

	if (!to_io_private(sch)->orb.tm.b)
		return -EINVAL;
	cc = xsch(sch->schid);
	switch (cc) {
	case 0:
	case 2:
		return 0;
	case 1:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}