]> git.karo-electronics.de Git - linux-beck.git/blob - drivers/s390/cio/device_fsm.c
7665000e8dfe047de5cadb5d2076420d4909b36c
[linux-beck.git] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/jiffies.h>
14 #include <linux/string.h>
15
16 #include <asm/ccwdev.h>
17 #include <asm/cio.h>
18
19 #include "cio.h"
20 #include "cio_debug.h"
21 #include "css.h"
22 #include "device.h"
23 #include "chsc.h"
24 #include "ioasm.h"
25
26 int
27 device_is_online(struct subchannel *sch)
28 {
29         struct ccw_device *cdev;
30
31         if (!sch->dev.driver_data)
32                 return 0;
33         cdev = sch->dev.driver_data;
34         return (cdev->private->state == DEV_STATE_ONLINE);
35 }
36
37 int
38 device_is_disconnected(struct subchannel *sch)
39 {
40         struct ccw_device *cdev;
41
42         if (!sch->dev.driver_data)
43                 return 0;
44         cdev = sch->dev.driver_data;
45         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
46                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
47 }
48
49 void
50 device_set_disconnected(struct subchannel *sch)
51 {
52         struct ccw_device *cdev;
53
54         if (!sch->dev.driver_data)
55                 return;
56         cdev = sch->dev.driver_data;
57         ccw_device_set_timeout(cdev, 0);
58         cdev->private->flags.fake_irb = 0;
59         cdev->private->state = DEV_STATE_DISCONNECTED;
60 }
61
62 int device_trigger_verify(struct subchannel *sch)
63 {
64         struct ccw_device *cdev;
65
66         cdev = sch->dev.driver_data;
67         if (!cdev || !cdev->online)
68                 return -EINVAL;
69         dev_fsm_event(cdev, DEV_EVENT_VERIFY);
70         return 0;
71 }
72
73 /*
74  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
75  */
76 static void
77 ccw_device_timeout(unsigned long data)
78 {
79         struct ccw_device *cdev;
80
81         cdev = (struct ccw_device *) data;
82         spin_lock_irq(cdev->ccwlock);
83         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
84         spin_unlock_irq(cdev->ccwlock);
85 }
86
87 /*
88  * Set timeout
89  */
90 void
91 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
92 {
93         if (expires == 0) {
94                 del_timer(&cdev->private->timer);
95                 return;
96         }
97         if (timer_pending(&cdev->private->timer)) {
98                 if (mod_timer(&cdev->private->timer, jiffies + expires))
99                         return;
100         }
101         cdev->private->timer.function = ccw_device_timeout;
102         cdev->private->timer.data = (unsigned long) cdev;
103         cdev->private->timer.expires = jiffies + expires;
104         add_timer(&cdev->private->timer);
105 }
106
107 /* Kill any pending timers after machine check. */
108 void
109 device_kill_pending_timer(struct subchannel *sch)
110 {
111         struct ccw_device *cdev;
112
113         if (!sch->dev.driver_data)
114                 return;
115         cdev = sch->dev.driver_data;
116         ccw_device_set_timeout(cdev, 0);
117 }
118
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, a few tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 *
 * NOTE(review): the iretry counter below allows 3 halt attempts, not the
 * two the original comment claimed - confirm which is intended.
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the subchannel information block from the hardware. */
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. Only possible while neither halt nor clear
	 * is already pending on the subchannel. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* A return of 0 means halt was started and an
			 * interrupt is still to come -> report -EBUSY. */
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All retries exhausted; the subchannel cannot be stopped. */
	panic("Can't stop i/o on subchannel.\n");
}
168
169 static int
170 ccw_device_handle_oper(struct ccw_device *cdev)
171 {
172         struct subchannel *sch;
173
174         sch = to_subchannel(cdev->dev.parent);
175         cdev->private->flags.recog_done = 1;
176         /*
177          * Check if cu type and device type still match. If
178          * not, it is certainly another device and we have to
179          * de- and re-register. Also check here for non-matching devno.
180          */
181         if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
182             cdev->id.cu_model != cdev->private->senseid.cu_model ||
183             cdev->id.dev_type != cdev->private->senseid.dev_type ||
184             cdev->id.dev_model != cdev->private->senseid.dev_model ||
185             cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
186                 PREPARE_WORK(&cdev->private->kick_work,
187                              ccw_device_do_unreg_rereg, cdev);
188                 queue_work(ccw_device_work, &cdev->private->kick_work);
189                 return 0;
190         }
191         cdev->private->flags.donotify = 1;
192         return 1;
193 }
194
195 /*
196  * The machine won't give us any notification by machine check if a chpid has
197  * been varied online on the SE so we have to find out by magic (i. e. driving
198  * the channel subsystem to device selection and updating our path masks).
199  */
200 static inline void
201 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
202 {
203         int mask, i;
204
205         for (i = 0; i<8; i++) {
206                 mask = 0x80 >> i;
207                 if (!(sch->lpm & mask))
208                         continue;
209                 if (old_lpm & mask)
210                         continue;
211                 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
212         }
213 }
214
/*
 * Stop device recognition and transition the device into @state
 * (DEV_STATE_OFFLINE, DEV_STATE_BOXED or DEV_STATE_NOT_OPER).
 * Also refreshes path information and handles the special case of a
 * disconnected device re-running sense id.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	/* A disconnected device that is still not operational simply stays
	 * in the disconnected state. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Reappearing device - is it still the same one? */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		memset(&cdev->id, 0, sizeof(cdev->id));
		cdev->id.cu_type   = cdev->private->senseid.cu_type;
		cdev->id.cu_model  = cdev->private->senseid.cu_model;
		cdev->id.dev_type  = cdev->private->senseid.dev_type;
		cdev->id.dev_model = cdev->private->senseid.dev_model;
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			/* Skip the normal recog-done processing below;
			 * the oper notify path takes over. */
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n",
			  cdev->private->dev_id.ssid,
			  cdev->private->dev_id.devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
301
302 /*
303  * Function called from device_id.c after sense id has completed.
304  */
305 void
306 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
307 {
308         switch (err) {
309         case 0:
310                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
311                 break;
312         case -ETIME:            /* Sense id stopped by timeout. */
313                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
314                 break;
315         default:
316                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
317                 break;
318         }
319 }
320
321 static void
322 ccw_device_oper_notify(void *data)
323 {
324         struct ccw_device *cdev;
325         struct subchannel *sch;
326         int ret;
327
328         cdev = data;
329         sch = to_subchannel(cdev->dev.parent);
330         ret = (sch->driver && sch->driver->notify) ?
331                 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
332         if (!ret)
333                 /* Driver doesn't want device back. */
334                 ccw_device_do_unreg_rereg(cdev);
335         else {
336                 /* Reenable channel measurements, if needed. */
337                 cmf_reenable(cdev);
338                 wake_up(&cdev->private->wait_q);
339         }
340 }
341
342 /*
343  * Finished with online/offline processing.
344  */
345 static void
346 ccw_device_done(struct ccw_device *cdev, int state)
347 {
348         struct subchannel *sch;
349
350         sch = to_subchannel(cdev->dev.parent);
351
352         ccw_device_set_timeout(cdev, 0);
353
354         if (state != DEV_STATE_ONLINE)
355                 cio_disable_subchannel(sch);
356
357         /* Reset device status. */
358         memset(&cdev->private->irb, 0, sizeof(struct irb));
359
360         cdev->private->state = state;
361
362
363         if (state == DEV_STATE_BOXED)
364                 CIO_DEBUG(KERN_WARNING, 2,
365                           "Boxed device %04x on subchannel %04x\n",
366                           cdev->private->dev_id.devno, sch->schid.sch_no);
367
368         if (cdev->private->flags.donotify) {
369                 cdev->private->flags.donotify = 0;
370                 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
371                              cdev);
372                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
373         }
374         wake_up(&cdev->private->wait_q);
375
376         if (css_init_done && state != DEV_STATE_ONLINE)
377                 put_device (&cdev->dev);
378 }
379
380 static inline int cmp_pgid(struct pgid *p1, struct pgid *p2)
381 {
382         char *c1;
383         char *c2;
384
385         c1 = (char *)p1;
386         c2 = (char *)p2;
387
388         return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
389 }
390
/*
 * Reduce the per-path pgid array to a single common pgid in slot 0.
 * Paths still in SNID_STATE1_RESET (no pgid yet) are ignored. If two
 * non-reset pgids disagree, path grouping is disabled for the device.
 * If no path reported a pgid, the css-wide global pgid is used.
 */
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;	/* index of the first path with a usable pgid */

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0], &css[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}
430
431 /*
432  * Function called from device_pgid.c after sense path ground has completed.
433  */
434 void
435 ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
436 {
437         struct subchannel *sch;
438
439         sch = to_subchannel(cdev->dev.parent);
440         switch (err) {
441         case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
442                 cdev->private->options.pgroup = 0;
443                 break;
444         case 0: /* success */
445         case -EACCES: /* partial success, some paths not operational */
446                 /* Check if all pgids are equal or 0. */
447                 __ccw_device_get_common_pgid(cdev);
448                 break;
449         case -ETIME:            /* Sense path group id stopped by timeout. */
450         case -EUSERS:           /* device is reserved for someone else. */
451                 ccw_device_done(cdev, DEV_STATE_BOXED);
452                 return;
453         default:
454                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
455                 return;
456         }
457         /* Start Path Group verification. */
458         cdev->private->state = DEV_STATE_VERIFY;
459         cdev->private->flags.doverify = 0;
460         ccw_device_verify_start(cdev);
461 }
462
463 /*
464  * Start device recognition.
465  */
466 int
467 ccw_device_recognition(struct ccw_device *cdev)
468 {
469         struct subchannel *sch;
470         int ret;
471
472         if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
473             (cdev->private->state != DEV_STATE_BOXED))
474                 return -EINVAL;
475         sch = to_subchannel(cdev->dev.parent);
476         ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
477         if (ret != 0)
478                 /* Couldn't enable the subchannel for i/o. Sick device. */
479                 return ret;
480
481         /* After 60s the device recognition is considered to have failed. */
482         ccw_device_set_timeout(cdev, 60*HZ);
483
484         /*
485          * We used to start here with a sense pgid to find out whether a device
486          * is locked by someone else. Unfortunately, the sense pgid command
487          * code has other meanings on devices predating the path grouping
488          * algorithm, so we start with sense id and box the device after an
489          * timeout (or if sense pgid during path verification detects the device
490          * is locked, as may happen on newer devices).
491          */
492         cdev->private->flags.recog_done = 0;
493         cdev->private->state = DEV_STATE_SENSE_ID;
494         ccw_device_sense_id_start(cdev);
495         return 0;
496 }
497
498 /*
499  * Handle timeout in device recognition.
500  */
501 static void
502 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
503 {
504         int ret;
505
506         ret = ccw_device_cancel_halt_clear(cdev);
507         switch (ret) {
508         case 0:
509                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
510                 break;
511         case -ENODEV:
512                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
513                 break;
514         default:
515                 ccw_device_set_timeout(cdev, 3*HZ);
516         }
517 }
518
519
/*
 * Worker: all paths to the device are gone. Ask the subchannel driver
 * whether it wants to keep the device; if not, schedule unregistration
 * of the subchannel, otherwise put the device into the disconnected
 * state and wait for paths to return.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			/* The worker needs its own reference on the ccw
			 * device; it releases both references when done. */
			if (get_device(&cdev->dev)) {
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* No device reference - drop the subchannel
				 * reference taken above again. */
				put_device(&sch->dev);
		}
	} else {
		/* Driver keeps the device: go disconnected and wait for
		 * the paths to come back. */
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
555
/*
 * Path verification finished with result @err. Updates the path mask,
 * possibly repeats verification, and completes the online transition
 * (delivering a deferred fake interrupt if one was pending) or moves
 * the device to boxed/not-oper on failure.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	stsch(sch->schid, &sch->schib);
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cc = 1;
			cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
606
607 /*
608  * Get device online.
609  */
610 int
611 ccw_device_online(struct ccw_device *cdev)
612 {
613         struct subchannel *sch;
614         int ret;
615
616         if ((cdev->private->state != DEV_STATE_OFFLINE) &&
617             (cdev->private->state != DEV_STATE_BOXED))
618                 return -EINVAL;
619         sch = to_subchannel(cdev->dev.parent);
620         if (css_init_done && !get_device(&cdev->dev))
621                 return -ENODEV;
622         ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
623         if (ret != 0) {
624                 /* Couldn't enable the subchannel for i/o. Sick device. */
625                 if (ret == -ENODEV)
626                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
627                 return ret;
628         }
629         /* Do we want to do path grouping? */
630         if (!cdev->private->options.pgroup) {
631                 /* Start initial path verification. */
632                 cdev->private->state = DEV_STATE_VERIFY;
633                 cdev->private->flags.doverify = 0;
634                 ccw_device_verify_start(cdev);
635                 return 0;
636         }
637         /* Do a SensePGID first. */
638         cdev->private->state = DEV_STATE_SENSE_PGID;
639         ccw_device_sense_pgid_start(cdev);
640         return 0;
641 }
642
643 void
644 ccw_device_disband_done(struct ccw_device *cdev, int err)
645 {
646         switch (err) {
647         case 0:
648                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
649                 break;
650         case -ETIME:
651                 ccw_device_done(cdev, DEV_STATE_BOXED);
652                 break;
653         default:
654                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
655                 break;
656         }
657 }
658
659 /*
660  * Shutdown device.
661  */
662 int
663 ccw_device_offline(struct ccw_device *cdev)
664 {
665         struct subchannel *sch;
666
667         sch = to_subchannel(cdev->dev.parent);
668         if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
669                 return -ENODEV;
670         if (cdev->private->state != DEV_STATE_ONLINE) {
671                 if (sch->schib.scsw.actl != 0)
672                         return -EBUSY;
673                 return -EINVAL;
674         }
675         if (sch->schib.scsw.actl != 0)
676                 return -EBUSY;
677         /* Are we doing path grouping? */
678         if (!cdev->private->options.pgroup) {
679                 /* No, set state offline immediately. */
680                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
681                 return 0;
682         }
683         /* Start Set Path Group commands. */
684         cdev->private->state = DEV_STATE_DISBAND_PGID;
685         ccw_device_disband_start(cdev);
686         return 0;
687 }
688
689 /*
690  * Handle timeout in device online/offline process.
691  */
692 static void
693 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
694 {
695         int ret;
696
697         ret = ccw_device_cancel_halt_clear(cdev);
698         switch (ret) {
699         case 0:
700                 ccw_device_done(cdev, DEV_STATE_BOXED);
701                 break;
702         case -ENODEV:
703                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
704                 break;
705         default:
706                 ccw_device_set_timeout(cdev, 3*HZ);
707         }
708 }
709
/*
 * Handle not oper event in device recognition: simply finish
 * recognition with the not-oper result.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
718
719 /*
720  * Handle not operational event while offline.
721  */
722 static void
723 ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
724 {
725         struct subchannel *sch;
726
727         cdev->private->state = DEV_STATE_NOT_OPER;
728         sch = to_subchannel(cdev->dev.parent);
729         if (get_device(&cdev->dev)) {
730                 PREPARE_WORK(&cdev->private->kick_work,
731                              ccw_device_call_sch_unregister, cdev);
732                 queue_work(ccw_device_work, &cdev->private->kick_work);
733         }
734         wake_up(&cdev->private->wait_q);
735 }
736
737 /*
738  * Handle not operational event while online.
739  */
740 static void
741 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
742 {
743         struct subchannel *sch;
744
745         sch = to_subchannel(cdev->dev.parent);
746         if (sch->driver->notify &&
747             sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
748                         ccw_device_set_timeout(cdev, 0);
749                         cdev->private->flags.fake_irb = 0;
750                         cdev->private->state = DEV_STATE_DISCONNECTED;
751                         wake_up(&cdev->private->wait_q);
752                         return;
753         }
754         cdev->private->state = DEV_STATE_NOT_OPER;
755         cio_disable_subchannel(sch);
756         if (sch->schib.scsw.actl != 0) {
757                 // FIXME: not-oper indication to device driver ?
758                 ccw_device_call_handler(cdev);
759         }
760         if (get_device(&cdev->dev)) {
761                 PREPARE_WORK(&cdev->private->kick_work,
762                              ccw_device_call_sch_unregister, cdev);
763                 queue_work(ccw_device_work, &cdev->private->kick_work);
764         }
765         wake_up(&cdev->private->wait_q);
766 }
767
/*
 * Handle path verification event. If the device is currently busy (or
 * waiting for sense data) the verification is only flagged and will be
 * started once the device is idle again.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Waiting for basic sense - defer verification. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verfication now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}
803
/*
 * Got an interrupt for a normal io (state online). Handles unsolicited
 * interrupts (starting a basic sense for unit checks without sense
 * data), accumulates the irb and dispatches to the driver's handler.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The hardware stores the irb at a fixed lowcore location. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				/* Couldn't start basic sense - give the
				 * driver the raw irb instead. */
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Unsolicited interrupts are reported with intparm 0. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
845
846 /*
847  * Got an timeout in online state.
848  */
849 static void
850 ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
851 {
852         int ret;
853
854         ccw_device_set_timeout(cdev, 0);
855         ret = ccw_device_cancel_halt_clear(cdev);
856         if (ret == -EBUSY) {
857                 ccw_device_set_timeout(cdev, 3*HZ);
858                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
859                 return;
860         }
861         if (ret == -ENODEV) {
862                 struct subchannel *sch;
863
864                 sch = to_subchannel(cdev->dev.parent);
865                 if (!sch->lpm) {
866                         PREPARE_WORK(&cdev->private->kick_work,
867                                      ccw_device_nopath_notify, cdev);
868                         queue_work(ccw_device_notify_work,
869                                    &cdev->private->kick_work);
870                 } else
871                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
872         } else if (cdev->handler)
873                 cdev->handler(cdev, cdev->private->intparm,
874                               ERR_PTR(-ETIMEDOUT));
875 }
876
877 /*
878  * Got an interrupt for a basic sense.
879  */
880 void
881 ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
882 {
883         struct irb *irb;
884
885         irb = (struct irb *) __LC_IRB;
886         /* Check for unsolicited interrupt. */
887         if (irb->scsw.stctl ==
888                         (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
889                 if (irb->scsw.cc == 1)
890                         /* Basic sense hasn't started. Try again. */
891                         ccw_device_do_sense(cdev, irb);
892                 else {
893                         printk(KERN_INFO "Huh? %s(%s): unsolicited "
894                                "interrupt...\n",
895                                __FUNCTION__, cdev->dev.bus_id);
896                         if (cdev->handler)
897                                 cdev->handler (cdev, 0, irb);
898                 }
899                 return;
900         }
901         /*
902          * Check if a halt or clear has been issued in the meanwhile. If yes,
903          * only deliver the halt/clear interrupt to the device driver as if it
904          * had killed the original request.
905          */
906         if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
907                 cdev->private->flags.dosense = 0;
908                 memset(&cdev->private->irb, 0, sizeof(struct irb));
909                 ccw_device_accumulate_irb(cdev, irb);
910                 goto call_handler;
911         }
912         /* Add basic sense info to irb. */
913         ccw_device_accumulate_basic_sense(cdev, irb);
914         if (cdev->private->flags.dosense) {
915                 /* Another basic sense is needed. */
916                 ccw_device_do_sense(cdev, irb);
917                 return;
918         }
919 call_handler:
920         cdev->private->state = DEV_STATE_ONLINE;
921         /* Call the handler. */
922         if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
923                 /* Start delayed path verification. */
924                 ccw_device_online_verify(cdev, 0);
925 }
926
927 static void
928 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
929 {
930         struct irb *irb;
931
932         irb = (struct irb *) __LC_IRB;
933         /* Accumulate status. We don't do basic sense. */
934         ccw_device_accumulate_irb(cdev, irb);
935         /* Remember to clear irb to avoid residuals. */
936         memset(&cdev->private->irb, 0, sizeof(struct irb));
937         /* Try to start delayed device verification. */
938         ccw_device_online_verify(cdev, 0);
939         /* Note: Don't call handler for cio initiated clear! */
940 }
941
/*
 * A cancel/halt/clear issued to kill a timed-out request has completed:
 * report -EIO for the dead I/O, then handle path state.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        /* OK, i/o is dead now. Call interrupt handler. */
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-EIO));
        if (!sch->lpm) {
                /* No usable path left; notify the driver asynchronously. */
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
962
/*
 * The kill attempt itself timed out: retry cancel/halt/clear until it
 * either succeeds, stays busy (re-arm the timer), or the device is gone.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                /* Still busy; try again in 3 seconds. */
                ccw_device_set_timeout(cdev, 3*HZ);
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        /* No path left; notify the driver asynchronously. */
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        //FIXME: Can we get here?
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-EIO));
}
992
/*
 * Terminate the current i/o on a subchannel and complete it towards
 * the device driver with -EIO.
 * NOTE(review): assumes sch->dev.driver_data is non-NULL — callers
 * presumably guarantee an attached ccw device; confirm.
 */
void device_kill_io(struct subchannel *sch)
{
        int ret;
        struct ccw_device *cdev;

        cdev = sch->dev.driver_data;
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                /* Busy; retry from the TIMEOUT_KILL state in 3 seconds. */
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                if (!sch->lpm) {
                        /* No path left; notify the driver asynchronously. */
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        /* I/O is dead; complete it with an error. */
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-EIO));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
1026
/*
 * A verify event arrived while another operation is in flight; just
 * flag the request so verification runs once the current task is done.
 */
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* Start verification after current task finished. */
        cdev->private->flags.doverify = 1;
}
1033
/*
 * Completion of a steal-lock operation (interrupt or timeout):
 * discard unsolicited status, otherwise accumulate it, and wake the
 * waiter on cdev->private->wait_q in every case.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        switch (dev_event) {
        case DEV_EVENT_INTERRUPT:
                irb = (struct irb *) __LC_IRB;
                /* Check for unsolicited interrupt. */
                if ((irb->scsw.stctl ==
                     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
                    (!irb->scsw.cc))
                        /* FIXME: we should restart stlck here, but this
                         * is extremely unlikely ... */
                        goto out_wakeup;

                ccw_device_accumulate_irb(cdev, irb);
                /* We don't care about basic sense etc. */
                break;
        default: /* timeout */
                break;
        }
out_wakeup:
        wake_up(&cdev->private->wait_q);
}
1059
1060 static void
1061 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1062 {
1063         struct subchannel *sch;
1064
1065         sch = to_subchannel(cdev->dev.parent);
1066         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
1067                 /* Couldn't enable the subchannel for i/o. Sick device. */
1068                 return;
1069
1070         /* After 60s the device recognition is considered to have failed. */
1071         ccw_device_set_timeout(cdev, 60*HZ);
1072
1073         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1074         ccw_device_sense_id_start(cdev);
1075 }
1076
/*
 * Re-probe a disconnected device: refresh the subchannel data,
 * reconstruct pmcw settings that were lost, and restart device
 * recognition via sense id.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        /* Only act on devices currently in the disconnected state. */
        if (cdev->private->state != DEV_STATE_DISCONNECTED)
                return;

        /* Update some values. */
        if (stsch(sch->schid, &sch->schib))
                return;

        /*
         * The pim, pam, pom values may not be accurate, but they are the best
         * we have before performing device selection :/
         */
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
        /* Re-set some bits in the pmcw that were lost. */
        sch->schib.pmcw.isc = 3;
        sch->schib.pmcw.csense = 1;
        sch->schib.pmcw.ena = 0;
        /* Enable multipath mode when more than one path bit is set. */
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1;
        sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
        /* We should also update ssd info, but this has to wait. */
        ccw_device_start_id(cdev, 0);
}
1107
1108 static void
1109 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1110 {
1111         struct subchannel *sch;
1112
1113         sch = to_subchannel(cdev->dev.parent);
1114         /*
1115          * An interrupt in state offline means a previous disable was not
1116          * successful. Try again.
1117          */
1118         cio_disable_subchannel(sch);
1119 }
1120
/*
 * A channel measurement schib update was deferred; retry it, return to
 * the online state and re-deliver the deferred event.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
        retry_set_schib(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}
1128
/*
 * Retry a deferred copy of the channel measurement block, then return
 * to the online state and re-deliver the deferred event.
 */
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
                                       enum dev_event dev_event)
{
        cmf_retry_copy_block(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}
1136
1137 static void
1138 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1139 {
1140         ccw_device_set_timeout(cdev, 0);
1141         if (dev_event == DEV_EVENT_NOTOPER)
1142                 cdev->private->state = DEV_STATE_NOT_OPER;
1143         else
1144                 cdev->private->state = DEV_STATE_OFFLINE;
1145         wake_up(&cdev->private->wait_q);
1146 }
1147
1148 static void
1149 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1150 {
1151         int ret;
1152
1153         ret = ccw_device_cancel_halt_clear(cdev);
1154         switch (ret) {
1155         case 0:
1156                 cdev->private->state = DEV_STATE_OFFLINE;
1157                 wake_up(&cdev->private->wait_q);
1158                 break;
1159         case -ENODEV:
1160                 cdev->private->state = DEV_STATE_NOT_OPER;
1161                 wake_up(&cdev->private->wait_q);
1162                 break;
1163         default:
1164                 ccw_device_set_timeout(cdev, HZ/10);
1165         }
1166 }
1167
1168 /*
1169  * No operation action. This is used e.g. to ignore a timeout event in
1170  * state offline.
1171  */
1172 static void
1173 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1174 {
1175 }
1176
1177 /*
1178  * Bug operation action. 
1179  */
1180 static void
1181 ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1182 {
1183         printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
1184                cdev->private->state, dev_event);
1185         BUG();
1186 }
1187
1188 /*
1189  * device statemachine
1190  */
1191 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1192         [DEV_STATE_NOT_OPER] = {
1193                 [DEV_EVENT_NOTOPER]     = ccw_device_nop,
1194                 [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
1195                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1196                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1197         },
1198         [DEV_STATE_SENSE_PGID] = {
1199                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1200                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
1201                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1202                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1203         },
1204         [DEV_STATE_SENSE_ID] = {
1205                 [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
1206                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
1207                 [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
1208                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1209         },
1210         [DEV_STATE_OFFLINE] = {
1211                 [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
1212                 [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
1213                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1214                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1215         },
1216         [DEV_STATE_VERIFY] = {
1217                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1218                 [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
1219                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1220                 [DEV_EVENT_VERIFY]      = ccw_device_delay_verify,
1221         },
1222         [DEV_STATE_ONLINE] = {
1223                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1224                 [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
1225                 [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
1226                 [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
1227         },
1228         [DEV_STATE_W4SENSE] = {
1229                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1230                 [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
1231                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1232                 [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
1233         },
1234         [DEV_STATE_DISBAND_PGID] = {
1235                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1236                 [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
1237                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1238                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1239         },
1240         [DEV_STATE_BOXED] = {
1241                 [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
1242                 [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
1243                 [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
1244                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1245         },
1246         /* states to wait for i/o completion before doing something */
1247         [DEV_STATE_CLEAR_VERIFY] = {
1248                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1249                 [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
1250                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1251                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1252         },
1253         [DEV_STATE_TIMEOUT_KILL] = {
1254                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1255                 [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
1256                 [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
1257                 [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
1258         },
1259         [DEV_STATE_QUIESCE] = {
1260                 [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
1261                 [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
1262                 [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
1263                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1264         },
1265         /* special states for devices gone not operational */
1266         [DEV_STATE_DISCONNECTED] = {
1267                 [DEV_EVENT_NOTOPER]     = ccw_device_nop,
1268                 [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
1269                 [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
1270                 [DEV_EVENT_VERIFY]      = ccw_device_start_id,
1271         },
1272         [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1273                 [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
1274                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
1275                 [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
1276                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1277         },
1278         [DEV_STATE_CMFCHANGE] = {
1279                 [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
1280                 [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
1281                 [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
1282                 [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
1283         },
1284         [DEV_STATE_CMFUPDATE] = {
1285                 [DEV_EVENT_NOTOPER]     = ccw_device_update_cmfblock,
1286                 [DEV_EVENT_INTERRUPT]   = ccw_device_update_cmfblock,
1287                 [DEV_EVENT_TIMEOUT]     = ccw_device_update_cmfblock,
1288                 [DEV_EVENT_VERIFY]      = ccw_device_update_cmfblock,
1289         },
1290 };
1291
1292 /*
1293  * io_subchannel_irq is called for "real" interrupts or for status
1294  * pending conditions on msch.
1295  */
1296 void
1297 io_subchannel_irq (struct device *pdev)
1298 {
1299         struct ccw_device *cdev;
1300
1301         cdev = to_subchannel(pdev)->dev.driver_data;
1302
1303         CIO_TRACE_EVENT (3, "IRQ");
1304         CIO_TRACE_EVENT (3, pdev->bus_id);
1305         if (cdev)
1306                 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1307 }
1308
1309 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);