/* drivers/s390/cio/css.c */
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 *
 * License: GPL
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

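/*
 * Iterate over all possible subchannel ids, calling fn() for each one.
 * Iteration stops as soon as fn() returns non-zero; that value is passed
 * back to the caller. A minimal caller sketch (illustrative only, not
 * part of this file):
 *
 *      static int count_schids(struct subchannel_id schid, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int n = 0;
 *      for_each_subchannel(count_schids, &n);
 */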
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        if (cb->set)
                idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}

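/**
 * for_each_subchannel_staged - iterate over subchannels in two stages
 * @fn_known: function to call for each registered subchannel
 * @fn_unknown: function to call for each unregistered subchannel id
 * @data: opaque pointer handed to both callbacks
 *
 * Registered subchannels are processed first via the css bus, then the
 * remaining (unregistered) subchannel ids are walked with
 * for_each_subchannel(). An idset tracks which ids were already seen;
 * if it cannot be allocated, every id is visited by brute force instead.
 */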
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                               void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        if (fn_known && !fn_unknown) {
                /* Skip idset allocation in case of known-only loop. */
                cb.set = NULL;
                return bus_for_each_dev(&css_bus_type, NULL, &cb,
                                        call_fn_known_sch);
        }

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
        sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
        if (!sch->lock)
                return -ENOMEM;

        spin_lock_init(sch->lock);
        mutex_init(&sch->reg_mutex);

        return 0;
}

static void css_subchannel_release(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);

        sch->config.intparm = 0;
        cio_commit_config(sch);
        kfree(sch->lock);
        kfree(sch);
}

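/*
 * Allocate and partially initialize a struct subchannel for @schid.
 * GFP_DMA is used here, presumably because the embedded control blocks
 * are handed to channel-subsystem instructions with 31-bit addressing
 * limits (an assumption based on the flag; not stated in this file).
 * Returns the subchannel or an ERR_PTR() on failure.
 */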
struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (!sch)
                return ERR_PTR(-ENOMEM);

        ret = cio_validate_subchannel(sch, schid);
        if (ret < 0)
                goto err;

        ret = css_sch_create_locks(sch);
        if (ret)
                goto err;

        INIT_WORK(&sch->todo_work, css_sch_todo);
        sch->dev.release = &css_subchannel_release;
        device_initialize(&sch->dev);
        return sch;

err:
        kfree(sch);
        return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                     sch->schid.sch_no);
        ret = device_add(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

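/*
 * Fallback path for css_update_ssd_info(): if CHSC cannot provide
 * subchannel description data, derive a minimal chsc_ssd_info from the
 * PMCW by copying the installed-path mask and the CHPIDs.
 */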
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
        if (ret)
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

        ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};

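/*
 * Make a subchannel obtained from css_alloc_subchannel() known to the
 * driver core. The add uevent is suppressed until either device
 * recognition succeeds or it is clear that no driver will bind; see the
 * comments in the function body.
 */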
int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them since they will be
         * unregistered before they can be used anyway, so we delay the add
         * uevent until after device recognition was successful.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

static int css_probe_device(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);

        ret = css_register_subchannel(sch);
        if (ret)
                put_device(&sch->dev);

        return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

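/*
 * Evaluate a subchannel id that has no struct subchannel yet.
 * Return conventions: -EAGAIN defers the work to the slow path, -ENXIO
 * means the subchannel is not provided by the hardware, 0 means it was
 * unusable and is ignored; otherwise the result of css_probe_device()
 * is returned.
 */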
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch(schid, &schib)) {
                /* Subchannel is not provided. */
                return -ENXIO;
        }
        if (!css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
                      schid.sch_no);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but no sch_event handler provided.\n");
        }
        if (ret != 0 && ret != -EAGAIN) {
                CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
                      sch->schid.ssid, sch->schid.sch_no, todo);
        if (sch->todo >= todo)
                return;
        /* Get workqueue ref. */
        if (!get_device(&sch->dev))
                return;
        sch->todo = todo;
        if (!queue_work(cio_work_q, &sch->todo_work)) {
                /* Already queued, release workqueue ref. */
                put_device(&sch->dev);
        }
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
        struct subchannel *sch;
        enum sch_todo todo;
        int ret;

        sch = container_of(work, struct subchannel, todo_work);
        /* Find out todo. */
        spin_lock_irq(sch->lock);
        todo = sch->todo;
        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
                      sch->schid.sch_no, todo);
        sch->todo = SCH_TODO_NOTHING;
        spin_unlock_irq(sch->lock);
        /* Perform todo. */
        switch (todo) {
        case SCH_TODO_NOTHING:
                break;
        case SCH_TODO_EVAL:
                ret = css_evaluate_known_subchannel(sch, 1);
                if (ret == -EAGAIN) {
                        spin_lock_irq(sch->lock);
                        css_sched_sch_todo(sch, todo);
                        spin_unlock_irq(sch->lock);
                }
                break;
        case SCH_TODO_UNREG:
                css_sch_device_unregister(sch);
                break;
        }
        /* Release workqueue ref. */
        put_device(&sch->dev);
}

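/*
 * Slow path evaluation: subchannel ids that need (re-)evaluation are
 * collected in slow_subchannel_set and processed from a delayed work
 * item. css_eval_wq together with css_eval_scheduled lets callers
 * (e.g. css_complete_work()) wait until the set has been drained.
 */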
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        atomic_set(&css_eval_scheduled, 0);
        init_waitqueue_head(&css_eval_wq);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        spin_lock_irq(&slow_subchannel_lock);
                        idset_sch_del_subseq(slow_subchannel_set, schid);
                        spin_unlock_irq(&slow_subchannel_lock);
                        break;
                default:
                        rc = 0;
                }
                /*
                 * Allow scheduling here since the containing loop might
                 * take a while.
                 */
                cond_resched();
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        unsigned long flags;

        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        if (idset_is_empty(slow_subchannel_set)) {
                atomic_set(&css_eval_scheduled, 0);
                wake_up(&css_eval_wq);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, 0);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, 0);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

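/*
 * Helper for css_schedule_eval_all_unreg(): called for every device on
 * the css bus, it removes the corresponding id from @data, so that a
 * set that started out full ends up containing exactly the ids of
 * unregistered subchannels.
 */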
static int __unset_registered(struct device *dev, void *data)
{
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);

        idset_sch_del(set, sch->schid);
        return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
        unsigned long flags;
        struct idset *unreg_set;

        /* Find unregistered subchannels. */
        unreg_set = idset_sch_new();
        if (!unreg_set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
        idset_fill(unreg_set);
        bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_add_set(slow_subchannel_set, unreg_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, delay);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
        flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        /* Schedule with a delay to allow merging of subsequent calls. */
        css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;
        struct subchannel *sch;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 4) & 3;

        if (crw0->erc == CRW_ERC_PMOD) {
                sch = get_subchannel_by_schid(mchk_schid);
                if (sch) {
                        css_update_ssd_info(sch);
                        put_device(&sch->dev);
                }
        }
        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

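/*
 * Build the global path group id used for channel path grouping: the
 * high word comes from either the extended cssid (if multiple channel
 * subsystems are supported) or the CPU address, followed by CPU id,
 * CPU model and the high word of the TOD clock.
 */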
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        struct cpuid cpu_id;

        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid =
                        (css->cssid < 0) ? 0 : css->cssid;
        } else {
                css->global_pgid.pgid_high.cpu_addr = stap();
        }
        get_cpu_id(&cpu_id);
        css->global_pgid.cpu_id = cpu_id.ident;
        css->global_pgid.cpu_model = cpu_id.machine;
        css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css = to_css(dev);

        mutex_destroy(&css->mutex);
        kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
                               char *buf)
{
        struct channel_subsystem *css = to_css(dev);

        if (css->cssid < 0)
                return -EINVAL;

        return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
                              char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
                               const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        unsigned long val;
        int ret;

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

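/*
 * The cm_enable attribute is only made visible if the channel
 * measurement facility (secm) is available.
 */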
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
                              int index)
{
        return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
        &dev_attr_real_cssid.attr,
        NULL,
};

static struct attribute_group cssdev_attr_group = {
        .attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
        &dev_attr_cm_enable.attr,
        NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
        .attrs = cssdev_cm_attrs,
        .is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
        &cssdev_attr_group,
        &cssdev_cm_attr_group,
        NULL,
};

static int __init setup_css(int nr)
{
        struct channel_subsystem *css;
        int ret;

        css = kzalloc(sizeof(*css), GFP_KERNEL);
        if (!css)
                return -ENOMEM;

        channel_subsystems[nr] = css;
        dev_set_name(&css->device, "css%x", nr);
        css->device.groups = cssdev_attr_groups;
        css->device.release = channel_subsystem_release;

        mutex_init(&css->mutex);
        css->cssid = chsc_get_cssid(nr);
        css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

        ret = device_register(&css->device);
        if (ret) {
                put_device(&css->device);
                goto out_err;
        }

        css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
                                         GFP_KERNEL);
        if (!css->pseudo_subchannel) {
                device_unregister(&css->device);
                ret = -ENOMEM;
                goto out_err;
        }

        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        mutex_init(&css->pseudo_subchannel->reg_mutex);
        ret = css_sch_create_locks(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                device_unregister(&css->device);
                goto out_err;
        }

        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        ret = device_register(&css->pseudo_subchannel->dev);
        if (ret) {
                put_device(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
                goto out_err;
        }

        return ret;
out_err:
        channel_subsystems[nr] = NULL;
        return ret;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        struct channel_subsystem *css;
        int ret;

        ret = NOTIFY_DONE;
        for_each_css(css) {
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct channel_subsystem *css;
        int ret;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for_each_css(css) {
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        ret = __chsc_do_secm(css, 0);
                        ret = notifier_from_errno(ret);
                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for_each_css(css) {
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        ret = __chsc_do_secm(css, 1);
                        ret = notifier_from_errno(ret);
                        mutex_unlock(&css->mutex);
                }
                /* search for subchannels, which appeared during hibernation */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }
        return ret;
}

static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
        int ret, i;

        ret = chsc_init();
        if (ret)
                return ret;

        chsc_determine_css_characteristics();
        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        if (ret)
                max_ssid = 0;
        else /* Success. */
                max_ssid = __MAX_SSID;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        ret = bus_register(&css_bus_type);
        if (ret)
                goto out;

        /* Setup css structure. */
        for (i = 0; i <= MAX_CSS_IDX; i++) {
                ret = setup_css(i);
                if (ret)
                        goto out_unregister;
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        return 0;
out_unregister:
        while (i-- > 0) {
                struct channel_subsystem *css = channel_subsystems[i];

                device_unregister(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        pr_alert("The CSS device driver initialization failed with errno=%d\n",
                 ret);
        return ret;
}

static void __init css_bus_cleanup(void)
{
        struct channel_subsystem *css;

        for_each_css(css) {
                device_unregister(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
        int ret;

        ret = css_bus_init();
        if (ret)
                return ret;
        cio_work_q = create_singlethread_workqueue("cio");
        if (!cio_work_q) {
                ret = -ENOMEM;
                goto out_bus;
        }
        ret = io_subchannel_init();
        if (ret)
                goto out_wq;

        return ret;
out_wq:
        destroy_workqueue(cio_work_q);
out_bus:
        css_bus_cleanup();
        return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
        struct css_driver *cssdrv = to_cssdriver(drv);

        if (cssdrv->settle)
                return cssdrv->settle();
        return 0;
}

int css_complete_work(void)
{
        int ret;

        /* Wait for the evaluation of subchannels to finish. */
        ret = wait_event_interruptible(css_eval_wq,
                                       atomic_read(&css_eval_scheduled) == 0);
        if (ret)
                return -EINTR;
        flush_workqueue(cio_work_q);
        /* Wait for the subchannel type specific initialization to finish */
        return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
        /* Register subchannels which are already in use. */
        cio_register_early_subchannels();
        /* Start initial subchannel evaluation. */
        css_schedule_eval_all();
        css_complete_work();
        return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
        struct channel_path *chp;
        struct chp_id chpid;

        chsc_enable_facility(CHSC_SDA_OC_MSS);
        chp_id_for_each(&chpid) {
                chp = chpid_to_chp(chpid);
                if (chp)
                        chp_update_desc(chp);
        }
        cmf_reactivate();
}

#ifdef CONFIG_PROC_FS
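/*
 * /proc/cio_settle: any write (the data itself is ignored) blocks until
 * pending channel report words are handled and subchannel evaluation
 * has finished. Typical use from a shell (illustrative sketch):
 *
 *      echo 1 > /proc/cio_settle
 */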
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        int ret;

        /* Handle pending CRW's. */
        crw_wait_for_channel_report();
        ret = css_complete_work();

        return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
        .open = nonseekable_open,
        .write = cio_settle_write,
        .llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
        struct proc_dir_entry *entry;

        entry = proc_create("cio_settle", S_IWUSR, NULL,
                            &cio_settle_proc_fops);
        if (!entry)
                return -ENOMEM;
        return 0;
}
device_initcall(cio_settle_init);
#endif /* CONFIG_PROC_FS */

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

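/*
 * A css driver matches a subchannel when the subchannel type (st)
 * appears in the driver's subchannel_type id table, which is terminated
 * by an entry with match_flags == 0. Sketch of such a table
 * (illustrative; field values assumed):
 *
 *      static struct css_device_id io_subchannel_ids[] = {
 *              { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *              { },
 *      };
 */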
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        css_update_ssd_info(sch);
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.bus = &css_bus_type;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

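/*
 * Illustrative registration sketch for a css driver (names assumed,
 * not part of this file):
 *
 *      static struct css_driver my_css_driver = {
 *              .drv = {
 *                      .name = "my_subchannel_driver",
 *                      .owner = THIS_MODULE,
 *              },
 *              .subchannel_type = my_subchannel_ids,
 *              .probe = my_probe,
 *      };
 *
 *      ret = css_driver_register(&my_css_driver);
 */
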
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);