/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 *
 * License: GPL
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

int for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}
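
/*
 * Illustrative sketch (not used by this driver): a caller could count
 * all provided subchannels with for_each_subchannel(). The names below
 * are hypothetical; returning zero from the callback continues the scan.
 *
 *      static int count_sch(struct subchannel_id schid, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *      for_each_subchannel(count_sch, &count);
 */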

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        if (cb->set)
                idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                               void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        if (fn_known && !fn_unknown) {
                /* Skip idset allocation in case of known-only loop. */
                cb.set = NULL;
                return bus_for_each_dev(&css_bus_type, NULL, &cb,
                                        call_fn_known_sch);
        }

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}
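
/*
 * Illustrative sketch (hypothetical callbacks): a staged scan visits
 * registered subchannels through their struct subchannel first, then
 * picks up the IDs the first pass did not see:
 *
 *      for_each_subchannel_staged(foo_known, foo_unknown, NULL);
 *
 * where foo_known() gets each subchannel already on the css bus and
 * foo_unknown() gets the remaining subchannel IDs. The slow-path
 * evaluation below uses exactly this pattern.
 */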

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
        sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
        if (!sch->lock)
                return -ENOMEM;

        spin_lock_init(sch->lock);
        mutex_init(&sch->reg_mutex);

        return 0;
}

static void css_subchannel_release(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);

        sch->config.intparm = 0;
        cio_commit_config(sch);
        kfree(sch->lock);
        kfree(sch);
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (!sch)
                return ERR_PTR(-ENOMEM);

        ret = cio_validate_subchannel(sch, schid);
        if (ret < 0)
                goto err;

        ret = css_sch_create_locks(sch);
        if (ret)
                goto err;

        INIT_WORK(&sch->todo_work, css_sch_todo);
        sch->dev.release = &css_subchannel_release;
        device_initialize(&sch->dev);
        return sch;

err:
        kfree(sch);
        return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                     sch->schid.sch_no);
        ret = device_add(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
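        /* pim is MSB-first: mask 0x80 >> i selects pmcw->chpid[i]. */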
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
        if (ret)
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

        ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};

int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them since they will be
         * unregistered before they can be used anyway, so we delay the add
         * uevent until after device recognition has succeeded.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

static int css_probe_device(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);

        ret = css_register_subchannel(sch);
        if (ret)
                put_device(&sch->dev);

        return ret;
}

static int check_subchannel(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
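
/*
 * Illustrative sketch: a typical caller stores the subchannel first and
 * then checks the result, as css_evaluate_new_subchannel() does below:
 *
 *      struct schib schib;
 *
 *      if (stsch(schid, &schib) || !css_sch_is_valid(&schib))
 *              return -ENODEV;
 */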

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch(schid, &schib)) {
                /* Subchannel is not provided. */
                return -ENXIO;
        }
        if (!css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
                      schid.sch_no);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but no sch_event handler provided.\n");
        }
        if (ret != 0 && ret != -EAGAIN) {
                CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
                      sch->schid.ssid, sch->schid.sch_no, todo);
        if (sch->todo >= todo)
                return;
        /* Get workqueue ref. */
        if (!get_device(&sch->dev))
                return;
        sch->todo = todo;
        if (!queue_work(cio_work_q, &sch->todo_work)) {
                /* Already queued, release workqueue ref. */
                put_device(&sch->dev);
        }
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
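
/*
 * Illustrative sketch: callers must hold the subchannel lock, e.g.
 *
 *      spin_lock_irq(sch->lock);
 *      css_sched_sch_todo(sch, SCH_TODO_UNREG);
 *      spin_unlock_irq(sch->lock);
 *
 * as done for the -EAGAIN rescheduling in css_sch_todo() below.
 */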

static void css_sch_todo(struct work_struct *work)
{
        struct subchannel *sch;
        enum sch_todo todo;
        int ret;

        sch = container_of(work, struct subchannel, todo_work);
        /* Find out todo. */
        spin_lock_irq(sch->lock);
        todo = sch->todo;
        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
                      sch->schid.sch_no, todo);
        sch->todo = SCH_TODO_NOTHING;
        spin_unlock_irq(sch->lock);
        /* Perform todo. */
        switch (todo) {
        case SCH_TODO_NOTHING:
                break;
        case SCH_TODO_EVAL:
                ret = css_evaluate_known_subchannel(sch, 1);
                if (ret == -EAGAIN) {
                        spin_lock_irq(sch->lock);
                        css_sched_sch_todo(sch, todo);
                        spin_unlock_irq(sch->lock);
                }
                break;
        case SCH_TODO_UNREG:
                css_sch_device_unregister(sch);
                break;
        }
        /* Release workqueue ref. */
        put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        atomic_set(&css_eval_scheduled, 0);
        init_waitqueue_head(&css_eval_wq);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        spin_lock_irq(&slow_subchannel_lock);
                        idset_sch_del_subseq(slow_subchannel_set, schid);
                        spin_unlock_irq(&slow_subchannel_lock);
                        break;
                default:
                        rc = 0;
                }
                /*
                 * Allow scheduling here since the containing loop might
                 * take a while.
                 */
                cond_resched();
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        unsigned long flags;

        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        if (idset_is_empty(slow_subchannel_set)) {
                atomic_set(&css_eval_scheduled, 0);
                wake_up(&css_eval_wq);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, 0);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, 0);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);

        idset_sch_del(set, sch->schid);
        return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
        unsigned long flags;
        struct idset *unreg_set;

        /* Find unregistered subchannels. */
        unreg_set = idset_sch_new();
        if (!unreg_set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
        idset_fill(unreg_set);
        bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_add_set(slow_subchannel_set, unreg_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, delay);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
        flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        /* Schedule with a delay to allow merging of subsequent calls. */
        css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;
        struct subchannel *sch;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 4) & 3;

        if (crw0->erc == CRW_ERC_PMOD) {
                sch = get_subchannel_by_schid(mchk_schid);
                if (sch) {
                        css_update_ssd_info(sch);
                        put_device(&sch->dev);
                }
        }
        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

static void __init css_generate_pgid(struct channel_subsystem *css,
                                     u32 tod_high)
{
        struct cpuid cpu_id;

        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid =
                        (css->cssid < 0) ? 0 : css->cssid;
        } else {
                css->global_pgid.pgid_high.cpu_addr = stap();
        }
        get_cpu_id(&cpu_id);
        css->global_pgid.cpu_id = cpu_id.ident;
        css->global_pgid.cpu_model = cpu_id.machine;
        css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css = to_css(dev);

        mutex_destroy(&css->mutex);
        kfree(css);
}

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
                              char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
                               const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        unsigned long val;
        int ret;

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
                              int index)
{
        return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_cm_attrs[] = {
        &dev_attr_cm_enable.attr,
        NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
        .attrs = cssdev_cm_attrs,
        .is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
        &cssdev_cm_attr_group,
        NULL,
};
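
/*
 * Usage note (userspace, illustrative): channel measurement can be
 * toggled through the cm_enable attribute of the css device, e.g.
 *
 *      echo 1 > /sys/devices/css0/cm_enable
 */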

static int __init setup_css(int nr)
{
        struct channel_subsystem *css;
        int ret;

        css = kzalloc(sizeof(*css), GFP_KERNEL);
        if (!css)
                return -ENOMEM;

        channel_subsystems[nr] = css;
        dev_set_name(&css->device, "css%x", nr);
        css->device.groups = cssdev_attr_groups;
        css->device.release = channel_subsystem_release;

        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = chsc_get_cssid(nr);
        css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

        ret = device_register(&css->device);
        if (ret) {
                put_device(&css->device);
                goto out_err;
        }

        css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
                                         GFP_KERNEL);
        if (!css->pseudo_subchannel) {
                device_unregister(&css->device);
                ret = -ENOMEM;
                goto out_err;
        }

        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        mutex_init(&css->pseudo_subchannel->reg_mutex);
        ret = css_sch_create_locks(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                device_unregister(&css->device);
                goto out_err;
        }

        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        ret = device_register(&css->pseudo_subchannel->dev);
        if (ret) {
                put_device(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
                goto out_err;
        }

        return ret;
out_err:
        channel_subsystems[nr] = NULL;
        return ret;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        struct channel_subsystem *css;
        int ret;

        ret = NOTIFY_DONE;
        for_each_css(css) {
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct channel_subsystem *css;
        int ret;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for_each_css(css) {
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        ret = __chsc_do_secm(css, 0);
                        ret = notifier_from_errno(ret);
                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for_each_css(css) {
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        ret = __chsc_do_secm(css, 1);
                        ret = notifier_from_errno(ret);
                        mutex_unlock(&css->mutex);
                }
                /* Search for subchannels which appeared during hibernation. */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }
        return ret;
}

static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel instances are created during probing.
 */
static int __init css_bus_init(void)
{
        int ret, i;

        ret = chsc_init();
        if (ret)
                return ret;

        chsc_determine_css_characteristics();
        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        if (ret)
                max_ssid = 0;
        else /* Success. */
                max_ssid = __MAX_SSID;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        ret = bus_register(&css_bus_type);
        if (ret)
                goto out;

        /* Setup css structure. */
        for (i = 0; i <= MAX_CSS_IDX; i++) {
                ret = setup_css(i);
                if (ret)
                        goto out_unregister;
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        return 0;
out_unregister:
        while (i-- > 0) {
                struct channel_subsystem *css = channel_subsystems[i];

                device_unregister(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        pr_alert("The CSS device driver initialization failed with errno=%d\n",
                 ret);
        return ret;
}

static void __init css_bus_cleanup(void)
{
        struct channel_subsystem *css;

        for_each_css(css) {
                device_unregister(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
        int ret;

        ret = css_bus_init();
        if (ret)
                return ret;
        cio_work_q = create_singlethread_workqueue("cio");
        if (!cio_work_q) {
                ret = -ENOMEM;
                goto out_bus;
        }
        ret = io_subchannel_init();
        if (ret)
                goto out_wq;

        return ret;
out_wq:
        destroy_workqueue(cio_work_q);
out_bus:
        css_bus_cleanup();
        return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
        struct css_driver *cssdrv = to_cssdriver(drv);

        if (cssdrv->settle)
                return cssdrv->settle();
        return 0;
}

int css_complete_work(void)
{
        int ret;

        /* Wait for the evaluation of subchannels to finish. */
        ret = wait_event_interruptible(css_eval_wq,
                                       atomic_read(&css_eval_scheduled) == 0);
        if (ret)
                return -EINTR;
        flush_workqueue(cio_work_q);
        /* Wait for the subchannel type specific initialization to finish. */
        return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup before the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
        /* Register subchannels which are already in use. */
        cio_register_early_subchannels();
        /* Start initial subchannel evaluation. */
        css_schedule_eval_all();
        css_complete_work();
        return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
        struct channel_path *chp;
        struct chp_id chpid;

        chsc_enable_facility(CHSC_SDA_OC_MSS);
        chp_id_for_each(&chpid) {
                chp = chpid_to_chp(chpid);
                if (chp)
                        chp_update_desc(chp);
        }
        cmf_reactivate();
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        int ret;

        /* Handle pending CRWs. */
        crw_wait_for_channel_report();
        ret = css_complete_work();

        return ret ? ret : count;
}
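
/*
 * Usage note (userspace, illustrative): writing anything to
 * /proc/cio_settle blocks until all outstanding subchannel evaluation
 * has completed, e.g.
 *
 *      echo 1 > /proc/cio_settle
 */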

static const struct file_operations cio_settle_proc_fops = {
        .open = nonseekable_open,
        .write = cio_settle_write,
        .llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
        struct proc_dir_entry *entry;

        entry = proc_create("cio_settle", S_IWUSR, NULL,
                            &cio_settle_proc_fops);
        if (!entry)
                return -ENOMEM;
        return 0;
}
device_initcall(cio_settle_init);
#endif /* CONFIG_PROC_FS */

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        css_update_ssd_info(sch);
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets bus_type
 * in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.bus = &css_bus_type;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
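
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * matching is driven by the subchannel_type id table, see css_bus_match().
 *
 *      static struct css_device_id foo_subchannel_ids[] = {
 *              { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *              { },
 *      };
 *      static struct css_driver foo_driver = {
 *              .drv = {
 *                      .name = "foo",
 *                      .owner = THIS_MODULE,
 *              },
 *              .subchannel_type = foo_subchannel_ids,
 *              .probe = foo_probe,
 *              .remove = foo_remove,
 *      };
 *
 *      ret = css_driver_register(&foo_driver);
 */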

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);