[S390] cio: fix memleak in subchannel validation
drivers/s390/cio/css.c
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

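/**
 * for_each_subchannel - call a function for every possible subchannel id
 * @fn: callback, invoked once per subchannel id in every subchannel set
 * @data: opaque pointer passed through to @fn
 *
 * Iteration stops as soon as @fn returns a non-zero value, which is then
 * also the return value of this function; 0 is returned after a complete
 * scan.
 */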
int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}

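/**
 * for_each_subchannel_staged - visit registered and unregistered subchannels
 * @fn_known: called for each subchannel with a registered struct device
 * @fn_unknown: called for each subchannel id without a registered device
 * @data: opaque pointer passed through to the callbacks
 *
 * An idset tracks which ids were already seen on the css bus so that
 * @fn_unknown is only invoked for the remaining ones. If the idset cannot
 * be allocated, this falls back to a brute-force scan of all ids via
 * for_each_subchannel().
 */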
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                               void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}

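/*
 * The schib embedded in struct subchannel is handed to I/O instructions
 * (stsch and friends) which expect a 31-bit addressable operand; this is
 * why the allocation below uses GFP_DMA.
 */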
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (sch == NULL)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel(sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
        if (sch) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static void
css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid)) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        if (cio_is_console(sch->schid))
                sch->dev.init_name = cio_get_console_sch_name(sch->schid);
        else
                dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                             sch->schid.sch_no);
        ret = device_register(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

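/*
 * Fallback for css_update_ssd_info(): derive a minimal chsc_ssd_info
 * from the path installed mask in the pmcw. This is used when CHSC data
 * is unavailable, e.g. for the early console subchannel or when
 * chsc_get_ssd_info() fails.
 */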
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        if (cio_is_console(sch->schid)) {
                /* Console is initialized too early for functions requiring
                 * memory allocation. */
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
        } else {
                ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
                if (ret)
                        ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
                ssd_register_chpids(&sch->ssd_info);
        }
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};
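
/*
 * With the attribute group above, a registered subchannel is visible in
 * sysfs roughly as follows (subchannel id picked for illustration):
 *
 *      $ cat /sys/bus/css/devices/0.0.0042/type
 *      0
 *      $ cat /sys/bus/css/devices/0.0.0042/modalias
 *      css:t0
 *
 * where 0 is the subchannel type reported in sch->st.
 */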

static int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them since they will be
         * unregistered before they can be used anyway, so we delay the add
         * uevent until after device recognition was successful.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

int css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
        ret = css_register_subchannel(sch);
        if (ret)
                css_free_subchannel(sch);
        return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

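/**
 * get_subchannel_by_schid - look up a registered subchannel by its id
 * @schid: id of the subchannel to look for
 *
 * Returns the subchannel with a reference to its embedded struct device
 * held, or NULL if no matching device is registered. Callers must drop
 * the reference with put_device(&sch->dev) when they are done.
 */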
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 *
 * Returns 1 if the subchannel described by @schib is usable, 0 otherwise.
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
                         "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

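/*
 * Slow path evaluation: subchannels that need (re-)evaluation are marked
 * in an idset under slow_subchannel_lock; a work item later walks all
 * subchannels and evaluates the marked ones outside of machine check
 * context.
 */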
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        break;
                default:
                        rc = 0;
                }
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_wait_for_slow_path(void)
{
        flush_workqueue(slow_path_wq);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
        int ret;

        CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
                      schid.ssid, schid.sch_no);
        if (need_reprobe)
                return -EAGAIN;

        ret = css_probe_device(schid);
        switch (ret) {
        case 0:
                break;
        case -ENXIO:
        case -ENOMEM:
        case -EIO:
                /* These should abort looping */
                break;
        default:
                ret = 0;
        }

        return ret;
}

static void reprobe_after_idle(struct work_struct *unused)
{
        /* Make sure initial subchannel scan is done. */
        wait_event(ccw_device_init_wq,
                   atomic_read(&ccw_device_init_count) == 0);
        if (need_reprobe)
                css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
        int ret;

        CIO_MSG_EVENT(4, "reprobe start\n");

        /* Make sure initial subchannel scan is done. */
        if (atomic_read(&ccw_device_init_count) != 0) {
                queue_work(ccw_device_work, &reprobe_idle_work);
                return;
        }
        need_reprobe = 0;
        ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

        CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
                      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        need_reprobe = 1;
        queue_work(slow_path_wq, &css_reprobe_work);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 8) & 3;

        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
        struct subchannel *sch;
        int ret;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        ret = PTR_ERR(sch);
                else
                        ret = 0;
                switch (ret) {
                case 0:
                        break;
                case -ENOMEM:
                        panic("Out of memory in init_channel_subsystem\n");
                /* -ENXIO: no more subchannels. */
                case -ENXIO:
                        return ret;
                /* -EIO: this subchannel set not supported. */
                case -EIO:
                        return ret;
                default:
                        return 0;
                }
        }
        /*
         * We register ALL valid subchannels in ioinfo, even those
         * that have been present before init_channel_subsystem.
         * These subchannels can't have been registered yet (kmalloc
         * not working) so we do it now. This is true e.g. for the
         * console subchannel.
         */
        css_register_subchannel(sch);
        return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = stap();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
        css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
        css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        if (css->pseudo_subchannel) {
                /* Implies that it has been generated but never registered. */
                css_subchannel_release(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
        }
        kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
                   char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        if (!css)
                return 0;
        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

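/*
 * Illustrative usage of the cm_enable attribute above (the sysfs path
 * shown is an example): channel measurements are switched on or off
 * from user space with
 *
 *      echo 1 > /sys/devices/css0/cm_enable
 *      echo 0 > /sys/devices/css0/cm_enable
 *
 * which ends up in chsc_secm().
 */
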
static int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;
        struct channel_subsystem *css;

        css = channel_subsystems[nr];
        memset(css, 0, sizeof(struct channel_subsystem));
        css->pseudo_subchannel =
                kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
        if (!css->pseudo_subchannel)
                return -ENOMEM;
        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        ret = cio_create_sch_lock(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = nr;
        dev_set_name(&css->device, "css%x", nr);
        css->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css, tod_high);
        return 0;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        int ret, i;

        ret = NOTIFY_DONE;
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = channel_subsystems[i];
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        void *secm_area;
        int ret, i;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 0, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 1, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                /* search for subchannels, which appeared during hibernation */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }
        return ret;
}

static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
        int ret, i;

        ret = chsc_determine_css_characteristics();
        if (ret == -ENOMEM)
                goto out; /* No need to continue. */

        ret = chsc_alloc_sei_area();
        if (ret)
                goto out;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        ret = bus_register(&css_bus_type);
        if (ret)
                goto out;

        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        switch (ret) {
        case 0: /* Success. */
                max_ssid = __MAX_SSID;
                break;
        case -ENOMEM:
                goto out_bus;
        default:
                max_ssid = 0;
        }
        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                channel_subsystems[i] = css;
                ret = setup_css(i);
                if (ret) {
                        kfree(channel_subsystems[i]);
                        goto out_unregister;
                }
                ret = device_register(&css->device);
                if (ret) {
                        put_device(&css->device);
                        goto out_unregister;
                }
                if (css_chsc_characteristics.secm) {
                        ret = device_create_file(&css->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css->pseudo_subchannel->dev);
                if (ret)
                        goto out_file;
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        for_each_subchannel(__init_channel_subsystem, NULL);
        return 0;
out_file:
        if (css_chsc_characteristics.secm)
                device_remove_file(&channel_subsystems[i]->device,
                                   &dev_attr_cm_enable);
out_device:
        device_unregister(&channel_subsystems[i]->device);
out_unregister:
        while (i > 0) {
                struct channel_subsystem *css;

                i--;
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
out_bus:
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        chsc_free_sei_area();
        idset_free(slow_subchannel_set);
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

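/*
 * The bus-level PM callbacks above simply forward each event to the
 * css_driver the subchannel is bound to, if any. Only hibernation-style
 * callbacks are wired up in css_pm_ops below; there is no
 * .suspend/.resume pair.
 */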
static struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name, owner
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.name = cdrv->name;
        cdrv->drv.bus = &css_bus_type;
        cdrv->drv.owner = cdrv->owner;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
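
/*
 * Usage sketch (hypothetical driver, for illustration only):
 *
 *      static struct css_driver example_driver = {
 *              .owner = THIS_MODULE,
 *              .name = "example",
 *              .subchannel_type = example_subchannel_ids,
 *              .probe = example_probe,
 *      };
 *
 *      ret = css_driver_register(&example_driver);
 *
 * css_bus_match() above compares the subchannel type of each device on
 * css_bus_type against the ids in example_subchannel_ids.
 */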

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);