[S390] subchannel lock conversion.
[safe/jmp/linux-2.6] / drivers / s390 / cio / css.c
1 /*
2  *  drivers/s390/cio/css.c
3  *  driver for channel subsystem
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
8  *               Cornelia Huck (cornelia.huck@de.ibm.com)
9  */
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/device.h>
13 #include <linux/slab.h>
14 #include <linux/errno.h>
15 #include <linux/list.h>
16
17 #include "css.h"
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "ioasm.h"
21 #include "chsc.h"
22 #include "device.h"
23
24 int need_rescan = 0;
25 int css_init_done = 0;
26 static int need_reprobe = 0;
27 static int max_ssid = 0;
28
29 struct channel_subsystem *css[__MAX_CSSID + 1];
30
31 int css_characteristics_avail = 0;
32
/*
 * Call fn(schid, data) for every possible subchannel id, iterating all
 * subchannel numbers within each subchannel set up to max_ssid.
 *
 * The value of the last fn() invocation is returned to the caller.
 *
 * NOTE(review): a nonzero return from fn() only breaks the inner loop;
 * the scan then continues with the next ssid and overwrites ret -
 * confirm that this is the intended abort semantics.
 */
inline int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		/* Restart subchannel numbering for the next set. */
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
51
52 static struct subchannel *
53 css_alloc_subchannel(struct subchannel_id schid)
54 {
55         struct subchannel *sch;
56         int ret;
57
58         sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
59         if (sch == NULL)
60                 return ERR_PTR(-ENOMEM);
61         ret = cio_validate_subchannel (sch, schid);
62         if (ret < 0) {
63                 kfree(sch);
64                 return ERR_PTR(ret);
65         }
66
67         if (sch->st != SUBCHANNEL_TYPE_IO) {
68                 /* For now we ignore all non-io subchannels. */
69                 kfree(sch);
70                 return ERR_PTR(-EINVAL);
71         }
72
73         /* 
74          * Set intparm to subchannel address.
75          * This is fine even on 64bit since the subchannel is always located
76          * under 2G.
77          */
78         sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
79         ret = cio_modify(sch);
80         if (ret) {
81                 kfree(sch);
82                 return ERR_PTR(ret);
83         }
84         return sch;
85 }
86
87 static void
88 css_free_subchannel(struct subchannel *sch)
89 {
90         if (sch) {
91                 /* Reset intparm to zeroes. */
92                 sch->schib.pmcw.intparm = 0;
93                 cio_modify(sch);
94                 kfree(sch->lock);
95                 kfree(sch);
96         }
97 }
98
99 static void
100 css_subchannel_release(struct device *dev)
101 {
102         struct subchannel *sch;
103
104         sch = to_subchannel(dev);
105         if (!cio_is_console(sch->schid)) {
106                 kfree(sch->lock);
107                 kfree(sch);
108         }
109 }
110
111 extern int css_get_ssd_info(struct subchannel *sch);
112
113
114 int css_sch_device_register(struct subchannel *sch)
115 {
116         int ret;
117
118         mutex_lock(&sch->reg_mutex);
119         ret = device_register(&sch->dev);
120         mutex_unlock(&sch->reg_mutex);
121         return ret;
122 }
123
124 void css_sch_device_unregister(struct subchannel *sch)
125 {
126         mutex_lock(&sch->reg_mutex);
127         device_unregister(&sch->dev);
128         mutex_unlock(&sch->reg_mutex);
129 }
130
/*
 * Initialize the embedded struct device of a subchannel and register
 * it with the driver core, then fetch ssd info and create its sysfs
 * attributes.  Returns 0 on success or a negative errno.
 */
static int
css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &css[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;

	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		/* NOTE(review): after a failed device_register() the
		 * documented cleanup is put_device(); the caller kfree()s
		 * the subchannel instead - verify this is safe here. */
		printk (KERN_WARNING "%s: could not register %s\n",
			__func__, sch->dev.bus_id);
		return ret;
	}
	css_get_ssd_info(sch);
	ret = subchannel_add_files(&sch->dev);
	if (ret)
		/* NOTE(review): on attribute-creation failure the device
		 * stays registered while css_probe_device() frees sch -
		 * check for a use-after-free on this path. */
		printk(KERN_WARNING "%s: could not add attributes to %s\n",
		       __func__, sch->dev.bus_id);
	return ret;
}
155
156 int
157 css_probe_device(struct subchannel_id schid)
158 {
159         int ret;
160         struct subchannel *sch;
161
162         sch = css_alloc_subchannel(schid);
163         if (IS_ERR(sch))
164                 return PTR_ERR(sch);
165         ret = css_register_subchannel(sch);
166         if (ret)
167                 css_free_subchannel(sch);
168         return ret;
169 }
170
171 static int
172 check_subchannel(struct device * dev, void * data)
173 {
174         struct subchannel *sch;
175         struct subchannel_id *schid = data;
176
177         sch = to_subchannel(dev);
178         return schid_equal(&sch->schid, schid);
179 }
180
181 struct subchannel *
182 get_subchannel_by_schid(struct subchannel_id schid)
183 {
184         struct device *dev;
185
186         dev = bus_find_device(&css_bus_type, NULL,
187                               &schid, check_subchannel);
188
189         return dev ? to_subchannel(dev) : NULL;
190 }
191
/*
 * Determine the current state of a known subchannel by comparing a
 * fresh stsch() result against the stored schib.
 *
 * Returns:
 *   CIO_GONE       - stsch() failed or device number no longer valid
 *   CIO_REVALIDATE - a different device number appeared on the subchannel
 *   CIO_NO_PATH    - no logical path mask left
 *   CIO_OPER       - subchannel is operational
 */
static inline int css_get_subchannel_status(struct subchannel *sch)
{
	struct schib schib;

	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
		return CIO_GONE;
	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
		return CIO_REVALIDATE;
	if (!sch->lpm)
		return CIO_NO_PATH;
	return CIO_OPER;
}
204
/*
 * Reevaluate a registered subchannel after a machine check or slow-path
 * request and perform the resulting action (unregister, reprobe, or
 * nothing).
 *
 * @sch:  the subchannel to evaluate (caller holds a device reference)
 * @slow: nonzero when called from the slow path
 *
 * Returns 0, -EAGAIN when the evaluation must be redone on the slow
 * path, or the result of css_probe_device() for UNREGISTER_PROBE.
 *
 * Locking: takes sch->lock, but drops and reacquires it around driver
 * notification and device unregistration.
 */
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int event, ret, disc;
	unsigned long flags;
	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;

	spin_lock_irqsave(sch->lock, flags);
	disc = device_is_disconnected(sch);
	if (disc && slow) {
		/* Disconnected devices are evaluated directly only.*/
		spin_unlock_irqrestore(sch->lock, flags);
		return 0;
	}
	/* No interrupt after machine check - kill pending timers. */
	device_kill_pending_timer(sch);
	if (!disc && !slow) {
		/* Non-disconnected devices are evaluated on the slow path. */
		spin_unlock_irqrestore(sch->lock, flags);
		return -EAGAIN;
	}
	event = css_get_subchannel_status(sch);
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
		      sch->schid.ssid, sch->schid.sch_no, event,
		      disc ? "disconnected" : "normal",
		      slow ? "slow" : "fast");
	/* Analyze subchannel status. */
	action = NONE;
	switch (event) {
	case CIO_NO_PATH:
		if (disc) {
			/* Check if paths have become available. */
			action = REPROBE;
			break;
		}
		/* fall through */
	case CIO_GONE:
		/* Prevent unwanted effects when opening lock. */
		cio_disable_subchannel(sch);
		device_set_disconnected(sch);
		/* Ask driver what to do with device. */
		action = UNREGISTER;
		if (sch->driver && sch->driver->notify) {
			/* Driver callback runs without the subchannel lock;
			 * a nonzero notify result keeps the device. */
			spin_unlock_irqrestore(sch->lock, flags);
			ret = sch->driver->notify(&sch->dev, event);
			spin_lock_irqsave(sch->lock, flags);
			if (ret)
				action = NONE;
		}
		break;
	case CIO_REVALIDATE:
		/* Device will be removed, so no notify necessary. */
		if (disc)
			/* Reprobe because immediate unregister might block. */
			action = REPROBE;
		else
			action = UNREGISTER_PROBE;
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			action = REPROBE;
		break;
	}
	/* Perform action. */
	ret = 0;
	switch (action) {
	case UNREGISTER:
	case UNREGISTER_PROBE:
		/* Unregister device (will use subchannel lock). */
		spin_unlock_irqrestore(sch->lock, flags);
		css_sch_device_unregister(sch);
		spin_lock_irqsave(sch->lock, flags);

		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		break;
	case REPROBE:
		device_trigger_reprobe(sch);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* Probe if necessary. */
	if (action == UNREGISTER_PROBE)
		ret = css_probe_device(sch->schid);

	return ret;
}
295
296 static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
297 {
298         struct schib schib;
299
300         if (!slow) {
301                 /* Will be done on the slow path. */
302                 return -EAGAIN;
303         }
304         if (stsch(schid, &schib) || !schib.pmcw.dnv) {
305                 /* Unusable - ignore. */
306                 return 0;
307         }
308         CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
309                          "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
310
311         return css_probe_device(schid);
312 }
313
314 static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
315 {
316         struct subchannel *sch;
317         int ret;
318
319         sch = get_subchannel_by_schid(schid);
320         if (sch) {
321                 ret = css_evaluate_known_subchannel(sch, slow);
322                 put_device(&sch->dev);
323         } else
324                 ret = css_evaluate_new_subchannel(schid, slow);
325
326         return ret;
327 }
328
/*
 * for_each_subchannel() callback used by the full rescan: evaluate
 * every subchannel on the slow path.
 */
static int
css_rescan_devices(struct subchannel_id schid, void *data)
{
	return css_evaluate_subchannel(schid, 1);
}
334
/* Queue element for a subchannel awaiting slow-path evaluation. */
struct slow_subchannel {
	struct list_head slow_list;	/* link in slow_subchannels_head */
	struct subchannel_id schid;	/* subchannel to evaluate */
};
339
340 static LIST_HEAD(slow_subchannels_head);
341 static DEFINE_SPINLOCK(slow_subchannel_lock);
342
/*
 * Work function for the slow path: either rescan all subchannels (when
 * need_rescan is set) or drain the queue of individually requested
 * slow-path evaluations.
 */
static void
css_trigger_slow_path(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");

	if (need_rescan) {
		/* A full rescan supersedes any queued single requests. */
		need_rescan = 0;
		for_each_subchannel(css_rescan_devices, NULL);
		return;
	}

	spin_lock_irq(&slow_subchannel_lock);
	while (!list_empty(&slow_subchannels_head)) {
		struct slow_subchannel *slow_sch =
			list_entry(slow_subchannels_head.next,
				   struct slow_subchannel, slow_list);

		list_del_init(slow_subchannels_head.next);
		/* Drop the lock around the evaluation - it may sleep and
		 * may itself take subchannel locks. */
		spin_unlock_irq(&slow_subchannel_lock);
		css_evaluate_subchannel(slow_sch->schid, 1);
		spin_lock_irq(&slow_subchannel_lock);
		kfree(slow_sch);
	}
	spin_unlock_irq(&slow_subchannel_lock);
}
368
369 DECLARE_WORK(slow_path_work, css_trigger_slow_path);
370 struct workqueue_struct *slow_path_wq;
371
372 /* Reprobe subchannel if unregistered. */
373 static int reprobe_subchannel(struct subchannel_id schid, void *data)
374 {
375         struct subchannel *sch;
376         int ret;
377
378         CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
379                   schid.ssid, schid.sch_no);
380         if (need_reprobe)
381                 return -EAGAIN;
382
383         sch = get_subchannel_by_schid(schid);
384         if (sch) {
385                 /* Already known. */
386                 put_device(&sch->dev);
387                 return 0;
388         }
389
390         ret = css_probe_device(schid);
391         switch (ret) {
392         case 0:
393                 break;
394         case -ENXIO:
395         case -ENOMEM:
396                 /* These should abort looping */
397                 break;
398         default:
399                 ret = 0;
400         }
401
402         return ret;
403 }
404
405 /* Work function used to reprobe all unregistered subchannels. */
/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(2, "reprobe start\n");

	/* Clear the flag first: a request arriving during the scan sets it
	 * again and makes reprobe_subchannel() return -EAGAIN. */
	need_reprobe = 0;
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	ret = for_each_subchannel(reprobe_subchannel, NULL);

	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}
421
422 DECLARE_WORK(css_reprobe_work, reprobe_all);
423
424 /* Schedule reprobing of all unregistered subchannels. */
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Setting the flag also restarts an already running reprobe_all(). */
	need_reprobe = 1;
	queue_work(ccw_device_work, &css_reprobe_work);
}
430
431 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
432
433 /*
434  * Rescan for new devices. FIXME: This is slow.
435  * This function is called when we have lost CRWs due to overflows and we have
436  * to do subchannel housekeeping.
437  */
/*
 * Request a full rescan: drop all queued single slow-path requests and
 * set need_rescan for the slow-path worker.
 *
 * NOTE(review): this only sets the flag; it does not queue
 * slow_path_work itself - confirm callers schedule the work.
 */
void
css_reiterate_subchannels(void)
{
	css_clear_subchannel_slow_list();
	need_rescan = 1;
}
444
445 /*
446  * Called from the machine check handler for subchannel report words.
447  */
/*
 * Called from the machine check handler for subchannel report words.
 *
 * @rsid1: reporting-source id word 1 (subchannel number)
 * @rsid2: reporting-source id word 2 (contains the ssid when nonzero)
 *
 * Returns the evaluation result; -EAGAIN means the slow path will (or
 * a full rescan will) handle the subchannel later.
 */
int
css_process_crw(int rsid1, int rsid2)
{
	int ret;
	struct subchannel_id mchk_schid;

	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
		      rsid1, rsid2);

	if (need_rescan)
		/* We need to iterate all subchannels anyway. */
		return -EAGAIN;

	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = rsid1;
	if (rsid2 != 0)
		/* Bits 8-9 of rsid2 carry the subchannel-set id. */
		mchk_schid.ssid = (rsid2 >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	ret = css_evaluate_subchannel(mchk_schid, 0);
	if (ret == -EAGAIN) {
		/* Defer to the slow path; fall back to a full rescan if the
		 * request cannot be queued. */
		if (css_enqueue_subchannel_slow(mchk_schid)) {
			css_clear_subchannel_slow_list();
			need_rescan = 1;
		}
	}
	return ret;
}
480
/*
 * for_each_subchannel() callback run at init time: register every
 * valid subchannel, including the statically set up console subchannel.
 * Returns nonzero only to abort the scan (-ENXIO/-EIO).
 */
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		/* Console subchannel was allocated statically by cio. */
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			/* Any other error: skip this subchannel, go on. */
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}
520
/*
 * Build the global path group id for a channel subsystem from the
 * css id (or CPU address), the CPU id/model from lowcore, and the
 * high word of the TOD clock.
 */
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_characteristics_avail && css_general_characteristics.mcss) {
		/* Multiple css supported: encode the extended cssid. */
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	/* CPU identification is read from the lowcore cpuid field. */
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;

}
539
540 static void
541 channel_subsystem_release(struct device *dev)
542 {
543         struct channel_subsystem *css;
544
545         css = to_css(dev);
546         mutex_destroy(&css->mutex);
547         kfree(css);
548 }
549
550 static ssize_t
551 css_cm_enable_show(struct device *dev, struct device_attribute *attr,
552                    char *buf)
553 {
554         struct channel_subsystem *css = to_css(dev);
555
556         if (!css)
557                 return 0;
558         return sprintf(buf, "%x\n", css->cm_enabled);
559 }
560
561 static ssize_t
562 css_cm_enable_store(struct device *dev, struct device_attribute *attr,
563                     const char *buf, size_t count)
564 {
565         struct channel_subsystem *css = to_css(dev);
566         int ret;
567
568         switch (buf[0]) {
569         case '0':
570                 ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
571                 break;
572         case '1':
573                 ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
574                 break;
575         default:
576                 ret = -EINVAL;
577         }
578         return ret < 0 ? ret : count;
579 }
580
581 static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
582
583 static inline void __init
584 setup_css(int nr)
585 {
586         u32 tod_high;
587
588         memset(css[nr], 0, sizeof(struct channel_subsystem));
589         mutex_init(&css[nr]->mutex);
590         css[nr]->valid = 1;
591         css[nr]->cssid = nr;
592         sprintf(css[nr]->device.bus_id, "css%x", nr);
593         css[nr]->device.release = channel_subsystem_release;
594         tod_high = (u32) (get_clock() >> 32);
595         css_generate_pgid(css[nr], tod_high);
596 }
597
598 /*
599  * Now that the driver core is running, we can setup our channel subsystem.
600  * The struct subchannel's are created during probing (except for the
601  * static console subchannel).
602  */
/*
 * Subsystem init: determine css characteristics, register the css bus,
 * try to enable multiple subchannel sets, set up one struct
 * channel_subsystem per cssid, enable machine checks for channel
 * reports (CR6 bit 28), and scan all subchannels.
 *
 * Error handling unwinds in reverse order via the labels below.
 */
static int __init
init_channel_subsystem (void)
{
	int ret, i;

	if (chsc_determine_css_characteristics() == 0)
		css_characteristics_avail = 1;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		/* MSS not available - stick to subchannel set 0. */
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css[i]) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		setup_css(i);
		ret = device_register(&css[i]->device);
		if (ret)
			goto out_free;
		if (css_characteristics_avail &&
		    css_chsc_characteristics.secm) {
			/* Channel measurement is available - expose the
			 * cm_enable attribute. */
			ret = device_create_file(&css[i]->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
	}
	css_init_done = 1;

	/* Enable channel report machine checks. */
	ctl_set_bit(6, 28);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_device:
	device_unregister(&css[i]->device);
out_free:
	kfree(css[i]);
out_unregister:
	/* Undo registration for all previously set up css instances. */
	while (i > 0) {
		i--;
		if (css_characteristics_avail && css_chsc_characteristics.secm)
			device_remove_file(&css[i]->device,
					   &dev_attr_cm_enable);
		device_unregister(&css[i]->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	return ret;
}
667
668 /*
669  * find a driver for a subchannel. They identify by the subchannel
670  * type with the exception that the console subchannel driver has its own
671  * subchannel type although the device is an i/o subchannel
672  */
673 static int
674 css_bus_match (struct device *dev, struct device_driver *drv)
675 {
676         struct subchannel *sch = container_of (dev, struct subchannel, dev);
677         struct css_driver *driver = container_of (drv, struct css_driver, drv);
678
679         if (sch->st == driver->subchannel_type)
680                 return 1;
681
682         return 0;
683 }
684
685 static int
686 css_probe (struct device *dev)
687 {
688         struct subchannel *sch;
689
690         sch = to_subchannel(dev);
691         sch->driver = container_of (dev->driver, struct css_driver, drv);
692         return (sch->driver->probe ? sch->driver->probe(sch) : 0);
693 }
694
695 static int
696 css_remove (struct device *dev)
697 {
698         struct subchannel *sch;
699
700         sch = to_subchannel(dev);
701         return (sch->driver->remove ? sch->driver->remove(sch) : 0);
702 }
703
704 static void
705 css_shutdown (struct device *dev)
706 {
707         struct subchannel *sch;
708
709         sch = to_subchannel(dev);
710         if (sch->driver->shutdown)
711                 sch->driver->shutdown(sch);
712 }
713
/* Bus type for subchannels; drivers are matched by subchannel type. */
struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
};
721
722 subsys_initcall(init_channel_subsystem);
723
724 int
725 css_enqueue_subchannel_slow(struct subchannel_id schid)
726 {
727         struct slow_subchannel *new_slow_sch;
728         unsigned long flags;
729
730         new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
731         if (!new_slow_sch)
732                 return -ENOMEM;
733         new_slow_sch->schid = schid;
734         spin_lock_irqsave(&slow_subchannel_lock, flags);
735         list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
736         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
737         return 0;
738 }
739
740 void
741 css_clear_subchannel_slow_list(void)
742 {
743         unsigned long flags;
744
745         spin_lock_irqsave(&slow_subchannel_lock, flags);
746         while (!list_empty(&slow_subchannels_head)) {
747                 struct slow_subchannel *slow_sch =
748                         list_entry(slow_subchannels_head.next,
749                                    struct slow_subchannel, slow_list);
750
751                 list_del_init(slow_subchannels_head.next);
752                 kfree(slow_sch);
753         }
754         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
755 }
756
757
758
759 int
760 css_slow_subchannels_exist(void)
761 {
762         return (!list_empty(&slow_subchannels_head));
763 }
764
765 MODULE_LICENSE("GPL");
766 EXPORT_SYMBOL(css_bus_type);
767 EXPORT_SYMBOL_GPL(css_characteristics_avail);