[S390] cio: Rework css driver.
[safe/jmp/linux-2.6] / drivers / s390 / cio / chsc.c
1 /*
2  *  drivers/s390/cio/chsc.c
3  *   S/390 common I/O routines -- channel subsystem call
4  *
5  *    Copyright IBM Corp. 1999,2008
6  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
7  *               Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Arnd Bergmann (arndb@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/init.h>
14 #include <linux/device.h>
15
16 #include <asm/cio.h>
17 #include <asm/chpid.h>
18
19 #include "css.h"
20 #include "cio.h"
21 #include "cio_debug.h"
22 #include "ioasm.h"
23 #include "chp.h"
24 #include "chsc.h"
25
/* Static buffer page for store-event-information (SEI) CHSC requests.
 * Access is serialized through the machine check handler thread (see
 * chsc_process_crw()), so no locking is needed. */
static void *sei_page;
27
/*
 * Map a CHSC response code to a Linux error code.
 * 0x0001 is the success response; 0x0004 means the command is not
 * supported; a small set of codes signal invalid requests; anything
 * else is treated as a generic I/O error.
 */
static int chsc_error_from_response(int response)
{
	if (response == 0x0001)
		return 0;
	if (response == 0x0004)
		return -EOPNOTSUPP;
	if (response == 0x0002 || response == 0x0003 ||
	    (response >= 0x0006 && response <= 0x0008) ||
	    response == 0x000a)
		return -EINVAL;
	return -EIO;
}
46
/*
 * Request/response block for the store-subchannel-described-data CHSC
 * command (request code 0x0004, used by chsc_get_ssd_info()).
 * Layout is dictated by the hardware, hence the packed attribute and
 * the anonymous reserved bitfields.
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;		/* subchannel-set ID of the queried range */
	u16 :4;
	u16 f_sch;		/* first subchannel */
	u16 :16;
	u16 l_sch;		/* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;	/* subchannel data valid */
	u8 dev_valid : 1;	/* device data valid */
	u8 st        : 3;	/* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;		/* unit address */
	u16 devno;		/* device number */
	u8 path_mask;		/* one bit per valid chpid[] entry */
	u8 fla_valid_mask;	/* one bit per valid fla[] entry */
	u16 sch;		/* subchannel */
	u8 chpid[8];		/* chpids 0-7 */
	u16 fla[8];		/* full link addresses 0-7 */
} __attribute__ ((packed));
70
71 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
72 {
73         unsigned long page;
74         struct chsc_ssd_area *ssd_area;
75         int ccode;
76         int ret;
77         int i;
78         int mask;
79
80         page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
81         if (!page)
82                 return -ENOMEM;
83         ssd_area = (struct chsc_ssd_area *) page;
84         ssd_area->request.length = 0x0010;
85         ssd_area->request.code = 0x0004;
86         ssd_area->ssid = schid.ssid;
87         ssd_area->f_sch = schid.sch_no;
88         ssd_area->l_sch = schid.sch_no;
89
90         ccode = chsc(ssd_area);
91         /* Check response. */
92         if (ccode > 0) {
93                 ret = (ccode == 3) ? -ENODEV : -EBUSY;
94                 goto out_free;
95         }
96         ret = chsc_error_from_response(ssd_area->response.code);
97         if (ret != 0) {
98                 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
99                               schid.ssid, schid.sch_no,
100                               ssd_area->response.code);
101                 goto out_free;
102         }
103         if (!ssd_area->sch_valid) {
104                 ret = -ENODEV;
105                 goto out_free;
106         }
107         /* Copy data */
108         ret = 0;
109         memset(ssd, 0, sizeof(struct chsc_ssd_info));
110         if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
111             (ssd_area->st != SUBCHANNEL_TYPE_MSG))
112                 goto out_free;
113         ssd->path_mask = ssd_area->path_mask;
114         ssd->fla_valid_mask = ssd_area->fla_valid_mask;
115         for (i = 0; i < 8; i++) {
116                 mask = 0x80 >> i;
117                 if (ssd_area->path_mask & mask) {
118                         chp_id_init(&ssd->chpid[i]);
119                         ssd->chpid[i].id = ssd_area->chpid[i];
120                 }
121                 if (ssd_area->fla_valid_mask & mask)
122                         ssd->fla[i] = ssd_area->fla[i];
123         }
124 out_free:
125         free_page(page);
126         return ret;
127 }
128
129 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
130 {
131         spin_lock_irq(sch->lock);
132         if (sch->driver && sch->driver->chp_event)
133                 if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
134                         goto out_unreg;
135         spin_unlock_irq(sch->lock);
136         return 0;
137
138 out_unreg:
139         sch->lpm = 0;
140         spin_unlock_irq(sch->lock);
141         css_schedule_eval(sch->schid);
142         return 0;
143 }
144
145 void chsc_chp_offline(struct chp_id chpid)
146 {
147         char dbf_txt[15];
148
149         sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
150         CIO_TRACE_EVENT(2, dbf_txt);
151
152         if (chp_get_status(chpid) <= 0)
153                 return;
154         /* Wait until previous actions have settled. */
155         css_wait_for_slow_path();
156         for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
157 }
158
159 static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
160 {
161         struct schib schib;
162         /*
163          * We don't know the device yet, but since a path
164          * may be available now to the device we'll have
165          * to do recognition again.
166          * Since we don't have any idea about which chpid
167          * that beast may be on we'll have to do a stsch
168          * on all devices, grr...
169          */
170         if (stsch_err(schid, &schib))
171                 /* We're through */
172                 return -ENXIO;
173
174         /* Put it on the slow path. */
175         css_schedule_eval(schid);
176         return 0;
177 }
178
179 static int __s390_process_res_acc(struct subchannel *sch, void *data)
180 {
181         spin_lock_irq(sch->lock);
182         if (sch->driver && sch->driver->chp_event)
183                 sch->driver->chp_event(sch, data, CHP_ONLINE);
184         spin_unlock_irq(sch->lock);
185
186         return 0;
187 }
188
189 static void s390_process_res_acc (struct res_acc_data *res_data)
190 {
191         char dbf_txt[15];
192
193         sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
194                 res_data->chpid.id);
195         CIO_TRACE_EVENT( 2, dbf_txt);
196         if (res_data->fla != 0) {
197                 sprintf(dbf_txt, "fla%x", res_data->fla);
198                 CIO_TRACE_EVENT( 2, dbf_txt);
199         }
200         /* Wait until previous actions have settled. */
201         css_wait_for_slow_path();
202         /*
203          * I/O resources may have become accessible.
204          * Scan through all subchannels that may be concerned and
205          * do a validation on those.
206          * The more information we have (info), the less scanning
207          * will we have to do.
208          */
209         for_each_subchannel_staged(__s390_process_res_acc,
210                                    s390_process_res_acc_new_sch, res_data);
211 }
212
213 static int
214 __get_chpid_from_lir(void *data)
215 {
216         struct lir {
217                 u8  iq;
218                 u8  ic;
219                 u16 sci;
220                 /* incident-node descriptor */
221                 u32 indesc[28];
222                 /* attached-node descriptor */
223                 u32 andesc[28];
224                 /* incident-specific information */
225                 u32 isinfo[28];
226         } __attribute__ ((packed)) *lir;
227
228         lir = data;
229         if (!(lir->iq&0x80))
230                 /* NULL link incident record */
231                 return -EINVAL;
232         if (!(lir->indesc[0]&0xc0000000))
233                 /* node descriptor not valid */
234                 return -EINVAL;
235         if (!(lir->indesc[0]&0x10000000))
236                 /* don't handle device-type nodes - FIXME */
237                 return -EINVAL;
238         /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
239
240         return (u16) (lir->indesc[0]&0x000000ff);
241 }
242
/*
 * Request/response block for the store-event-information CHSC command
 * (request code 0x000e, used by chsc_process_crw()).
 */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;	/* 0x40: overflow, 0x80: loop again - see users */
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
261
262 static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
263 {
264         struct chp_id chpid;
265         int id;
266
267         CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
268                       sei_area->rs, sei_area->rsid);
269         if (sei_area->rs != 4)
270                 return;
271         id = __get_chpid_from_lir(sei_area->ccdf);
272         if (id < 0)
273                 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
274         else {
275                 chp_id_init(&chpid);
276                 chpid.id = id;
277                 chsc_chp_offline(chpid);
278         }
279 }
280
281 static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
282 {
283         struct res_acc_data res_data;
284         struct chp_id chpid;
285         int status;
286
287         CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
288                       "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
289         if (sei_area->rs != 4)
290                 return;
291         chp_id_init(&chpid);
292         chpid.id = sei_area->rsid;
293         /* allocate a new channel path structure, if needed */
294         status = chp_get_status(chpid);
295         if (status < 0)
296                 chp_new(chpid);
297         else if (!status)
298                 return;
299         memset(&res_data, 0, sizeof(struct res_acc_data));
300         res_data.chpid = chpid;
301         if ((sei_area->vf & 0xc0) != 0) {
302                 res_data.fla = sei_area->fla;
303                 if ((sei_area->vf & 0xc0) == 0xc0)
304                         /* full link address */
305                         res_data.fla_mask = 0xffff;
306                 else
307                         /* link address */
308                         res_data.fla_mask = 0xff00;
309         }
310         s390_process_res_acc(&res_data);
311 }
312
/* Content-code dependent data of a channel-path-configuration
 * notification (sei content code 8). */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected chpids */
	u8 op;		/* operation: 0/1/2, see chsc_process_sei_chp_config() */
	u8 pc;
};
318
319 static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
320 {
321         struct chp_config_data *data;
322         struct chp_id chpid;
323         int num;
324
325         CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
326         if (sei_area->rs != 0)
327                 return;
328         data = (struct chp_config_data *) &(sei_area->ccdf);
329         chp_id_init(&chpid);
330         for (num = 0; num <= __MAX_CHPID; num++) {
331                 if (!chp_test_bit(data->map, num))
332                         continue;
333                 chpid.id = num;
334                 printk(KERN_WARNING "cio: processing configure event %d for "
335                        "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
336                 switch (data->op) {
337                 case 0:
338                         chp_cfg_schedule(chpid, 1);
339                         break;
340                 case 1:
341                         chp_cfg_schedule(chpid, 0);
342                         break;
343                 case 2:
344                         chp_cfg_cancel_deconfigure(chpid);
345                         break;
346                 }
347         }
348 }
349
350 static void chsc_process_sei(struct chsc_sei_area *sei_area)
351 {
352         /* Check if we might have lost some information. */
353         if (sei_area->flags & 0x40) {
354                 CIO_CRW_EVENT(2, "chsc: event overflow\n");
355                 css_schedule_eval_all();
356         }
357         /* which kind of information was stored? */
358         switch (sei_area->cc) {
359         case 1: /* link incident*/
360                 chsc_process_sei_link_incident(sei_area);
361                 break;
362         case 2: /* i/o resource accessibiliy */
363                 chsc_process_sei_res_acc(sei_area);
364                 break;
365         case 8: /* channel-path-configuration notification */
366                 chsc_process_sei_chp_config(sei_area);
367                 break;
368         default: /* other stuff */
369                 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
370                               sei_area->cc);
371                 break;
372         }
373 }
374
/*
 * chsc_process_crw - process a channel report word from the channel
 * subsystem by issuing store-event-information requests (code 0x000e)
 * until no further events are stored or an error occurs.
 */
void chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;

	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	do {
		/* Rebuild the request block from scratch each round. */
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
		/* Flag 0x80 presumably indicates more pending event
		 * information - loop until it is clear. */
	} while (sei_area->flags & 0x80);
}
403
404 void chsc_chp_online(struct chp_id chpid)
405 {
406         char dbf_txt[15];
407         struct res_acc_data res_data;
408
409         sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
410         CIO_TRACE_EVENT(2, dbf_txt);
411
412         if (chp_get_status(chpid) != 0) {
413                 memset(&res_data, 0, sizeof(struct res_acc_data));
414                 res_data.chpid = chpid;
415                 /* Wait until previous actions have settled. */
416                 css_wait_for_slow_path();
417                 for_each_subchannel_staged(__s390_process_res_acc, NULL,
418                                            &res_data);
419         }
420 }
421
422 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
423                                          struct chp_id chpid, int on)
424 {
425         unsigned long flags;
426         struct res_acc_data res_data;
427
428         memset(&res_data, 0, sizeof(struct res_acc_data));
429         res_data.chpid = chpid;
430         spin_lock_irqsave(sch->lock, flags);
431         if (sch->driver && sch->driver->chp_event)
432                 sch->driver->chp_event(sch, &res_data,
433                                        on ? CHP_VARY_ON : CHP_VARY_OFF);
434         spin_unlock_irqrestore(sch->lock, flags);
435 }
436
437 static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
438 {
439         struct chp_id *chpid = data;
440
441         __s390_subchannel_vary_chpid(sch, *chpid, 0);
442         return 0;
443 }
444
445 static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
446 {
447         struct chp_id *chpid = data;
448
449         __s390_subchannel_vary_chpid(sch, *chpid, 1);
450         return 0;
451 }
452
453 static int
454 __s390_vary_chpid_on(struct subchannel_id schid, void *data)
455 {
456         struct schib schib;
457
458         if (stsch_err(schid, &schib))
459                 /* We're through */
460                 return -ENXIO;
461         /* Put it on the slow path. */
462         css_schedule_eval(schid);
463         return 0;
464 }
465
466 /**
467  * chsc_chp_vary - propagate channel-path vary operation to subchannels
468  * @chpid: channl-path ID
469  * @on: non-zero for vary online, zero for vary offline
470  */
471 int chsc_chp_vary(struct chp_id chpid, int on)
472 {
473         /* Wait until previous actions have settled. */
474         css_wait_for_slow_path();
475         /*
476          * Redo PathVerification on the devices the chpid connects to
477          */
478
479         if (on)
480                 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
481                                            __s390_vary_chpid_on, &chpid);
482         else
483                 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
484                                            NULL, &chpid);
485
486         return 0;
487 }
488
489 static void
490 chsc_remove_cmg_attr(struct channel_subsystem *css)
491 {
492         int i;
493
494         for (i = 0; i <= __MAX_CHPID; i++) {
495                 if (!css->chps[i])
496                         continue;
497                 chp_remove_cmg_attr(css->chps[i]);
498         }
499 }
500
501 static int
502 chsc_add_cmg_attr(struct channel_subsystem *css)
503 {
504         int i, ret;
505
506         ret = 0;
507         for (i = 0; i <= __MAX_CHPID; i++) {
508                 if (!css->chps[i])
509                         continue;
510                 ret = chp_add_cmg_attr(css->chps[i]);
511                 if (ret)
512                         goto cleanup;
513         }
514         return ret;
515 cleanup:
516         for (--i; i >= 0; i--) {
517                 if (!css->chps[i])
518                         continue;
519                 chp_remove_cmg_attr(css->chps[i]);
520         }
521         return ret;
522 }
523
524 static int
525 __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
526 {
527         struct {
528                 struct chsc_header request;
529                 u32 operation_code : 2;
530                 u32 : 30;
531                 u32 key : 4;
532                 u32 : 28;
533                 u32 zeroes1;
534                 u32 cub_addr1;
535                 u32 zeroes2;
536                 u32 cub_addr2;
537                 u32 reserved[13];
538                 struct chsc_header response;
539                 u32 status : 8;
540                 u32 : 4;
541                 u32 fmt : 4;
542                 u32 : 16;
543         } __attribute__ ((packed)) *secm_area;
544         int ret, ccode;
545
546         secm_area = page;
547         secm_area->request.length = 0x0050;
548         secm_area->request.code = 0x0016;
549
550         secm_area->key = PAGE_DEFAULT_KEY;
551         secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
552         secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
553
554         secm_area->operation_code = enable ? 0 : 1;
555
556         ccode = chsc(secm_area);
557         if (ccode > 0)
558                 return (ccode == 3) ? -ENODEV : -EBUSY;
559
560         switch (secm_area->response.code) {
561         case 0x0102:
562         case 0x0103:
563                 ret = -EINVAL;
564         default:
565                 ret = chsc_error_from_response(secm_area->response.code);
566         }
567         if (ret != 0)
568                 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
569                               secm_area->response.code);
570         return ret;
571 }
572
/*
 * chsc_secm - switch channel measurement for a channel subsystem on or off.
 * @css: channel subsystem
 * @enable: non-zero to enable, zero to disable measurement
 *
 * On enable, allocates the two channel-utilization blocks handed to the
 * hardware (cub_addr1/2) unless measurement is already active, issues
 * the secm request via __chsc_do_secm() and creates the per-channel-path
 * measurement attributes.  On disable (or on attribute-creation failure,
 * which rolls the enable back) the attributes are removed and the
 * utilization blocks freed.  Returns 0 on success or a negative error.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void  *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL |  GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			/* free_page(NULL-equivalent) is tolerated here since
			 * at most one of the two allocations succeeded. */
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* Attribute creation failed - undo the
				 * enable operation. */
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		/* Measurement is off - the utilization blocks are unused. */
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}
613
/*
 * chsc_determine_channel_path_description - store a channel path
 * description (CHSC command code 0x0002) for a single chpid.
 * @chpid: channel path to query
 * @desc: output buffer the description is copied to on success
 *
 * Returns 0 on success or a negative error code.
 */
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;

	/* Request/response block; placed in a zeroed DMA-capable page. */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	/* Query a range of exactly one channel path. */
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		/* chsc not successful: 3 -> not operational, else busy. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
659
660 static void
661 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
662                           struct cmg_chars *chars)
663 {
664         switch (chp->cmg) {
665         case 2:
666         case 3:
667                 chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
668                                          GFP_KERNEL);
669                 if (chp->cmg_chars) {
670                         int i, mask;
671                         struct cmg_chars *cmg_chars;
672
673                         cmg_chars = chp->cmg_chars;
674                         for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
675                                 mask = 0x80 >> (i + 3);
676                                 if (cmcv & mask)
677                                         cmg_chars->values[i] = chars->values[i];
678                                 else
679                                         cmg_chars->values[i] = 0;
680                         }
681                 }
682                 break;
683         default:
684                 /* No cmg-dependent data. */
685                 break;
686         }
687 }
688
/*
 * chsc_get_channel_measurement_chars - read the measurement
 * characteristics of a channel path (CHSC command code 0x0022, "scmc").
 * @chp: channel path; chp->cmg, chp->shared and chp->cmg_chars are
 *       updated on success (cmg/shared set to -1 if the response marks
 *       the data as not valid).
 *
 * Returns 0 on success or a negative error code.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	/* Request/response block; placed in a zeroed DMA-capable page. */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;	/* characteristic validity bits */
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	/* Query a range of exactly one channel path. */
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			/* Measurement data not valid for this chpid. */
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
751
752 int __init chsc_alloc_sei_area(void)
753 {
754         sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
755         if (!sei_page)
756                 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
757                               "chsc machine checks!\n");
758         return (sei_page ? 0 : -ENOMEM);
759 }
760
761 void __init chsc_free_sei_area(void)
762 {
763         kfree(sei_page);
764 }
765
/*
 * chsc_enable_facility - enable a channel subsystem facility via the
 * "sda" CHSC command (request code 0x0031).
 * @operation_code: facility-specific operation code passed to the hardware
 *
 * Returns 0 on success, -EOPNOTSUPP for response code 0x0101, or
 * another negative error code.
 */
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	/* Request/response block; placed in a zeroed DMA-capable page. */
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		/* Facility not provided by this machine. */
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
 out:
	free_page((unsigned long)sda_area);
	return ret;
}
812
/* Channel subsystem characteristics, filled in once at init time by
 * chsc_determine_css_characteristics(). */
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
815
/*
 * chsc_determine_css_characteristics - read the channel subsystem
 * characteristics (CHSC command code 0x0010, "scsc") into the global
 * css_general_characteristics and css_chsc_characteristics structures.
 *
 * Returns 0 on success or a negative error code.
 */
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	/* Request/response block; placed in a zeroed DMA-capable page. */
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		/* Copy both characteristic blocks to the globals. */
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page ((unsigned long) scsc_area);
	return result;
}
857
/* Exported so other (GPL) modules can inspect the characteristics. */
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);