/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>

#include "cio_debug.h"
int css_init_done = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);
			ret = fn(schid, data);
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
	} while (schid.ssid++ < max_ssid);
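/*
 * Note: the nested loops above present every possible subchannel id (each
 * ssid up to max_ssid, each subchannel number up to __MAX_SUBCHANNEL) to the
 * callback, e.g. call_fn_all_sch() or call_fn_unknown_sch() below.
 */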
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	rc = cb->fn_known_sch(sch, cb->data);
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	rc = cb->fn_known_sch(sch, cb->data);
	put_device(&sch->dev);
	if (cb->fn_unknown_sch)
		rc = cb->fn_unknown_sch(schid, cb->data);
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id, void *),
			       void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of OOM */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);

	/* Process unregistered subchannels. */
	rc = for_each_subchannel(call_fn_unknown_sch, &cb);
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i, mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i, mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition has succeeded.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->dev.driver) {
		/*
		 * No driver matched. Generate the uevent now so that a
		 * fitting driver module may be loaded based on the modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
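/*
 * Note (for illustration): the KOBJ_ADD uevent generated above carries
 * MODALIAS=css:tX, where X is the subchannel type nibble (see
 * modalias_show() above and css_uevent() below); a subchannel driver that
 * wants to be loaded on demand exports its css_device_id table via
 * MODULE_DEVICE_TABLE(css, ...) so that udev can map the modalias to the
 * module.
 */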
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}
static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);
	return css_probe_device(schid);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver->sch_event)
		ret = sch->driver->sch_event(sch, slow);
	else
		dev_dbg(&sch->dev,
			"Got subchannel machine check but "
			"no sch_event handler provided.\n");
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;
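/*
 * Slow path evaluation: subchannels that need (re-)evaluation are collected
 * in slow_subchannel_set and processed by css_slow_path_func() on the
 * slow_path_wq workqueue; css_eval_scheduled and css_eval_wq allow
 * channel_subsystem_init_sync() to wait until the set has been drained.
 */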
static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval, rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval, rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(schid);
		/* -ENXIO, -ENOMEM and -EIO should abort looping. */
	}
	return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}
void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
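/*
 * Usage sketch (illustrative only): callers that cannot evaluate a
 * subchannel immediately hand it to the slow path worker instead:
 *
 *	css_schedule_eval(schid);	queue one subchannel for evaluation
 *	css_schedule_eval_all();	queue a rescan of all subchannels
 *	css_wait_for_slow_path();	process context: wait for the worker
 */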
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}
static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	if (val == 0)
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
	else if (val == 1)
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
	else
		ret = -EINVAL;
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
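/*
 * From user space, channel measurement is toggled through this attribute;
 * assuming the default channel subsystem instance css0, "echo 1 >
 * /sys/devices/css0/cm_enable" enables it and "echo 0" disables it again
 * (path shown for illustration).
 */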
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	int i, ret = NOTIFY_DONE;

	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}
	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};
/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels which appeared during hibernation */
		css_schedule_reprobe();
		break;
static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};
/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannel instances are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	ret = chsc_alloc_sei_area();

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	default:
		max_ssid = 0;
	}

	ret = slow_subchannel_init();
	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;
	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_CSS);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_CSS);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	ret = io_subchannel_init();
	if (ret)
		css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup before the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	/* Wait for the evaluation of subchannels to finish. */
	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
	/* Wait for the subchannel type specific initialization to finish. */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
subsys_initcall_sync(channel_subsystem_init_sync);
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}
	return 0;
}
static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}
static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}
static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}
static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}
static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}
static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}
static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}
static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}
static struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};
struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
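/*
 * Minimal usage sketch (hypothetical driver, names are examples only):
 *
 *	static struct css_device_id example_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static struct css_driver example_driver = {
 *		.owner = THIS_MODULE,
 *		.name = "example",
 *		.subchannel_type = example_ids,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	ret = css_driver_register(&example_driver);
 */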
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);