const: constify remaining dev_pm_ops
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 6a83e2d..ec88c59 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/char/sclp.c
- *     core function to access sclp interface
+ * core function to access sclp interface
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
- *              Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *           Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/module.h>
 #include <linux/timer.h>
 #include <linux/reboot.h>
 #include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
 #include <asm/types.h>
 #include <asm/s390_ext.h>
 
@@ -28,10 +31,10 @@ static ext_int_info_t ext_int_info_hwc;
 /* Lock to protect internal data consistency. */
 static DEFINE_SPINLOCK(sclp_lock);
 
-/* Mask of events that we can receive from the sclp interface. */
+/* Mask of events that we can send to the sclp interface. */
 static sccb_mask_t sclp_receive_mask;
 
-/* Mask of events that we can send to the sclp interface. */
+/* Mask of events that we can receive from the sclp interface. */
 static sccb_mask_t sclp_send_mask;
 
 /* List of registered event listeners and senders. */
@@ -46,6 +49,16 @@ static struct sclp_req sclp_init_req;
 static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
+/* Suspend request */
+static DECLARE_COMPLETION(sclp_request_queue_flushed);
+
+static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
+{
+       complete(&sclp_request_queue_flushed);
+}
+
+static struct sclp_req sclp_suspend_req;
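+/*
+ * The suspend request is queued with a NULL sccb, so it can never be
+ * started; once it reaches the head of the request queue it is completed
+ * right away (see the !req->sccb checks below), which signals that all
+ * previously queued requests have been flushed.
+ */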
+
 /* Timer for request retries. */
 static struct timer_list sclp_request_timer;
 
@@ -83,6 +96,12 @@ static volatile enum sclp_mask_state_t {
        sclp_mask_state_initializing
 } sclp_mask_state = sclp_mask_state_idle;
 
+/* Internal state: is the driver suspended? */
+static enum sclp_suspend_state_t {
+       sclp_suspend_state_running,
+       sclp_suspend_state_suspended,
+} sclp_suspend_state = sclp_suspend_state_running;
+
 /* Maximum retry counts */
 #define SCLP_INIT_RETRY                3
 #define SCLP_MASK_RETRY                3
@@ -92,6 +111,7 @@ static volatile enum sclp_mask_state_t {
 #define SCLP_RETRY_INTERVAL    30
 
 static void sclp_process_queue(void);
+static void __sclp_make_read_req(void);
 static int sclp_init_mask(int calculate);
 static int sclp_init(void);
 
@@ -114,7 +134,6 @@ sclp_service_call(sclp_cmdw_t command, void *sccb)
        return 0;
 }
 
-static inline void __sclp_make_read_req(void);
 
 static void
 __sclp_queue_read_req(void)
@@ -210,6 +229,8 @@ sclp_process_queue(void)
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+               if (!req->sccb)
+                       goto do_post;
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
@@ -221,6 +242,7 @@ sclp_process_queue(void)
                                                 sclp_request_timeout, 0);
                        break;
                }
+do_post:
                /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
@@ -232,6 +254,19 @@ sclp_process_queue(void)
        spin_unlock_irqrestore(&sclp_lock, flags);
 }
 
+static int __sclp_can_add_request(struct sclp_req *req)
+{
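+       /* The init and suspend requests are exempt from the state checks
+        * below: they are issued precisely while initializing or while a
+        * suspend is in progress. */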
+       if (req == &sclp_suspend_req || req == &sclp_init_req)
+               return 1;
+       if (sclp_suspend_state != sclp_suspend_state_running)
+               return 0;
+       if (sclp_init_state != sclp_init_state_initialized)
+               return 0;
+       if (sclp_activation_state != sclp_activation_state_active)
+               return 0;
+       return 1;
+}
+
 /* Queue a new request. Return zero on success, non-zero otherwise. */
 int
 sclp_add_request(struct sclp_req *req)
@@ -240,9 +275,7 @@ sclp_add_request(struct sclp_req *req)
        int rc;
 
        spin_lock_irqsave(&sclp_lock, flags);
-       if ((sclp_init_state != sclp_init_state_initialized ||
-            sclp_activation_state != sclp_activation_state_active) &&
-           req != &sclp_init_req) {
+       if (!__sclp_can_add_request(req)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
@@ -253,10 +286,16 @@ sclp_add_request(struct sclp_req *req)
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
+               if (!req->sccb) {
+                       list_del(&req->list);
+                       rc = -ENODATA;
+                       goto out;
+               }
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
+out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
 }
@@ -279,8 +318,11 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
-               /* Search for event handler */
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
+               /* Check for malformed hardware response */
+               if (evbuf->length == 0)
+                       break;
+               /* Search for event handler */
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
@@ -317,8 +359,7 @@ sclp_read_cb(struct sclp_req *req, void *data)
 }
 
 /* Prepare read event data request. Called while sclp_lock is locked. */
-static inline void
-__sclp_make_read_req(void)
+static void __sclp_make_read_req(void)
 {
        struct sccb_header *sccb;
 
@@ -380,7 +421,7 @@ sclp_interrupt_handler(__u16 code)
                }
                sclp_running_state = sclp_running_state_idle;
        }
-       if (evbuf_pending && sclp_receive_mask != 0 &&
+       if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
@@ -399,6 +440,7 @@ sclp_tod_from_jiffies(unsigned long jiffies)
 void
 sclp_sync_wait(void)
 {
+       unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
@@ -419,11 +461,12 @@ sclp_sync_wait(void)
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
+       old_tick = local_tick_disable();
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
+       cr0_sync &= 0xffff00a0;
        cr0_sync |= 0x00000200;
-       cr0_sync &= 0xFFFFF3AC;
        __ctl_load(cr0_sync, 0, 0);
        __raw_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
@@ -439,13 +482,13 @@ sclp_sync_wait(void)
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
+       local_tick_enable(old_tick);
        local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(sclp_sync_wait);
 
 /* Dispatch changes in send and receive mask to registered listeners. */
-static inline void
+static void
 sclp_dispatch_state_change(void)
 {
        struct list_head *l;
@@ -459,8 +502,8 @@ sclp_dispatch_state_change(void)
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
-                       receive_mask = reg->receive_mask & sclp_receive_mask;
-                       send_mask = reg->send_mask & sclp_send_mask;
+                       receive_mask = reg->send_mask & sclp_receive_mask;
+                       send_mask = reg->receive_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
@@ -506,11 +549,13 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = scbuf->sclp_send_mask;
        spin_unlock_irqrestore(&sclp_lock, flags);
+       if (scbuf->validity_sclp_active_facility_mask)
+               sclp_facilities = scbuf->sclp_active_facility_mask;
        sclp_dispatch_state_change();
 }
 
 static struct sclp_register sclp_state_change_event = {
-       .receive_mask = EvTyp_StateChange_Mask,
+       .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
 };
 
@@ -553,6 +598,7 @@ sclp_register(struct sclp_register *reg)
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
+       reg->pm_event_posted = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
@@ -615,8 +661,8 @@ struct init_sccb {
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
-       sccb_mask_t sclp_send_mask;
        sccb_mask_t sclp_receive_mask;
+       sccb_mask_t sclp_send_mask;
 } __attribute__((packed));
 
 /* Prepare init mask request. Called while sclp_lock is locked. */
@@ -782,11 +828,9 @@ sclp_check_handler(__u16 code)
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
-       if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
-               printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
-                      "for buffer at 0x%x\n", finished_sccb);
-               return;
-       }
+       if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
+               panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
+                     finished_sccb);
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
@@ -875,22 +919,134 @@ static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
 };
 
+/*
+ * Suspend/resume SCLP notifier implementation
+ */
+
+static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
+{
+       struct sclp_register *reg;
+       unsigned long flags;
+
+       if (!rollback) {
+               spin_lock_irqsave(&sclp_lock, flags);
+               list_for_each_entry(reg, &sclp_reg_list, list)
+                       reg->pm_event_posted = 0;
+               spin_unlock_irqrestore(&sclp_lock, flags);
+       }
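+       /* Repeatedly pick a listener that has not yet been notified (or,
+        * on rollback, one that has been), drop sclp_lock before invoking
+        * its callback, then rescan; pm_event_posted prevents notifying
+        * the same listener twice. */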
+       do {
+               spin_lock_irqsave(&sclp_lock, flags);
+               list_for_each_entry(reg, &sclp_reg_list, list) {
+                       if (rollback && reg->pm_event_posted)
+                               goto found;
+                       if (!rollback && !reg->pm_event_posted)
+                               goto found;
+               }
+               spin_unlock_irqrestore(&sclp_lock, flags);
+               return;
+found:
+               spin_unlock_irqrestore(&sclp_lock, flags);
+               if (reg->pm_event_fn)
+                       reg->pm_event_fn(reg, sclp_pm_event);
+               reg->pm_event_posted = rollback ? 0 : 1;
+       } while (1);
+}
+
+/*
+ * Suspend/resume callbacks for platform device
+ */
+
+static int sclp_freeze(struct device *dev)
+{
+       unsigned long flags;
+       int rc;
+
+       sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
+
+       spin_lock_irqsave(&sclp_lock, flags);
+       sclp_suspend_state = sclp_suspend_state_suspended;
+       spin_unlock_irqrestore(&sclp_lock, flags);
+
+       /* Init suspend data */
+       memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
+       sclp_suspend_req.callback = sclp_suspend_req_cb;
+       sclp_suspend_req.status = SCLP_REQ_FILLED;
+       init_completion(&sclp_request_queue_flushed);
+
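+       /* Queue the flush marker. -ENODATA means the request queue was
+        * already empty and the marker was dropped, so there is nothing
+        * to wait for. */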
+       rc = sclp_add_request(&sclp_suspend_req);
+       if (rc == 0)
+               wait_for_completion(&sclp_request_queue_flushed);
+       else if (rc != -ENODATA)
+               goto fail_thaw;
+
+       rc = sclp_deactivate();
+       if (rc)
+               goto fail_thaw;
+       return 0;
+
+fail_thaw:
+       spin_lock_irqsave(&sclp_lock, flags);
+       sclp_suspend_state = sclp_suspend_state_running;
+       spin_unlock_irqrestore(&sclp_lock, flags);
+       sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
+       return rc;
+}
+
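+/* Reactivate the interface, mark the driver as running again and post
+ * the given event (THAW or RESTORE) to all registered listeners. */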
+static int sclp_undo_suspend(enum sclp_pm_event event)
+{
+       unsigned long flags;
+       int rc;
+
+       rc = sclp_reactivate();
+       if (rc)
+               return rc;
+
+       spin_lock_irqsave(&sclp_lock, flags);
+       sclp_suspend_state = sclp_suspend_state_running;
+       spin_unlock_irqrestore(&sclp_lock, flags);
+
+       sclp_pm_event(event, 0);
+       return 0;
+}
+
+static int sclp_thaw(struct device *dev)
+{
+       return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+}
+
+static int sclp_restore(struct device *dev)
+{
+       return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
+}
+
+static const struct dev_pm_ops sclp_pm_ops = {
+       .freeze         = sclp_freeze,
+       .thaw           = sclp_thaw,
+       .restore        = sclp_restore,
+};
+
+static struct platform_driver sclp_pdrv = {
+       .driver = {
+               .name   = "sclp",
+               .owner  = THIS_MODULE,
+               .pm     = &sclp_pm_ops,
+       },
+};
+
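+/* SCLP has no bus device of its own; a platform device is registered
+ * so that the PM core invokes sclp_pm_ops on suspend/resume. */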
+static struct platform_device *sclp_pdev;
+
 /* Initialize SCLP driver. Return zero if driver is operational, non-zero
  * otherwise. */
 static int
 sclp_init(void)
 {
        unsigned long flags;
-       int rc;
+       int rc = 0;
 
-       if (!MACHINE_HAS_SCLP)
-               return -ENODEV;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
-       if (sclp_init_state != sclp_init_state_uninitialized) {
-               spin_unlock_irqrestore(&sclp_lock, flags);
-               return 0;
-       }
+       if (sclp_init_state != sclp_init_state_uninitialized)
+               goto fail_unlock;
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
@@ -901,27 +1057,17 @@ sclp_init(void)
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
-       if (rc) {
-               sclp_init_state = sclp_init_state_uninitialized;
-               spin_unlock_irqrestore(&sclp_lock, flags);
-               return rc;
-       }
+       if (rc)
+               goto fail_init_state_uninitialized;
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
-       if (rc) {
-               sclp_init_state = sclp_init_state_uninitialized;
-               spin_unlock_irqrestore(&sclp_lock, flags);
-               return rc;
-       }
+       if (rc)
+               goto fail_init_state_uninitialized;
        /* Register interrupt handler */
        rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                               &ext_int_info_hwc);
-       if (rc) {
-               unregister_reboot_notifier(&sclp_reboot_notifier);
-               sclp_init_state = sclp_init_state_uninitialized;
-               spin_unlock_irqrestore(&sclp_lock, flags);
-               return rc;
-       }
+       if (rc)
+               goto fail_unregister_reboot_notifier;
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
@@ -929,4 +1075,56 @@ sclp_init(void)
        ctl_set_bit(0, 9);
        sclp_init_mask(1);
        return 0;
+
+fail_unregister_reboot_notifier:
+       unregister_reboot_notifier(&sclp_reboot_notifier);
+fail_init_state_uninitialized:
+       sclp_init_state = sclp_init_state_uninitialized;
+fail_unlock:
+       spin_unlock_irqrestore(&sclp_lock, flags);
+       return rc;
+}
+
+/*
+ * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
+ * to print the panic message.
+ */
+static int sclp_panic_notify(struct notifier_block *self,
+                            unsigned long event, void *data)
+{
+       if (sclp_suspend_state == sclp_suspend_state_suspended)
+               sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+       return NOTIFY_OK;
 }
+
+static struct notifier_block sclp_on_panic_nb = {
+       .notifier_call = sclp_panic_notify,
+       .priority = SCLP_PANIC_PRIO,
+};
+
+static __init int sclp_initcall(void)
+{
+       int rc;
+
+       rc = platform_driver_register(&sclp_pdrv);
+       if (rc)
+               return rc;
+       sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
+       rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+       if (rc)
+               goto fail_platform_driver_unregister;
+       rc = atomic_notifier_chain_register(&panic_notifier_list,
+                                           &sclp_on_panic_nb);
+       if (rc)
+               goto fail_platform_device_unregister;
+
+       return sclp_init();
+
+fail_platform_device_unregister:
+       platform_device_unregister(sclp_pdev);
+fail_platform_driver_unregister:
+       platform_driver_unregister(&sclp_pdrv);
+       return rc;
+}
+
+arch_initcall(sclp_initcall);