rfkill: create useful userspace interface (/dev/rfkill)
[safe/jmp/linux-2.6] / net / rfkill / core.c
index 30a6f8d..c218e07 100644 (file)
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/rfkill.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
 
 #include "rfkill.h"
 
@@ -49,8 +54,10 @@ struct rfkill {
 
        unsigned long           state;
 
+       u32                     idx;
+
        bool                    registered;
-       bool                    suspended;
+       bool                    persistent;
 
        const struct rfkill_ops *ops;
        void                    *data;
@@ -69,6 +76,18 @@ struct rfkill {
 };
 #define to_rfkill(d)   container_of(d, struct rfkill, dev)
 
+struct rfkill_int_event {
+       struct list_head        list;
+       struct rfkill_event     ev;
+};
+
+struct rfkill_data {
+       struct list_head        list;
+       struct list_head        events;
+       struct mutex            mtx;
+       wait_queue_head_t       read_wait;
+       bool                    input_handler;
+};
 
 
 MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
@@ -90,6 +109,7 @@ MODULE_LICENSE("GPL");
  */
 static LIST_HEAD(rfkill_list); /* list of registered rf switches */
 static DEFINE_MUTEX(rfkill_global_mutex);
+static LIST_HEAD(rfkill_fds);  /* list of open fds of /dev/rfkill */
 
 static unsigned int rfkill_default_state = 1;
 module_param_named(default_state, rfkill_default_state, uint, 0444);
@@ -97,11 +117,9 @@ MODULE_PARM_DESC(default_state,
                 "Default initial state for all radio types, 0 = radio off");
 
 static struct {
-       bool cur, def;
+       bool cur, sav;
 } rfkill_global_states[NUM_RFKILL_TYPES];
 
-static unsigned long rfkill_states_default_locked;
-
 static bool rfkill_epo_lock_active;
 
 
@@ -171,12 +189,48 @@ static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
 }
 #endif /* CONFIG_RFKILL_LEDS */
 
-static void rfkill_uevent(struct rfkill *rfkill)
+/*
+ * rfkill_fill_event - fill a userspace struct rfkill_event from @rfkill
+ *
+ * Copies idx/type plus the requested @op, and snapshots the hard/soft
+ * block bits under rfkill->lock; a pending SW_PREV bit counts as
+ * soft-blocked so userspace sees the state being transitioned to.
+ */
+static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
+                             enum rfkill_operation op)
 {
-       if (!rfkill->registered || rfkill->suspended)
+       unsigned long flags;
+
+       ev->idx = rfkill->idx;
+       ev->type = rfkill->type;
+       ev->op = op;
+
+       spin_lock_irqsave(&rfkill->lock, flags);
+       ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
+       ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
+                                       RFKILL_BLOCK_SW_PREV));
+       spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+
+/*
+ * rfkill_send_events - queue one event on every open /dev/rfkill fd
+ *
+ * All visible callers hold rfkill_global_mutex, which protects the
+ * rfkill_fds list.  Delivery is best-effort: an fd whose event
+ * allocation fails simply misses this event (hence the 'continue').
+ */
+static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
+{
+       struct rfkill_data *data;
+       struct rfkill_int_event *ev;
+
+       list_for_each_entry(data, &rfkill_fds, list) {
+               ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+               if (!ev)
+                       continue;
+               rfkill_fill_event(&ev->ev, rfkill, op);
+               mutex_lock(&data->mtx);
+               list_add_tail(&ev->list, &data->events);
+               mutex_unlock(&data->mtx);
+               wake_up_interruptible(&data->read_wait);
+       }
+}
+
+/* notify both sysfs uevent listeners and /dev/rfkill readers of a change */
+static void rfkill_event(struct rfkill *rfkill)
+{
+       if (!rfkill->registered)
                return;
 
        kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
+
+       /* also send event to /dev/rfkill */
+       rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
 }
 
 static bool __rfkill_set_hw_state(struct rfkill *rfkill,
@@ -216,6 +270,9 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
        unsigned long flags;
        int err;
 
+       if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+               return;
+
        /*
         * Some platforms (...!) generate input events which affect the
         * _hard_ kill state -- whenever something tries to change the
@@ -238,9 +295,6 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
        rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
        spin_unlock_irqrestore(&rfkill->lock, flags);
 
-       if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
-               return;
-
        err = rfkill->ops->set_block(rfkill->data, blocked);
 
        spin_lock_irqsave(&rfkill->lock, flags);
@@ -260,9 +314,12 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
        spin_unlock_irqrestore(&rfkill->lock, flags);
 
        rfkill_led_trigger_event(rfkill);
-       rfkill_uevent(rfkill);
+       rfkill_event(rfkill);
 }
 
+#ifdef CONFIG_RFKILL_INPUT
+static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
+
 /**
  * __rfkill_switch_all - Toggle state of all switches of given type
  * @type: type of interfaces to be affected
@@ -299,6 +356,9 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
  */
 void rfkill_switch_all(enum rfkill_type type, bool blocked)
 {
+       if (atomic_read(&rfkill_input_disabled))
+               return;
+
        mutex_lock(&rfkill_global_mutex);
 
        if (!rfkill_epo_lock_active)
@@ -321,6 +381,9 @@ void rfkill_epo(void)
        struct rfkill *rfkill;
        int i;
 
+       if (atomic_read(&rfkill_input_disabled))
+               return;
+
        mutex_lock(&rfkill_global_mutex);
 
        rfkill_epo_lock_active = true;
@@ -328,9 +391,10 @@ void rfkill_epo(void)
                rfkill_set_block(rfkill, true);
 
        for (i = 0; i < NUM_RFKILL_TYPES; i++) {
-               rfkill_global_states[i].def = rfkill_global_states[i].cur;
+               rfkill_global_states[i].sav = rfkill_global_states[i].cur;
                rfkill_global_states[i].cur = true;
        }
+
        mutex_unlock(&rfkill_global_mutex);
 }
 
@@ -345,11 +409,14 @@ void rfkill_restore_states(void)
 {
        int i;
 
+       if (atomic_read(&rfkill_input_disabled))
+               return;
+
        mutex_lock(&rfkill_global_mutex);
 
        rfkill_epo_lock_active = false;
        for (i = 0; i < NUM_RFKILL_TYPES; i++)
-               __rfkill_switch_all(i, rfkill_global_states[i].def);
+               __rfkill_switch_all(i, rfkill_global_states[i].sav);
        mutex_unlock(&rfkill_global_mutex);
 }
 
@@ -361,6 +428,9 @@ void rfkill_restore_states(void)
  */
 void rfkill_remove_epo_lock(void)
 {
+       if (atomic_read(&rfkill_input_disabled))
+               return;
+
        mutex_lock(&rfkill_global_mutex);
        rfkill_epo_lock_active = false;
        mutex_unlock(&rfkill_global_mutex);
@@ -391,27 +461,7 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
 {
        return rfkill_global_states[type].cur;
 }
-
-void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked)
-{
-       mutex_lock(&rfkill_global_mutex);
-
-       /* don't allow unblock when epo */
-       if (rfkill_epo_lock_active && !blocked)
-               goto out;
-
-       /* too late */
-       if (rfkill_states_default_locked & BIT(type))
-               goto out;
-
-       rfkill_states_default_locked |= BIT(type);
-
-       rfkill_global_states[type].cur = blocked;
-       rfkill_global_states[type].def = blocked;
- out:
-       mutex_unlock(&rfkill_global_mutex);
-}
-EXPORT_SYMBOL(rfkill_set_global_sw_state);
+#endif
 
 
 bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
@@ -470,6 +520,20 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
 }
 EXPORT_SYMBOL(rfkill_set_sw_state);
 
+/*
+ * rfkill_init_sw_state - set the initial software block state
+ *
+ * Must be called before rfkill_register() (the BUG_ON enforces this).
+ * Also marks the device 'persistent', so rfkill_resume() will not
+ * re-assert a block state over what the driver reported here.
+ */
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+       unsigned long flags;
+
+       BUG_ON(!rfkill);
+       BUG_ON(rfkill->registered);
+
+       spin_lock_irqsave(&rfkill->lock, flags);
+       __rfkill_set_sw_state(rfkill, blocked);
+       rfkill->persistent = true;
+       spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+EXPORT_SYMBOL(rfkill_init_sw_state);
+
 void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
 {
        unsigned long flags;
@@ -486,16 +550,21 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
        swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
        hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
        __rfkill_set_sw_state(rfkill, sw);
+       if (hw)
+               rfkill->state |= RFKILL_BLOCK_HW;
+       else
+               rfkill->state &= ~RFKILL_BLOCK_HW;
 
        spin_unlock_irqrestore(&rfkill->lock, flags);
 
-       if (!rfkill->registered)
-               return;
+       if (!rfkill->registered) {
+               rfkill->persistent = true;
+       } else {
+               if (swprev != sw || hwprev != hw)
+                       schedule_work(&rfkill->uevent_work);
 
-       if (swprev != sw || hwprev != hw)
-               schedule_work(&rfkill->uevent_work);
-
-       rfkill_led_trigger_event(rfkill);
+               rfkill_led_trigger_event(rfkill);
+       }
 }
 EXPORT_SYMBOL(rfkill_set_states);
 
@@ -510,6 +579,8 @@ static ssize_t rfkill_name_show(struct device *dev,
 
 static const char *rfkill_get_type_str(enum rfkill_type type)
 {
+       BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
+
        switch (type) {
        case RFKILL_TYPE_WLAN:
                return "wlan";
@@ -521,11 +592,13 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
                return "wimax";
        case RFKILL_TYPE_WWAN:
                return "wwan";
+       case RFKILL_TYPE_GPS:
+               return "gps";
+       case RFKILL_TYPE_FM:
+               return "fm";
        default:
                BUG();
        }
-
-       BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
 }
 
 static ssize_t rfkill_type_show(struct device *dev,
@@ -537,6 +610,24 @@ static ssize_t rfkill_type_show(struct device *dev,
        return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
 }
 
+static ssize_t rfkill_idx_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       struct rfkill *rfkill = to_rfkill(dev);
+
+       return sprintf(buf, "%d\n", rfkill->idx);
+}
+
+static ssize_t rfkill_persistent_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       struct rfkill *rfkill = to_rfkill(dev);
+
+       return sprintf(buf, "%d\n", rfkill->persistent);
+}
+
 static u8 user_state_from_blocked(unsigned long state)
 {
        if (state & RFKILL_BLOCK_HW)
@@ -566,15 +657,26 @@ static ssize_t rfkill_state_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
 {
-       /*
-        * The intention was that userspace can only take control over
-        * a given device when/if rfkill-input doesn't control it due
-        * to user_claim. Since user_claim is currently unsupported,
-        * we never support changing the state from userspace -- this
-        * can be implemented again later.
-        */
+       struct rfkill *rfkill = to_rfkill(dev);
+       unsigned long state;
+       int err;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       err = strict_strtoul(buf, 0, &state);
+       if (err)
+               return err;
+
+       if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
+           state != RFKILL_USER_STATE_UNBLOCKED)
+               return -EINVAL;
+
+       mutex_lock(&rfkill_global_mutex);
+       rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
+       mutex_unlock(&rfkill_global_mutex);
 
-       return -EPERM;
+       return err ?: count;
 }
 
 static ssize_t rfkill_claim_show(struct device *dev,
@@ -594,6 +696,8 @@ static ssize_t rfkill_claim_store(struct device *dev,
 static struct device_attribute rfkill_dev_attrs[] = {
        __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
        __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
+       __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
+       __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
        __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
        __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
        __ATTR_NULL
@@ -656,8 +760,6 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
 
        rfkill_pause_polling(rfkill);
 
-       rfkill->suspended = true;
-
        return 0;
 }
 
@@ -666,14 +768,10 @@ static int rfkill_resume(struct device *dev)
        struct rfkill *rfkill = to_rfkill(dev);
        bool cur;
 
-       mutex_lock(&rfkill_global_mutex);
-       cur = rfkill_global_states[rfkill->type].cur;
-       rfkill_set_block(rfkill, cur);
-       mutex_unlock(&rfkill_global_mutex);
-
-       rfkill->suspended = false;
-
-       schedule_work(&rfkill->uevent_work);
+       if (!rfkill->persistent) {
+               cur = !!(rfkill->state & RFKILL_BLOCK_SW);
+               rfkill_set_block(rfkill, cur);
+       }
 
        rfkill_resume_polling(rfkill);
 
@@ -689,6 +787,19 @@ static struct class rfkill_class = {
        .resume         = rfkill_resume,
 };
 
+/* return true if @rfkill is hard- or soft-blocked (any RFKILL_BLOCK_ANY bit) */
+bool rfkill_blocked(struct rfkill *rfkill)
+{
+       unsigned long flags;
+       u32 state;
+
+       /* snapshot under the lock so hard/soft bits are read consistently */
+       spin_lock_irqsave(&rfkill->lock, flags);
+       state = rfkill->state;
+       spin_unlock_irqrestore(&rfkill->lock, flags);
+
+       return !!(state & RFKILL_BLOCK_ANY);
+}
+EXPORT_SYMBOL(rfkill_blocked);
+
 
 struct rfkill * __must_check rfkill_alloc(const char *name,
                                          struct device *parent,
@@ -708,7 +819,7 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
        if (WARN_ON(!name))
                return NULL;
 
-       if (WARN_ON(type >= NUM_RFKILL_TYPES))
+       if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
                return NULL;
 
        rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
@@ -754,7 +865,9 @@ static void rfkill_uevent_work(struct work_struct *work)
 
        rfkill = container_of(work, struct rfkill, uevent_work);
 
-       rfkill_uevent(rfkill);
+       mutex_lock(&rfkill_global_mutex);
+       rfkill_event(rfkill);
+       mutex_unlock(&rfkill_global_mutex);
 }
 
 static void rfkill_sync_work(struct work_struct *work)
@@ -785,18 +898,10 @@ int __must_check rfkill_register(struct rfkill *rfkill)
                goto unlock;
        }
 
+       rfkill->idx = rfkill_no;
        dev_set_name(dev, "rfkill%lu", rfkill_no);
        rfkill_no++;
 
-       if (!(rfkill_states_default_locked & BIT(rfkill->type))) {
-               /* first of its kind */
-               BUILD_BUG_ON(NUM_RFKILL_TYPES >
-                       sizeof(rfkill_states_default_locked) * 8);
-               rfkill_states_default_locked |= BIT(rfkill->type);
-               rfkill_global_states[rfkill->type].cur =
-                       rfkill_global_states[rfkill->type].def;
-       }
-
        list_add_tail(&rfkill->node, &rfkill_list);
 
        error = device_add(dev);
@@ -809,16 +914,26 @@ int __must_check rfkill_register(struct rfkill *rfkill)
 
        rfkill->registered = true;
 
-       if (rfkill->ops->poll) {
-               INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
+       INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
+       INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
+       INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
+
+       if (rfkill->ops->poll)
                schedule_delayed_work(&rfkill->poll_work,
                        round_jiffies_relative(POLL_INTERVAL));
-       }
 
-       INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
+       if (!rfkill->persistent || rfkill_epo_lock_active) {
+               schedule_work(&rfkill->sync_work);
+       } else {
+#ifdef CONFIG_RFKILL_INPUT
+               bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
 
-       INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
-       schedule_work(&rfkill->sync_work);
+               if (!atomic_read(&rfkill_input_disabled))
+                       __rfkill_switch_all(rfkill->type, soft_blocked);
+#endif
+       }
+
+       rfkill_send_events(rfkill, RFKILL_OP_ADD);
 
        mutex_unlock(&rfkill_global_mutex);
        return 0;
@@ -848,6 +963,7 @@ void rfkill_unregister(struct rfkill *rfkill)
        device_del(&rfkill->dev);
 
        mutex_lock(&rfkill_global_mutex);
+       rfkill_send_events(rfkill, RFKILL_OP_DEL);
        list_del_init(&rfkill->node);
        mutex_unlock(&rfkill_global_mutex);
 
@@ -862,6 +978,236 @@ void rfkill_destroy(struct rfkill *rfkill)
 }
 EXPORT_SYMBOL(rfkill_destroy);
 
+/*
+ * rfkill_fop_open - open /dev/rfkill
+ *
+ * Allocates the per-fd rfkill_data, queues one RFKILL_OP_ADD startup
+ * event per registered device, and only then publishes the fd on the
+ * global rfkill_fds list so it starts receiving broadcast events.
+ */
+static int rfkill_fop_open(struct inode *inode, struct file *file)
+{
+       struct rfkill_data *data;
+       struct rfkill *rfkill;
+       struct rfkill_int_event *ev, *tmp;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&data->events);
+       mutex_init(&data->mtx);
+       init_waitqueue_head(&data->read_wait);
+
+       mutex_lock(&rfkill_global_mutex);
+       mutex_lock(&data->mtx);
+       /*
+        * Queue the startup ADD events first and add this fd to
+        * rfkill_fds only once nothing can fail any more: adding it
+        * before the loop would leave a freed rfkill_data reachable
+        * from the global list on the allocation-failure path below
+        * (use-after-free via rfkill_send_events).  Both mutexes are
+        * held, so no event can be delivered out of order.
+        */
+       list_for_each_entry(rfkill, &rfkill_list, node) {
+               ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+               if (!ev)
+                       goto free;
+               rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
+               list_add_tail(&ev->list, &data->events);
+       }
+       list_add(&data->list, &rfkill_fds);
+       mutex_unlock(&data->mtx);
+       mutex_unlock(&rfkill_global_mutex);
+
+       file->private_data = data;
+
+       return nonseekable_open(inode, file);
+
+ free:
+       mutex_unlock(&data->mtx);
+       mutex_unlock(&rfkill_global_mutex);
+       mutex_destroy(&data->mtx);
+       list_for_each_entry_safe(ev, tmp, &data->events, list)
+               kfree(ev);
+       kfree(data);
+       return -ENOMEM;
+}
+
+/*
+ * rfkill_fop_poll - /dev/rfkill is always writable; readable only
+ * when at least one event is queued on this fd.
+ */
+static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+{
+       struct rfkill_data *data = file->private_data;
+       unsigned int res = POLLOUT | POLLWRNORM;
+
+       poll_wait(file, &data->read_wait, wait);
+
+       mutex_lock(&data->mtx);
+       if (!list_empty(&data->events))
+               res = POLLIN | POLLRDNORM;
+       mutex_unlock(&data->mtx);
+
+       return res;
+}
+
+/* locked helper for wait_event_interruptible(): any event queued? */
+static bool rfkill_readable(struct rfkill_data *data)
+{
+       bool r;
+
+       mutex_lock(&data->mtx);
+       r = !list_empty(&data->events);
+       mutex_unlock(&data->mtx);
+
+       return r;
+}
+
+/*
+ * rfkill_fop_read - return the oldest queued event to userspace
+ *
+ * Blocks until an event is available unless O_NONBLOCK is set
+ * (-EAGAIN).  A signal during the wait returns the -ERESTARTSYS
+ * from wait_event_interruptible.  At most sizeof(struct rfkill_event)
+ * bytes are copied; a short @count truncates the event.
+ */
+static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+                              size_t count, loff_t *pos)
+{
+       struct rfkill_data *data = file->private_data;
+       struct rfkill_int_event *ev;
+       unsigned long sz;
+       int ret;
+
+       mutex_lock(&data->mtx);
+
+       while (list_empty(&data->events)) {
+               if (file->f_flags & O_NONBLOCK) {
+                       ret = -EAGAIN;
+                       goto out;
+               }
+               /* drop the lock while sleeping; rfkill_readable retakes it */
+               mutex_unlock(&data->mtx);
+               ret = wait_event_interruptible(data->read_wait,
+                                              rfkill_readable(data));
+               mutex_lock(&data->mtx);
+
+               if (ret)
+                       goto out;
+       }
+
+       ev = list_first_entry(&data->events, struct rfkill_int_event,
+                               list);
+
+       sz = min_t(unsigned long, sizeof(ev->ev), count);
+       ret = sz;
+       if (copy_to_user(buf, &ev->ev, sz))
+               ret = -EFAULT;
+
+       /* the event is consumed even when the copy faults */
+       list_del(&ev->list);
+       kfree(ev);
+ out:
+       mutex_unlock(&data->mtx);
+       return ret;
+}
+
+/*
+ * rfkill_fop_write - accept a change request from userspace
+ *
+ * Only RFKILL_OP_CHANGE (one device, matched by idx) and
+ * RFKILL_OP_CHANGE_ALL (all devices of a type, or every device for
+ * RFKILL_TYPE_ALL) are accepted.  CHANGE_ALL also records the new
+ * soft state in rfkill_global_states so later-registered devices
+ * inherit it.  Returns the number of bytes actually consumed.
+ */
+static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
+                               size_t count, loff_t *pos)
+{
+       struct rfkill *rfkill;
+       struct rfkill_event ev;
+
+       /* we don't need the 'hard' variable but accept it */
+       if (count < RFKILL_EVENT_SIZE_V1 - 1)
+               return -EINVAL;
+
+       /*
+        * Copy as much data as we can accept into our 'ev' buffer,
+        * but tell userspace how much we've copied so it can determine
+        * our API version even in a write() call, if it cares.
+        */
+       count = min(count, sizeof(ev));
+       if (copy_from_user(&ev, buf, count))
+               return -EFAULT;
+
+       if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
+               return -EINVAL;
+
+       if (ev.type >= NUM_RFKILL_TYPES)
+               return -EINVAL;
+
+       mutex_lock(&rfkill_global_mutex);
+
+       if (ev.op == RFKILL_OP_CHANGE_ALL) {
+               if (ev.type == RFKILL_TYPE_ALL) {
+                       enum rfkill_type i;
+                       for (i = 0; i < NUM_RFKILL_TYPES; i++)
+                               rfkill_global_states[i].cur = ev.soft;
+               } else {
+                       rfkill_global_states[ev.type].cur = ev.soft;
+               }
+       }
+
+       list_for_each_entry(rfkill, &rfkill_list, node) {
+               if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
+                       continue;
+
+               if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
+                       continue;
+
+               rfkill_set_block(rfkill, ev.soft);
+       }
+       mutex_unlock(&rfkill_global_mutex);
+
+       return count;
+}
+
+/*
+ * rfkill_fop_release - tear down a /dev/rfkill fd
+ *
+ * Unlinks the fd from rfkill_fds (so no new events arrive), frees any
+ * still-queued events, and re-enables the input handler if this fd
+ * had disabled it via RFKILL_IOC_NOINPUT.
+ */
+static int rfkill_fop_release(struct inode *inode, struct file *file)
+{
+       struct rfkill_data *data = file->private_data;
+       struct rfkill_int_event *ev, *tmp;
+
+       mutex_lock(&rfkill_global_mutex);
+       list_del(&data->list);
+       mutex_unlock(&rfkill_global_mutex);
+
+       mutex_destroy(&data->mtx);
+       list_for_each_entry_safe(ev, tmp, &data->events, list)
+               kfree(ev);
+
+#ifdef CONFIG_RFKILL_INPUT
+       if (data->input_handler)
+               if (atomic_dec_return(&rfkill_input_disabled) == 0)
+                       printk(KERN_DEBUG "rfkill: input handler enabled\n");
+#endif
+
+       kfree(data);
+
+       return 0;
+}
+
+#ifdef CONFIG_RFKILL_INPUT
+/*
+ * rfkill_fop_ioctl - only RFKILL_IOC_NOINPUT is supported: disable the
+ * in-kernel input handler while this fd is open.  The counter is bumped
+ * at most once per fd (input_handler flag); release() undoes it.
+ */
+static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       struct rfkill_data *data = file->private_data;
+
+       if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
+               return -ENOSYS;
+
+       if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
+               return -ENOSYS;
+
+       mutex_lock(&data->mtx);
+
+       if (!data->input_handler) {
+               if (atomic_inc_return(&rfkill_input_disabled) == 1)
+                       printk(KERN_DEBUG "rfkill: input handler disabled\n");
+               data->input_handler = true;
+       }
+
+       mutex_unlock(&data->mtx);
+
+       return 0;
+}
+#endif
+
+static const struct file_operations rfkill_fops = {
+       .owner          = THIS_MODULE,
+       .open           = rfkill_fop_open,
+       .read           = rfkill_fop_read,
+       .write          = rfkill_fop_write,
+       .poll           = rfkill_fop_poll,
+       .release        = rfkill_fop_release,
+#ifdef CONFIG_RFKILL_INPUT
+       .unlocked_ioctl = rfkill_fop_ioctl,
+       .compat_ioctl   = rfkill_fop_ioctl,
+#endif
+};
+
+static struct miscdevice rfkill_miscdev = {
+       .name   = "rfkill",
+       .fops   = &rfkill_fops,
+       .minor  = MISC_DYNAMIC_MINOR,
+};
 
 static int __init rfkill_init(void)
 {
@@ -869,16 +1215,25 @@ static int __init rfkill_init(void)
        int i;
 
        for (i = 0; i < NUM_RFKILL_TYPES; i++)
-               rfkill_global_states[i].def = !rfkill_default_state;
+               rfkill_global_states[i].cur = !rfkill_default_state;
 
        error = class_register(&rfkill_class);
        if (error)
                goto out;
 
+       error = misc_register(&rfkill_miscdev);
+       if (error) {
+               class_unregister(&rfkill_class);
+               goto out;
+       }
+
 #ifdef CONFIG_RFKILL_INPUT
        error = rfkill_handler_init();
-       if (error)
+       if (error) {
+               misc_deregister(&rfkill_miscdev);
                class_unregister(&rfkill_class);
+               goto out;
+       }
 #endif
 
  out:
@@ -891,6 +1246,7 @@ static void __exit rfkill_exit(void)
 #ifdef CONFIG_RFKILL_INPUT
        rfkill_handler_exit();
 #endif
+       misc_deregister(&rfkill_miscdev);
        class_unregister(&rfkill_class);
 }
 module_exit(rfkill_exit);