diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index efc5a88..4b2a1a9 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/delay.h>
 
 #include <net/irda/irda.h>
 
 #include "sir-dev.h"
 
+
+static struct workqueue_struct *irda_sir_wq;
+
+/* STATE MACHINE */
+
+/* substate handler of the config-fsm to handle the cases where we want
+ * to wait for transmit completion before changing the port configuration
+ */
+
+static int sirdev_tx_complete_fsm(struct sir_dev *dev)
+{
+       struct sir_fsm *fsm = &dev->fsm;
+       unsigned next_state, delay;
+       unsigned bytes_left;
+
+       do {
+               next_state = fsm->substate;     /* default: stay in current substate */
+               delay = 0;
+
+               switch(fsm->substate) {
+
+               case SIRDEV_STATE_WAIT_XMIT:
+                       if (dev->drv->chars_in_buffer)
+                               bytes_left = dev->drv->chars_in_buffer(dev);
+                       else
+                               bytes_left = 0;
+                       if (!bytes_left) {
+                               next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
+                               break;
+                       }
+
+                       if (dev->speed > 115200)
+                               delay = (bytes_left*8*10000) / (dev->speed/100);
+                       else if (dev->speed > 0)
+                               delay = (bytes_left*10*10000) / (dev->speed/100);
+                       else
+                               delay = 0;
+                       /* expected delay (usec) until remaining bytes are sent */
+                       if (delay < 100) {
+                               udelay(delay);
+                               delay = 0;
+                               break;
+                       }
+                       /* sleep some longer delay (msec) */
+                       delay = (delay+999) / 1000;
+                       break;
+
+               case SIRDEV_STATE_WAIT_UNTIL_SENT:
+                       /* block until the underlying hardware buffers are empty */
+                       if (dev->drv->wait_until_sent)
+                               dev->drv->wait_until_sent(dev);
+                       next_state = SIRDEV_STATE_TX_DONE;
+                       break;
+
+               case SIRDEV_STATE_TX_DONE:
+                       return 0;
+
+               default:
+                       IRDA_ERROR("%s - undefined state\n", __func__);
+                       return -EINVAL;
+               }
+               fsm->substate = next_state;
+       } while (delay == 0);
+       return delay;
+}
+
+/*
+ * Function sirdev_config_fsm
+ *
+ * State machine to handle the configuration of the device (and attached dongle, if any).
+ * This handler is scheduled for execution in kIrDAd context, so we can sleep.
+ * However, kIrDAd is shared by all sir_dev devices, so we had better not sleep there
+ * too long. Instead, for longer delays we start a timer to reschedule ourselves later.
+ * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
+ * Both must be unlocked/restarted on completion - but only on final exit.
+ */
+
+static void sirdev_config_fsm(struct work_struct *work)
+{
+       struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
+       struct sir_fsm *fsm = &dev->fsm;
+       int next_state;
+       int ret = -1;
+       unsigned delay;
+
+       IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
+
+       do {
+               IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
+                       __func__, fsm->state, fsm->substate);
+
+               next_state = fsm->state;
+               delay = 0;
+
+               switch(fsm->state) {
+
+               case SIRDEV_STATE_DONGLE_OPEN:
+                       if (dev->dongle_drv != NULL) {
+                               ret = sirdev_put_dongle(dev);
+                               if (ret) {
+                                       fsm->result = -EINVAL;
+                                       next_state = SIRDEV_STATE_ERROR;
+                                       break;
+                               }
+                       }
+
+                       /* Initialize dongle */
+                       ret = sirdev_get_dongle(dev, fsm->param);
+                       if (ret) {
+                               fsm->result = ret;
+                               next_state = SIRDEV_STATE_ERROR;
+                               break;
+                       }
+
+                       /* Dongles are powered through the modem control lines which
+                        * were just set during open. Before resetting, let's wait for
+                        * the power to stabilize. This is what some dongle drivers did
+                        * in open before, while others didn't - should be safe anyway.
+                        */
+
+                       delay = 50;
+                       fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+                       next_state = SIRDEV_STATE_DONGLE_RESET;
+
+                       fsm->param = 9600;
+
+                       break;
+
+               case SIRDEV_STATE_DONGLE_CLOSE:
+                       /* shouldn't we just treat this as success? */
+                       if (dev->dongle_drv == NULL) {
+                               fsm->result = -EINVAL;
+                               next_state = SIRDEV_STATE_ERROR;
+                               break;
+                       }
+
+                       ret = sirdev_put_dongle(dev);
+                       if (ret) {
+                               fsm->result = ret;
+                               next_state = SIRDEV_STATE_ERROR;
+                               break;
+                       }
+                       next_state = SIRDEV_STATE_DONE;
+                       break;
+
+               case SIRDEV_STATE_SET_DTR_RTS:
+                       ret = sirdev_set_dtr_rts(dev,
+                               (fsm->param&0x02) ? TRUE : FALSE,
+                               (fsm->param&0x01) ? TRUE : FALSE);
+                       next_state = SIRDEV_STATE_DONE;
+                       break;
+
+               case SIRDEV_STATE_SET_SPEED:
+                       fsm->substate = SIRDEV_STATE_WAIT_XMIT;
+                       next_state = SIRDEV_STATE_DONGLE_CHECK;
+                       break;
+
+               case SIRDEV_STATE_DONGLE_CHECK:
+                       ret = sirdev_tx_complete_fsm(dev);
+                       if (ret < 0) {
+                               fsm->result = ret;
+                               next_state = SIRDEV_STATE_ERROR;
+                               break;
+                       }
+                       if ((delay=ret) != 0)
+                               break;
+
+                       if (dev->dongle_drv) {
+                               fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+                               next_state = SIRDEV_STATE_DONGLE_RESET;
+                       }
+                       else {
+                               dev->speed = fsm->param;
+                               next_state = SIRDEV_STATE_PORT_SPEED;
+                       }
+                       break;
+
+               case SIRDEV_STATE_DONGLE_RESET:
+                       if (dev->dongle_drv->reset) {
+                               ret = dev->dongle_drv->reset(dev);
+                               if (ret < 0) {
+                                       fsm->result = ret;
+                                       next_state = SIRDEV_STATE_ERROR;
+                                       break;
+                               }
+                       }
+                       else
+                               ret = 0;
+                       if ((delay=ret) == 0) {
+                               /* set serial port according to dongle default speed */
+                               if (dev->drv->set_speed)
+                                       dev->drv->set_speed(dev, dev->speed);
+                               fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
+                               next_state = SIRDEV_STATE_DONGLE_SPEED;
+                       }
+                       break;
+
+               case SIRDEV_STATE_DONGLE_SPEED:
+                       if (dev->dongle_drv->set_speed) {
+                               ret = dev->dongle_drv->set_speed(dev, fsm->param);
+                               if (ret < 0) {
+                                       fsm->result = ret;
+                                       next_state = SIRDEV_STATE_ERROR;
+                                       break;
+                               }
+                       }
+                       else
+                               ret = 0;
+                       if ((delay=ret) == 0)
+                               next_state = SIRDEV_STATE_PORT_SPEED;
+                       break;
+
+               case SIRDEV_STATE_PORT_SPEED:
+                       /* Finally we are ready to change the serial port speed */
+                       if (dev->drv->set_speed)
+                               dev->drv->set_speed(dev, dev->speed);
+                       dev->new_speed = 0;
+                       next_state = SIRDEV_STATE_DONE;
+                       break;
+
+               case SIRDEV_STATE_DONE:
+                       /* Signal network layer so it can send more frames */
+                       netif_wake_queue(dev->netdev);
+                       next_state = SIRDEV_STATE_COMPLETE;
+                       break;
+
+               default:
+                       IRDA_ERROR("%s - undefined state\n", __func__);
+                       fsm->result = -EINVAL;
+                       /* fall thru */
+
+               case SIRDEV_STATE_ERROR:
+                       IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
+
+#if 0  /* don't enable this before we have netdev->tx_timeout to recover */
+                       netif_stop_queue(dev->netdev);
+#else
+                       netif_wake_queue(dev->netdev);
+#endif
+                       /* fall thru */
+
+               case SIRDEV_STATE_COMPLETE:
+                       /* config change finished, so we are not busy any longer */
+                       sirdev_enable_rx(dev);
+                       up(&fsm->sem);
+                       return;
+               }
+               fsm->state = next_state;
+       } while(!delay);
+
+       queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
+}
+
+/* Schedule some device configuration task for execution by kIrDAd
+ * on behalf of the above state machine.
+ * Can be called from process or interrupt/tasklet context.
+ */
+
+int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
+{
+       struct sir_fsm *fsm = &dev->fsm;
+
+       IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
+                       initial_state, param);
+
+       if (down_trylock(&fsm->sem)) {
+               if (in_interrupt()  ||  in_atomic()  ||  irqs_disabled()) {
+                       IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
+                       return -EWOULDBLOCK;
+               } else
+                       down(&fsm->sem);
+       }
+
+       if (fsm->state == SIRDEV_STATE_DEAD) {
+               /* race with sirdev_close should never happen */
+               IRDA_ERROR("%s(), stale instance!\n", __func__);
+               up(&fsm->sem);
+               return -ESTALE;         /* or better EPIPE? */
+       }
+
+       netif_stop_queue(dev->netdev);
+       atomic_set(&dev->enable_rx, 0);
+
+       fsm->state = initial_state;
+       fsm->param = param;
+       fsm->result = 0;
+
+       INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+       queue_delayed_work(irda_sir_wq, &fsm->work, 0);
+       return 0;
+}
+
+
 /***************************************************************************/
 
 void sirdev_enable_rx(struct sir_dev *dev)
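
The hunk above moves the configuration state machine onto a dedicated singlethread workqueue (irda_sir_wq): sirdev_schedule_request() records the request, stops the xmit queue, disables rx and queues fsm->work, and sirdev_config_fsm() then steps through the states in process context, re-queuing itself with queue_delayed_work() whenever a state asks for a delay. As a minimal sketch of how a port or dongle driver kicks this machinery: the my_request_speed() helper and the 115200 value are hypothetical; the entry point, state constant and error code are the ones added above, and real callers normally go through the sirdev_schedule_speed()/sirdev_schedule_dongle_open() wrappers used elsewhere in this file.

#include "sir-dev.h"

/* Hypothetical caller, for illustration only (not part of the patch). */
static int my_request_speed(struct sir_dev *dev)
{
	int err;

	/* This only records the request and queues fsm->work on irda_sir_wq;
	 * the port/dongle is actually reprogrammed later when kIrDAd runs
	 * sirdev_config_fsm().
	 */
	err = sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 115200);
	if (err == -EWOULDBLOCK) {
		/* fsm->sem was busy and we are in atomic context: the state
		 * machine is already running, so the caller must retry later.
		 */
	}
	return err;
}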
@@ -50,7 +342,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
 {
        int err;
 
-       IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);
+       IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
 
        err = sirdev_schedule_dongle_open(dev, type);
        if (unlikely(err))
@@ -60,6 +352,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
        up(&dev->fsm.sem);
        return err;
 }
+EXPORT_SYMBOL(sirdev_set_dongle);
 
 /* used by dongle drivers for dongle programming */
 
@@ -84,7 +377,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
 
        ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
        if (ret > 0) {
-               IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);
+               IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
 
                dev->tx_buff.data += ret;
                dev->tx_buff.len -= ret;
@@ -94,6 +387,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
        spin_unlock_irqrestore(&dev->tx_lock, flags);
        return ret;
 }
+EXPORT_SYMBOL(sirdev_raw_write);
 
 /* seems some dongle drivers may need this */
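
sirdev_raw_write()/sirdev_raw_read() are now exported for dongle drivers, which use them while programming the dongle from the FSM worker (process context, with rx disabled and the netdev queue stopped by sirdev_schedule_request()). A rough sketch of such a programming step, assuming a made-up command byte, reply format and settle time; only the helpers and the TRUE/FALSE convention are taken from this file, my_dongle_identify() is hypothetical:

#include <linux/delay.h>
#include "sir-dev.h"

/* Hypothetical dongle-programming step, for illustration only. */
static int my_dongle_identify(struct sir_dev *dev)
{
	static const char cmd[] = { 0x01 };	/* made-up "identify" command */
	char reply[4];
	int n;

	sirdev_set_dtr_rts(dev, TRUE, TRUE);	/* power the dongle via DTR/RTS */
	if (sirdev_raw_write(dev, cmd, sizeof(cmd)) < 0)
		return -EIO;
	msleep(20);				/* give the dongle time to answer */
	n = sirdev_raw_read(dev, reply, sizeof(reply));
	return (n > 0) ? 0 : -ENODEV;
}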
 
@@ -116,15 +410,17 @@ int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
 
        return count;
 }
+EXPORT_SYMBOL(sirdev_raw_read);
 
 int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
 {
        int ret = -ENXIO;
-       if (dev->drv->set_dtr_rts != 0)
+       if (dev->drv->set_dtr_rts)
                ret =  dev->drv->set_dtr_rts(dev, dtr, rts);
        return ret;
 }
-       
+EXPORT_SYMBOL(sirdev_set_dtr_rts);
+
 /**********************************************************************/
 
 /* called from client driver - likely with bh-context - to indicate
@@ -142,7 +438,7 @@ void sirdev_write_complete(struct sir_dev *dev)
        spin_lock_irqsave(&dev->tx_lock, flags);
 
        IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
-                  __FUNCTION__, dev->tx_buff.len);
+                  __func__, dev->tx_buff.len);
 
        if (likely(dev->tx_buff.len > 0))  {
                /* Write data left in transmit buffer */
@@ -155,12 +451,12 @@ void sirdev_write_complete(struct sir_dev *dev)
                else if (unlikely(actual<0)) {
                        /* could be dropped later when we have tx_timeout to recover */
                        IRDA_ERROR("%s: drv->do_write failed (%d)\n",
-                                  __FUNCTION__, actual);
+                                  __func__, actual);
                        if ((skb=dev->tx_skb) != NULL) {
                                dev->tx_skb = NULL;
                                dev_kfree_skb_any(skb);
-                               dev->stats.tx_errors++;               
-                               dev->stats.tx_dropped++;                      
+                               dev->netdev->stats.tx_errors++;
+                               dev->netdev->stats.tx_dropped++;
                        }
                        dev->tx_buff.len = 0;
                }
@@ -176,7 +472,7 @@ void sirdev_write_complete(struct sir_dev *dev)
                 * restarted when the irda-thread has completed the request.
                 */
 
-               IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
+               IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
                dev->raw_tx = 0;
                goto done;      /* no post-frame handling in raw mode */
        }
@@ -193,24 +489,24 @@ void sirdev_write_complete(struct sir_dev *dev)
         * re-activated.
         */
 
-       IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);
+       IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
                
        if ((skb=dev->tx_skb) != NULL) {
                dev->tx_skb = NULL;
-               dev->stats.tx_packets++;                      
-               dev->stats.tx_bytes += skb->len;
+               dev->netdev->stats.tx_packets++;
+               dev->netdev->stats.tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }
 
        if (unlikely(dev->new_speed > 0)) {
-               IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
+               IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
                err = sirdev_schedule_speed(dev, dev->new_speed);
                if (unlikely(err)) {
                        /* should never happen
                         * forget the speed change and hope the stack recovers
                         */
                        IRDA_ERROR("%s - schedule speed change failed: %d\n",
-                                  __FUNCTION__, err);
+                                  __func__, err);
                        netif_wake_queue(dev->netdev);
                }
                /* else: success
@@ -227,6 +523,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 done:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
 }
+EXPORT_SYMBOL(sirdev_write_complete);
 
 /* called from client driver - likely with bh-context - to give us
  * some more received bytes. We put them into the rx-buffer,
@@ -236,13 +533,13 @@ done:
 int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count) 
 {
        if (!dev || !dev->netdev) {
-               IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
+               IRDA_WARNING("%s(), not ready yet!\n", __func__);
                return -1;
        }
 
        if (!dev->irlap) {
                IRDA_WARNING("%s - too early: %p / %zd!\n",
-                            __FUNCTION__, cp, count);
+                            __func__, cp, count);
                return -1;
        }
 
@@ -251,8 +548,8 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
                 * just update stats and set media busy
                 */
                irda_device_set_media_busy(dev->netdev, TRUE);
-               dev->stats.rx_dropped++;
-               IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
+               dev->netdev->stats.rx_dropped++;
+               IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
                return 0;
        }
 
@@ -260,7 +557,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
        if (likely(atomic_read(&dev->enable_rx))) {
                while (count--)
                        /* Unwrap and destuff one byte */
-                       async_unwrap_char(dev->netdev, &dev->stats, 
+                       async_unwrap_char(dev->netdev, &dev->netdev->stats,
                                          &dev->rx_buff, *cp++);
        } else {
                while (count--) {
@@ -279,31 +576,26 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
 
        return 0;
 }
+EXPORT_SYMBOL(sirdev_receive);
 
 /**********************************************************************/
 
 /* callbacks from network layer */
 
-static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
-{
-       struct sir_dev *dev = ndev->priv;
-
-       return (dev) ? &dev->stats : NULL;
-}
-
-static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
+                                         struct net_device *ndev)
 {
-       struct sir_dev *dev = ndev->priv;
+       struct sir_dev *dev = netdev_priv(ndev);
        unsigned long flags;
        int actual = 0;
        int err;
        s32 speed;
 
-       IRDA_ASSERT(dev != NULL, return 0;);
+       IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
 
        netif_stop_queue(ndev);
 
-       IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);
+       IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
 
        speed = irda_get_next_speed(skb);
        if ((speed != dev->speed) && (speed != -1)) {
@@ -316,7 +608,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
                                 * stopped so the network layer will retry after the
                                 * fsm completes and wakes the queue.
                                 */
-                                return 1;
+                                return NETDEV_TX_BUSY;
                        }
                        else if (unlikely(err)) {
                                /* other fatal error - forget the speed change and
@@ -330,7 +622,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
                         */
 
                        dev_kfree_skb_any(skb);
-                       return 0;
+                       return NETDEV_TX_OK;
                } else
                        dev->new_speed = speed;
        }
@@ -340,7 +632,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        /* Check problems */
        if(spin_is_locked(&dev->tx_lock)) {
-               IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
+               IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
        }
 
        /* serialize with write completion */
@@ -356,7 +648,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
         */
        atomic_set(&dev->enable_rx, 0);
        if (unlikely(sirdev_is_receiving(dev)))
-               dev->stats.collisions++;
+               dev->netdev->stats.collisions++;
 
        actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
 
@@ -369,15 +661,15 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
        else if (unlikely(actual < 0)) {
                /* could be dropped later when we have tx_timeout to recover */
                IRDA_ERROR("%s: drv->do_write failed (%d)\n",
-                          __FUNCTION__, actual);
+                          __func__, actual);
                dev_kfree_skb_any(skb);
-               dev->stats.tx_errors++;               
-               dev->stats.tx_dropped++;                      
+               dev->netdev->stats.tx_errors++;
+               dev->netdev->stats.tx_dropped++;
                netif_wake_queue(ndev);
        }
        spin_unlock_irqrestore(&dev->tx_lock, flags);
 
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 /* called from network layer with rtnl hold */
@@ -385,12 +677,12 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
        struct if_irda_req *irq = (struct if_irda_req *) rq;
-       struct sir_dev *dev = ndev->priv;
+       struct sir_dev *dev = netdev_priv(ndev);
        int ret = 0;
 
        IRDA_ASSERT(dev != NULL, return -1;);
 
-       IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);
+       IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
        
        switch (cmd) {
        case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -462,7 +754,8 @@ static int sirdev_alloc_buffers(struct sir_dev *dev)
        dev->rx_buff.truesize = IRDA_SKB_MAX_MTU; 
 
        /* Bootstrap ZeroCopy Rx */
-       dev->rx_buff.skb = __dev_alloc_skb(dev->rx_buff.truesize, GFP_KERNEL);
+       dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
+                                             GFP_KERNEL);
        if (dev->rx_buff.skb == NULL)
                return -ENOMEM;
        skb_reserve(dev->rx_buff.skb, 1);
@@ -488,17 +781,15 @@ static int sirdev_alloc_buffers(struct sir_dev *dev)
 
 static void sirdev_free_buffers(struct sir_dev *dev)
 {
-       if (dev->rx_buff.skb)
-               kfree_skb(dev->rx_buff.skb);
-       if (dev->tx_buff.head)
-               kfree(dev->tx_buff.head);
+       kfree_skb(dev->rx_buff.skb);
+       kfree(dev->tx_buff.head);
        dev->rx_buff.head = dev->tx_buff.head = NULL;
        dev->rx_buff.skb = NULL;
 }
 
 static int sirdev_open(struct net_device *ndev)
 {
-       struct sir_dev *dev = ndev->priv;
+       struct sir_dev *dev = netdev_priv(ndev);
        const struct sir_driver *drv = dev->drv;
 
        if (!drv)
@@ -508,7 +799,7 @@ static int sirdev_open(struct net_device *ndev)
        if (!try_module_get(drv->owner))
                return -ESTALE;
 
-       IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+       IRDA_DEBUG(2, "%s()\n", __func__);
 
        if (sirdev_alloc_buffers(dev))
                goto errout_dec;
@@ -526,7 +817,7 @@ static int sirdev_open(struct net_device *ndev)
 
        netif_wake_queue(ndev);
 
-       IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);
+       IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
 
        return 0;
 
@@ -543,10 +834,10 @@ errout_dec:
 
 static int sirdev_close(struct net_device *ndev)
 {
-       struct sir_dev *dev = ndev->priv;
+       struct sir_dev *dev = netdev_priv(ndev);
        const struct sir_driver *drv;
 
-//     IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+//     IRDA_DEBUG(0, "%s\n", __func__);
 
        netif_stop_queue(ndev);
 
@@ -575,6 +866,12 @@ out:
        return 0;
 }
 
+static const struct net_device_ops sirdev_ops = {
+       .ndo_start_xmit = sirdev_hard_xmit,
+       .ndo_open       = sirdev_open,
+       .ndo_stop       = sirdev_close,
+       .ndo_do_ioctl   = sirdev_ioctl,
+};
 /* ----------------------------------------------------------------------------- */
 
 struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
@@ -582,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
        struct net_device *ndev;
        struct sir_dev *dev;
 
-       IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);
+       IRDA_DEBUG(0, "%s - %s\n", __func__, name);
 
        /* instead of adding tests to protect against drv->do_write==NULL
         * at several places we refuse to create a sir_dev instance for
@@ -596,10 +893,10 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
         */
        ndev = alloc_irdadev(sizeof(*dev));
        if (ndev == NULL) {
-               IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
+               IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
                goto out;
        }
-       dev = ndev->priv;
+       dev = netdev_priv(ndev);
 
        irda_init_max_qos_capabilies(&dev->qos);
        dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
@@ -614,24 +911,14 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
        spin_lock_init(&dev->tx_lock);
        init_MUTEX(&dev->fsm.sem);
 
-       INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
-       dev->fsm.rq.pending = 0;
-       init_timer(&dev->fsm.rq.timer);
-
        dev->drv = drv;
        dev->netdev = ndev;
 
-       SET_MODULE_OWNER(ndev);
-
        /* Override the network functions we need to use */
-       ndev->hard_start_xmit = sirdev_hard_xmit;
-       ndev->open = sirdev_open;
-       ndev->stop = sirdev_close;
-       ndev->get_stats = sirdev_get_stats;
-       ndev->do_ioctl = sirdev_ioctl;
+       ndev->netdev_ops = &sirdev_ops;
 
        if (register_netdev(ndev)) {
-               IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+               IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
                goto out_freenetdev;
        }
 
@@ -642,12 +929,13 @@ out_freenetdev:
 out:
        return NULL;
 }
+EXPORT_SYMBOL(sirdev_get_instance);
 
 int sirdev_put_instance(struct sir_dev *dev)
 {
        int err = 0;
 
-       IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+       IRDA_DEBUG(0, "%s\n", __func__);
 
        atomic_set(&dev->enable_rx, 0);
 
@@ -657,7 +945,7 @@ int sirdev_put_instance(struct sir_dev *dev)
        if (dev->dongle_drv)
                err = sirdev_schedule_dongle_close(dev);
        if (err)
-               IRDA_ERROR("%s - error %d\n", __FUNCTION__, err);
+               IRDA_ERROR("%s - error %d\n", __func__, err);
 
        sirdev_close(dev->netdev);
 
@@ -674,4 +962,24 @@ int sirdev_put_instance(struct sir_dev *dev)
 
        return 0;
 }
+EXPORT_SYMBOL(sirdev_put_instance);
+
+static int __init sir_wq_init(void)
+{
+       irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
+       if (!irda_sir_wq)
+               return -ENOMEM;
+       return 0;
+}
+
+static void __exit sir_wq_exit(void)
+{
+       destroy_workqueue(irda_sir_wq);
+}
+
+module_init(sir_wq_init);
+module_exit(sir_wq_exit);
 
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_DESCRIPTION("IrDA SIR core");
+MODULE_LICENSE("GPL");
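
Taken together, the exports added above (sirdev_get_instance, sirdev_put_instance, sirdev_write_complete, sirdev_receive, the raw helpers) form the interface a SIR port driver builds on: it feeds received bytes back via sirdev_receive() and reports transmit completion via sirdev_write_complete() from its interrupt/bh path. A rough, hypothetical sketch of that shape follows; the my_* functions and the "mysir" name are invented, the struct sir_driver field names are inferred from the call sites in this file (drv->do_write, ->chars_in_buffer, ->wait_until_sent, ->set_speed, ->set_dtr_rts, ->owner), and the prototypes are approximate.

#include <linux/module.h>
#include "sir-dev.h"

static int my_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
{
	/* push bytes into the UART fifo and return how many were accepted;
	 * completion is reported back via sirdev_write_complete()
	 */
	return len;
}

static int my_chars_in_buffer(struct sir_dev *dev)
{
	return 0;		/* bytes still pending in the hardware fifo */
}

static void my_wait_until_sent(struct sir_dev *dev)
{
	/* sleep until the fifo has drained */
}

static int my_set_speed(struct sir_dev *dev, unsigned speed)
{
	return 0;		/* reprogram the UART divisor, no extra delay needed */
}

static int my_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	return 0;		/* toggle the modem control lines feeding the dongle */
}

static struct sir_driver my_sir_driver = {
	.owner			= THIS_MODULE,
	.do_write		= my_do_write,
	.chars_in_buffer	= my_chars_in_buffer,
	.wait_until_sent	= my_wait_until_sent,
	.set_speed		= my_set_speed,
	.set_dtr_rts		= my_set_dtr_rts,
};

static struct sir_dev *my_dev;

static int __init my_init(void)
{
	my_dev = sirdev_get_instance(&my_sir_driver, "mysir");
	return my_dev ? 0 : -ENODEV;	/* the irda netdev is registered inside */
}

static void __exit my_exit(void)
{
	sirdev_put_instance(my_dev);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");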