IPoIB: Close race in ipoib_flush_paths()
drivers/infiniband/ulp/ipoib/ipoib_main.c
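ipoib_flush_paths() could free a path while a path record query was still outstanding, letting path_rec_completion() run against freed memory. Close the race by canceling any query still in flight and waiting on path->done, which path_rec_completion() signals once it is finished with the path, before the path is freed.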
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>

#include <linux/if_arp.h>       /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
        struct net_device *dev;
        struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        if (ipoib_ib_dev_open(dev))
                return -EINVAL;

        if (ipoib_ib_dev_up(dev)) {
                ipoib_ib_dev_stop(dev);
                return -EINVAL;
        }

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        netif_stop_queue(dev);

        /*
         * Now flush workqueue to make sure a scheduled task doesn't
         * bring our internal state back up.
         */
        flush_workqueue(ipoib_workqueue);

        ipoib_ib_dev_down(dev, 1);
        ipoib_ib_dev_stop(dev);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}

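/*
 * Path entries are kept in an rb-tree (priv->path_tree) keyed by
 * destination GID, with priv->path_list tracking every entry so the
 * whole set can be flushed at once.
 */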
static struct ipoib_path *__path_find(struct net_device *dev,
                                      union ib_gid *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid->raw, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);

                ipoib_neigh_free(neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
        struct ipoib_path_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->path.pathrec.dgid.raw, 0, 16);

        if (ipoib_path_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_path *path;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->path_tree);

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->path = *path;
                        ret = 0;
                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
                          struct ipoib_path *path)
{
        *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        list_splice(&priv->path_list, &remove_list);
        INIT_LIST_HEAD(&priv->path_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

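        /*
         * Cancel any queries still in flight and wait for each one's
         * completion callback before freeing the path.  priv->lock
         * must be dropped while sleeping, since path_rec_completion()
         * takes it before signalling path->done.
         */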
        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                spin_unlock_irqrestore(&priv->lock, flags);
                wait_for_completion(&path->done);
                path_free(dev, path);
                spin_lock_irqsave(&priv->lock, flags);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
}

static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_neigh *neigh;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (pathrec)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
                          be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
        else
                ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
                          status, IPOIB_GID_ARG(path->pathrec.dgid));

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av = {
                        .dlid          = be16_to_cpu(pathrec->dlid),
                        .sl            = pathrec->sl,
                        .port_num      = priv->port,
                        .static_rate   = pathrec->rate
                };

                ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        path->ah = ah;

        if (ah) {
                path->pathrec = *pathrec;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry(neigh, &path->neigh_list, list) {
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
        }

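        /*
         * Clearing path->query and completing path->done tells
         * ipoib_flush_paths() that this callback is finished with the
         * path, so it is now safe to free.
         */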
        path->query = NULL;
        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev,
                                          union ib_gid *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        path = kzalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev = dev;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);

        memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
        path->pathrec.sgid      = priv->local_gid;
        path->pathrec.pkey      = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path = 1;

        return path;
}

static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
                  IPOIB_GID_ARG(path->pathrec.dgid));

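        /*
         * Initialize path->done before issuing the query, so that
         * ipoib_flush_paths() has a valid completion to wait on no
         * matter how the query finishes.
         */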
        init_completion(&path->done);

        path->query_id =
                ib_sa_path_rec_get(priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID          |
                                   IB_SA_PATH_REC_SGID          |
                                   IB_SA_PATH_REC_NUMB_PATH     |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
                path->query = NULL;
                /*
                 * Complete path->done so that ipoib_flush_paths()
                 * cannot block forever on a query that never started.
                 */
                complete(&path->done);
                return path->query_id;
        }

        return 0;
}

static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;

        neigh = ipoib_neigh_alloc(skb->dst->neighbour);
        if (!neigh) {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        skb_queue_head_init(&neigh->queue);

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
        if (!path) {
                path = path_rec_create(dev,
                                       (union ib_gid *) (skb->dst->neighbour->ha + 4));
                if (!path)
                        goto err_path;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->ah) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;

                ipoib_send(dev, skb, path->ah,
                           be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
        } else {
                neigh->ah  = NULL;
                __skb_queue_tail(&neigh->queue, skb);

                if (!path->query && path_rec_start(dev, path))
                        goto err_list;
        }

        spin_unlock(&priv->lock);
        return;

err_list:
        list_del(&neigh->list);

err_path:
        ipoib_neigh_free(neigh);
        ++priv->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (skb->dst->neighbour->ha[4] != 0xff) {
                neigh_add_path(skb, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
        skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
        if (!path) {
                path = path_rec_create(dev,
                                       (union ib_gid *) (phdr->hwaddr + 4));
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (path_rec_start(dev, path)) {
                                spin_unlock(&priv->lock);
                                path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock(&priv->lock);
                return;
        }

        if (path->ah) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                ipoib_send(dev, skb, path->ah,
                           be32_to_cpup((__be32 *) phdr->hwaddr));
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock(&priv->lock);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        unsigned long flags;

        if (!spin_trylock_irqsave(&priv->tx_lock, flags))
                return NETDEV_TX_LOCKED;

        /*
         * Check if our queue is stopped.  Since we have the LLTX bit
         * set, we can't rely on netif_stop_queue() preventing our
         * xmit function from being called with a full queue.
         */
        if (unlikely(netif_queue_stopped(dev))) {
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        if (skb->dst && skb->dst->neighbour) {
                if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
                        ipoib_path_lookup(skb, dev);
                        goto out;
                }

                neigh = *to_ipoib_neigh(skb->dst->neighbour);

                if (likely(neigh->ah)) {
                        ipoib_send(dev, skb, neigh->ah,
                                   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
                        goto out;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock(&priv->lock);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock(&priv->lock);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicasts */
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;

                        ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
                                           IPOIB_GID_FMT "\n",
                                           skb->dst ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           be32_to_cpup((__be32 *) phdr->hwaddr),
                                           IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
                                dev_kfree_skb_any(skb);
                                ++priv->stats.tx_dropped;
                                goto out;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }

out:
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev->trans_start));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
        /* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             void *daddr, void *saddr, unsigned len)
{
        struct ipoib_header *header;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        if ((!skb->dst || !skb->dst->neighbour) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
                return;
        }

        queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_destructor(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        ipoib_dbg(priv,
                  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
                  be32_to_cpup((__be32 *) n->ha),
                  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

        spin_lock_irqsave(&priv->lock, flags);

        neigh = *to_ipoib_neigh(n);
        if (neigh) {
                if (neigh->ah)
                        ah = neigh->ah;
                list_del(&neigh->list);
                ipoib_neigh_free(neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ah)
                ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
{
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh)
                return NULL;

        neigh->neighbour = neighbour;
        *to_ipoib_neigh(neighbour) = neigh;

        return neigh;
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
        *to_ipoib_neigh(neigh->neighbour) = NULL;
        kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_destructor = ipoib_neigh_destructor;

        return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
                goto out;
        }

        priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
                                GFP_KERNEL);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, ipoib_sendq_size);
                goto out_rx_ring_cleanup;
        }

        /* priv->tx_head & tx_tail are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        kfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_files(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        kfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}

static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->open                = ipoib_open;
        dev->stop                = ipoib_stop;
        dev->change_mtu          = ipoib_change_mtu;
        dev->hard_start_xmit     = ipoib_start_xmit;
        dev->get_stats           = ipoib_get_stats;
        dev->tx_timeout          = ipoib_timeout;
        dev->hard_header         = ipoib_hard_header;
        dev->set_multicast_list  = ipoib_set_mcast_list;
        dev->neigh_setup         = ipoib_neigh_setup_dev;

        dev->watchdog_timeo      = HZ;

        dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = ipoib_sendq_size * 2;
        dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

        /* MTU will be reset when mcast join happens */
        dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
        priv->mcast_mtu          = priv->admin_mtu = dev->mtu;

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        SET_MODULE_OWNER(dev);

        priv->dev = dev;

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);

        mutex_init(&priv->mcast_mutex);
        mutex_init(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
        INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
        INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
        INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
        struct ipoib_dev_priv *priv =
                netdev_priv(container_of(cdev, struct net_device, class_dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t create_child(struct class_device *cdev,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
                             pkey);

        return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct class_device *cdev,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
                                pkey);

        return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return class_device_create_file(&dev->class_dev,
                                        &class_device_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        }

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        ipoib_create_debug_files(priv->dev);

        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (class_device_create_file(&priv->dev->class_dev,
                                     &class_device_attr_create_child))
                goto sysfs_failed;
        if (class_device_create_file(&priv->dev->class_dev,
                                     &class_device_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_files(priv->dev);
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_scheduled_work();

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

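        /*
         * Switches have a single management port, numbered 0; CAs and
         * routers number their physical ports from 1 to phys_port_cnt.
         */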
        if (device->node_type == IB_NODE_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);
                flush_scheduled_work();

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }

        kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
        int ret;

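        /*
         * Round the ring sizes up to a power of two and clamp them to
         * the supported range; the send/receive ring bookkeeping
         * assumes power-of-two sizes.
         */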
        ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
        ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_wq;

        return 0;

err_wq:
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);