/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

static int lro;
module_param(lro, bool, 0444);
MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");

static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
module_param(lro_max_aggr, int, 0644);
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
		 "(default = 64)");

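/*
 * Example (hypothetical values): all of these parameters except
 * lro_max_aggr are read-only after load (mode 0444), so they are
 * normally set on the modprobe command line, e.g.:
 *
 *	modprobe ib_ipoib send_queue_size=128 recv_queue_size=256 lro=1
 */
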
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

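/*
 * Layout of the 20-byte hardware broadcast address above: bytes 0-3
 * carry the reserved/flags byte plus the broadcast QPN 0xffffff, and
 * bytes 4-19 carry the IPv4 broadcast MGID ff12:401b:<pkey>::ffff:ffff.
 * Bytes 8-9 (zero here) are patched with the port P_Key in
 * ipoib_add_port().
 */
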
struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	napi_enable(&priv->napi);
	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev)) {
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (ipoib_ib_dev_up(dev)) {
		ipoib_ib_dev_stop(dev, 1);
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	napi_disable(&priv->napi);

	netif_stop_queue(dev);

	/*
	 * Now flush workqueue to make sure a scheduled task doesn't
	 * bring our internal state back up.
	 */
	flush_workqueue(ipoib_workqueue);

	ipoib_ib_dev_down(dev, 1);
	ipoib_ib_dev_stop(dev, 1);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

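/*
 * Path records are cached in an rb-tree (priv->path_tree) keyed on the
 * 16-byte destination GID, with a parallel list (priv->path_list) used
 * for iteration and bulk flushing.
 */
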
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		wait_for_completion(&path->done);
		path_free(dev, path);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

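/*
 * Note the unlock/relock dance in the loop above: wait_for_completion()
 * may sleep, so both priv->lock and priv->tx_lock are dropped while
 * waiting for an outstanding SA path query to finish, then reacquired
 * before examining the next path on the removal list.
 */
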
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	path->ah = ah;

	if (ah) {
		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;
			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
			       sizeof(union ib_gid));

			if (ipoib_cm_enabled(dev, neigh->neighbour)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					if (neigh->ah)
						ipoib_put_ah(neigh->ah);
					ipoib_neigh_free(dev, neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}

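/*
 * Note on the address arithmetic below: an IPoIB hardware address is
 * INFINIBAND_ALEN (20) bytes, with the first four bytes carrying the
 * remote QPN (extracted via IPOIB_QPN()) and the remaining 16 bytes the
 * port GID. Hence "+ 4" skips to the GID portion.
 */
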
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
	if (!neigh) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, skb->dst->neighbour->ha + 4);
	if (!path) {
		path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;
		memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
		       sizeof(union ib_gid));

		if (ipoib_cm_enabled(dev, neigh->neighbour)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock(&priv->lock);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(dev, neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path) {
		path = path_rec_create(dev, phdr->hwaddr + 4);
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock(&priv->lock);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
		return NETDEV_TX_LOCKED;

	if (likely(skb->dst && skb->dst->neighbour)) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			ipoib_path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (neigh->ah)
			if (unlikely((memcmp(&neigh->dgid.raw,
					     skb->dst->neighbour->ha + 4,
					     sizeof(union ib_gid))) ||
				     (neigh->dev != dev))) {
				spin_lock(&priv->lock);
				/*
				 * It's safe to call ipoib_put_ah() inside
				 * priv->lock here, because we know that
				 * path->ah will always hold one more reference,
				 * so ipoib_put_ah() will never do more than
				 * decrement the ref count.
				 */
				ipoib_put_ah(neigh->ah);
				list_del(&neigh->list);
				ipoib_neigh_free(dev, neigh);
				spin_unlock(&priv->lock);
				ipoib_path_lookup(skb, dev);
				goto out;
			}

		if (ipoib_cm_get(neigh)) {
			if (ipoib_cm_up(neigh)) {
				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
				goto out;
			}
		} else if (neigh->ah) {
			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
			goto out;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast*/
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   IPOIB_QPN(phdr->hwaddr),
					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
				dev_kfree_skb_any(skb);
				++dev->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb->dst || !skb->dst->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

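/*
 * The pseudoheader pushed above is consumed again in ipoib_start_xmit():
 * when an skb arrives there without a neighbour attached, the driver
 * pulls the pseudoheader back off and uses its hwaddr field to pick the
 * multicast group or unicast path record.
 */
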
static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	neigh = *to_ipoib_neigh(n);
	if (neigh)
		priv = netdev_priv(neigh->dev);
	else
		return;
	ipoib_dbg(priv,
		  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
		  IPOIB_QPN(n->ha),
		  IPOIB_GID_RAW_ARG(n->ha + 4));

	spin_lock_irqsave(&priv->lock, flags);

	if (neigh->ah)
		ah = neigh->ah;
	list_del(&neigh->list);
	ipoib_neigh_free(n->dev, neigh);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	neigh->dev = dev;
	*to_ipoib_neigh(neighbour) = neigh;
	skb_queue_head_init(&neigh->queue);
	ipoib_cm_set(neigh, NULL);

	return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
	struct sk_buff *skb;
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_cleanup = ipoib_neigh_cleanup;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

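/*
 * Design note (author's rationale not spelled out here): the RX ring
 * above uses kzalloc() while the TX ring uses vmalloc(); with a large
 * send_queue_size, a kmalloc() would need a high-order physically
 * contiguous allocation, which vmalloc() avoids.
 */
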
void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	unsigned int ip_len;
	struct iphdr *iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		return -1;

	/*
	 * In the future we may add an else clause that verifies the
	 * checksum and allows devices which do not calculate checksum
	 * to use LRO.
	 */
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
		return -1;

	/* Check for non-TCP packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if IP header and TCP header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
{
	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
	priv->lro.lro_mgr.dev		 = priv->dev;
	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
}

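/*
 * The fields above configure the inet_lro manager; the receive
 * completion path (in ipoib_ib.c, not shown here) is expected to hand
 * incoming skbs to lro_receive_skb() and flush the manager at the end
 * of each NAPI poll when NETIF_F_LRO is enabled on the device.
 */
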
static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open		 = ipoib_open;
	dev->stop		 = ipoib_stop;
	dev->change_mtu		 = ipoib_change_mtu;
	dev->hard_start_xmit	 = ipoib_start_xmit;
	dev->tx_timeout		 = ipoib_timeout;
	dev->header_ops		 = &ipoib_header_ops;
	dev->set_multicast_list	 = ipoib_set_mcast_list;
	dev->neigh_setup	 = ipoib_neigh_setup_dev;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_LLTX		|
				    NETIF_F_HIGHDMA);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	ipoib_lro_setup(priv);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	mutex_init(&priv->mcast_mutex);
	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_WORK(&priv->pkey_event_task, ipoib_pkey_event);
	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
			   "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

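/*
 * Example (hypothetical interface name and P_Key): a child interface
 * for P_Key 0x8001 can be created and removed from userspace via the
 * attributes above:
 *
 *	echo 0x8001 > /sys/class/net/ib0/create_child
 *	echo 0x8001 > /sys/class/net/ib0/delete_child
 */
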
int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_device_attr *device_attr;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		goto device_init_failed;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		goto device_init_failed;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	}

	if (lro)
		priv->dev->features |= NETIF_F_LRO;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
		priv->dev->features |= NETIF_F_TSO;

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_scheduled_work();

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_scheduled_work();

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
						     IPOIB_MIN_QUEUE_SIZE));

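	/*
	 * Worked example (hypothetical request): send_queue_size=100 is
	 * first rounded up to the next power of two (128) above, then
	 * clamped to at most IPOIB_MAX_QUEUE_SIZE and to at least the
	 * larger of 2 * MAX_SEND_CQE and IPOIB_MIN_QUEUE_SIZE.
	 */
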
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed. We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);