/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>

#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		return NULL;
	}

	ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sge list;
	struct ib_recv_wr param;
	struct ib_recv_wr *bad_wr;
	int ret;

	list.addr     = priv->rx_ring[id].mapping;
	list.length   = IPOIB_BUF_SIZE;
	list.lkey     = priv->mr->lkey;

	param.next    = NULL;
	param.wr_id   = id | IPOIB_OP_RECV;
	param.sg_list = &list;
	param.num_sge = 1;

	ret = ib_post_recv(priv->qp, &param, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}
static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	u64 addr;

	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
	if (!skb)
		return -ENOMEM;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
				 DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	priv->rx_ring[id].skb     = skb;
	priv->rx_ring[id].mapping = addr;

	return 0;
}
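/*
 * A note on the skb_reserve(skb, 4) above: the 40-byte GRH plus the
 * 4-byte IPoIB encapsulation header put the IP header 44 bytes into
 * the buffer.  Reserving 4 extra bytes moves it to offset 48, a
 * multiple of 16, so the IP header ends up 16-byte aligned.
 */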
static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 addr;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb  = priv->rx_ring[wr_id].skb;
	addr = priv->rx_ring[wr_id].mapping;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ib_dma_unmap_single(priv->ca, addr,
				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);

	skb_put(skb, wc->byte_len);
	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;

	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}
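/*
 * Summary of the receive path above: the ring index travels in the
 * wr_id of the completion.  On success the skb is sized to
 * wc->byte_len, the GRH is stripped, the encapsulation header
 * supplies skb->protocol, and the ring slot is refilled (or the old
 * buffer reused when allocation fails) before the slot is reposted.
 */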
static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;

	mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
				       DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
		return -EIO;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + 1] = ib_dma_map_page(ca, frag->page,
						 frag->page_offset, frag->size,
						 DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
			goto partial_error;
	}
	return 0;

partial_error:
	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	for (; i > 0; --i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
		ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE);
	}
	return -EIO;
}
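/*
 * On a partial mapping failure the unwind above releases everything
 * already mapped: mapping[0] is the linear head, and mapping[1..i]
 * are the page fragments mapped before fragment i failed.
 */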
static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;

	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		ib_dma_unmap_page(ca, mapping[i + 1], frag->size,
				  DMA_TO_DEVICE);
	}
}
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}
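/*
 * Note the equality test on the decrement above: the queue is woken
 * only at the moment tx_outstanding falls back to half the send queue
 * size, so a stalled queue triggers exactly one wake-up rather than
 * one per completion.
 */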
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else {
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_tx_wc(dev, wc);
				else
					ipoib_ib_handle_tx_wc(dev, wc);
			}
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		netif_rx_complete(dev, napi);
		if (unlikely(ib_req_notify_cq(priv->cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    netif_rx_reschedule(dev, napi))
			goto poll_more;
	}

	return done;
}
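/*
 * IB_CQ_REPORT_MISSED_EVENTS closes the race between the final poll
 * and re-arming the CQ: if a completion arrived after the last
 * ib_poll_cq() but before the notify call, ib_req_notify_cq() returns
 * nonzero and polling restarts instead of leaving the event unhandled.
 */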
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_rx_schedule(dev, &priv->napi);
}
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    u64 *mapping, int headlen,
			    skb_frag_t *frags, int nr_frags)
{
	struct ib_send_wr *bad_wr;
	int i;

	priv->tx_sge[0].addr   = mapping[0];
	priv->tx_sge[0].length = headlen;
	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + 1].addr   = mapping[i + 1];
		priv->tx_sge[i + 1].length = frags[i].size;
	}
	priv->tx_wr.num_sge	     = nr_frags + 1;
	priv->tx_wr.wr_id	     = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah	     = address;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}
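/*
 * Gather-list layout used above: tx_sge[0] always covers the linear
 * skb head and tx_sge[1..nr_frags] cover the paged fragments, which
 * is why num_sge is nr_frags + 1 no matter how the skb was built.
 */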
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;

	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
		return;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
			       address->ah, qpn,
			       tx_req->mapping, skb_headlen(skb),
			       skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
			netif_stop_queue(dev);
		}
	}
}
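/*
 * tx_head only ever increases; a slot is addressed as tx_head masked
 * with (ipoib_sendq_size - 1), which relies on ipoib_sendq_size being
 * a power of two.  tx_outstanding counts posted-but-uncompleted sends
 * and drives netif_stop_queue()/netif_wake_queue().
 */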
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}
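/*
 * The signed subtraction above is a wraparound-safe test that tx_tail
 * has passed the AH's last_send: casting both counters to int keeps
 * the comparison correct after the counters wrap, as long as they
 * stay within 2^31 of each other.
 */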
void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;
}
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}
int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}
int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}
static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	do {
		n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
			}
		}
	} while (n == IPOIB_NUM_WC);
}
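/*
 * The do/while above repeats as long as ib_poll_cq() fills the whole
 * ibwc array; a short read means the CQ is empty and the drain is
 * complete.
 */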
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ib_dma_unmap_single(priv->ca,
						    rx_req->mapping,
						    IPOIB_BUF_SIZE,
						    DMA_FROM_DEVICE);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);

	return 0;
}
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, pkey_event);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (pkey_event) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			ipoib_pkey_dev_delay_open(dev);
			return;
		}
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

		/* restart QP only if P_Key index is changed */
		if (new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	ipoib_dbg(priv, "flushing\n");

	ipoib_ib_dev_down(dev, 0);

	if (pkey_event) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}
void ipoib_ib_dev_flush(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_task);

	ipoib_dbg(priv, "Flushing %s\n", priv->dev->name);
	__ipoib_ib_dev_flush(priv, 0);
}
void ipoib_pkey_event(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_event_task);

	ipoib_dbg(priv, "Flushing %s and restarting its QP\n", priv->dev->name);
	__ipoib_ib_dev_flush(priv, 1);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}
/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism. It uses the same approach implemented for the
 * multicast group join. The single goal of this implementation is to
 * quickly address Bug #2507. This implementation will probably be
 * removed when the P_Key change async notification is available.
 */
void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}
int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Look for the interface pkey value in the IB Port P_Key table and */
	/* set the interface pkey assignment flag */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}