/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id  = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

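/*
 * Repost the receive buffer for SRQ slot @id.  If the post fails, the
 * buffer is unmapped and freed so the ring slot is left empty.
 */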
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}

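/*
 * Allocate a receive skb with a linear header of IPOIB_CM_HEAD_SIZE
 * bytes plus @frags page-sized fragments, and DMA-map every piece.
 * On any failure the mappings done so far are unwound and NULL is
 * returned.
 */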
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	kfree(rx_ring);
}

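/*
 * Kick off draining of flushed RX QPs: post a single drain WR to the
 * first QP on the flush list and then move the whole flush list over
 * to the drain list.
 */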
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on the flush list are in the error state.  This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr  = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}

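/*
 * Transition a passive-side (RX) QP through INIT -> RTR -> RTS using
 * the attributes the IB CM derives for this connection.  A failure to
 * reach RTS is only warned about, to work around the firmware issue
 * described below.
 */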
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;
	int i;

	rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
	if (!rx->rx_ring)
		return -ENOMEM;

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

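/*
 * Handle an incoming connection request: create and set up the
 * passive-side QP, allocate a private receive ring if no SRQ is
 * available, add the connection to the passive_ids list and reply
 * with a REP.
 */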
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

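/*
 * Receive completion handler for connected mode.  Small packets are
 * copied into a freshly allocated skb; larger ones hand the posted
 * buffer up the stack and a replacement buffer is mapped into the
 * ring before the slot is reposted.
 */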
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + 12);
		if (small_skb) {
			skb_reserve(small_skb, 12);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p, wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge[0].addr   = addr;
	priv->tx_sge[0].length = len;

	priv->tx_wr.num_sge = 1;
	priv->tx_wr.wr_id   = wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

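/*
 * Transmit one skb over a connected-mode QP: record it in the TX
 * ring, DMA-map it and post a single-SGE send WR.  The net queue is
 * stopped once the ring is full.
 */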
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping[0] = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
		}
	}
}

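/*
 * Send completion handler for connected mode: unmap and free the skb,
 * wake the net queue if it was stopped, and start tearing the
 * connection down on any error other than a flush.
 */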
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

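/*
 * Stop listening for new connections and move every passive
 * connection to the error state, then wait (up to five seconds) for
 * all RX QPs to drain before reaping them.
 */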
void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}

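/*
 * Active-side REP handler: bring the TX QP to RTR and then RTS,
 * retransmit anything that was queued on the neighbour while the
 * connection was being set up, and acknowledge with an RTU.
 */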
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq = priv->recv_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx
	};

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path	= pathrec;
	req.alternate_path	= NULL;
	req.service_id		= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num		= qp->qp_num;
	req.qp_type		= qp->qp_type;
	req.private_data	= &data;
	req.private_data_len	= sizeof data;
	req.flow_control	= 0;

	req.starting_psn	= 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources		= 4;
	req.remote_cm_response_timeout	= 20;
	req.local_cm_response_timeout	= 20;
	req.retry_count			= 0; /* RFC draft warns against retries */
	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
	req.max_cm_retries		= 15;
	req.srq				= ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}

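/*
 * Tear down an active-side connection: wait up to five seconds for
 * outstanding sends to complete, then unmap and free whatever is
 * still on the TX ring before destroying the QP.
 */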
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len,
				    DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		spin_lock_irqsave(&priv->tx_lock, flags);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
	}
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}

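/*
 * Periodic stale-connection scan: passive connections that have been
 * idle for longer than IPOIB_CM_RX_TIMEOUT are moved to the error
 * list and their QPs are put into the error state.
 */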
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");

		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		dev->mtu = min(priv->mcast_mtu, dev->mtu);
		ipoib_flush_paths(dev);

		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
			if (priv->hca_caps & IB_DEVICE_UD_TSO)
				dev->features |= NETIF_F_TSO;
		}

		return count;
	}

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
	}
}

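/*
 * Per-device connected-mode setup: initialize the CM lists and work
 * items, create an SRQ if the HCA supports one, set up the shared
 * receive WR/SGE templates and prepost the SRQ receive ring.
 */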
int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(dev, attr.max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags  = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags  = IPOIB_CM_RX_SG;
	}

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].lkey = priv->mr->lkey;

	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].length = PAGE_SIZE;
	priv->cm.rx_wr.next = NULL;
	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
	priv->cm.rx_wr.num_sge = priv->cm.num_frags;

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}