IB: Add CQ comp_vector support
drivers/infiniband/ulp/ipoib/ipoib_cm.c
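
This blob reflects the verbs API change named in the commit title:
ib_create_cq() gained a trailing comp_vector argument, and the CQ
allocation in this file (the per-connection send CQ in
ipoib_cm_tx_init()) now passes one explicitly (0, the default vector).
A minimal sketch of the updated call, assuming the post-change
prototype:

	/*
	 * cqe is the minimum number of CQ entries; the final 0 is
	 * comp_vector, which selects the completion event vector (and
	 * hence which CPU's interrupt handles this CQ's events) on
	 * HCAs that support multiple event vectors.
	 */
	cq = ib_create_cq(device, comp_handler, event_handler, context,
			  cqe, 0);
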
/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#include "ipoib.h"

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

struct ipoib_cm_id {
	struct ib_cm_id *id;
	int flags;
	u32 remote_qpn;
	u32 remote_mtu;
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

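/*
 * A note on receive buffer layout (inferred from the code below): each
 * SRQ ring entry is an skb with a small linear "head" buffer of
 * IPOIB_CM_HEAD_SIZE bytes (mapping[0]) plus up to IPOIB_CM_RX_SG - 1
 * full pages attached as fragments (mapping[1..]).  Small packets only
 * touch the head buffer; larger ones spill into the pages.
 */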
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

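/*
 * Repost receive buffer 'id' to the SRQ.  All connections share one
 * rx_wr/rx_sge template in priv->cm, so only the buffer addresses and
 * the wr_id need refreshing; the IPOIB_CM_OP_SRQ bit in the wr_id lets
 * the completion path tell connected-mode receives apart from UD ones.
 */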
static int ipoib_cm_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	priv->cm.srq_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	/*
	 * Only frags 0..i-1 were successfully mapped (entry i either failed
	 * to map or was never allocated), so unmap mapping[1..i] only.
	 */
	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

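/*
 * Passive (receive) side: each remote peer gets its own RC QP, but all
 * of them feed the shared SRQ and the device's single CQ, so one poll
 * loop services every connection.  The send queue is never used; see
 * the FIXMEs below about zero-sized send queues.
 */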
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq = priv->cq, /* does not matter, we never send anything */
		.recv_cq = priv->cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};
	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.target_ack_delay = 20; /* FIXME */
	rep.srq = 1;
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

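/*
 * Handle an incoming connection request: allocate the RC QP, bring it
 * to RTR with a random starting PSN, and reply with a REP whose private
 * data advertises our UD QPN and receive buffer size.  The new
 * connection is tracked on the passive_ids LRU list so the stale task
 * can reap it if it goes idle.
 */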
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		goto err_rep;
	}

	cm_id->context = p;
	p->jiffies = jiffies;
	spin_lock_irq(&priv->lock);
	list_add(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	return 0;

err_rep:
err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;
	int ret;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		spin_lock_irq(&priv->lock);
		if (list_empty(&p->list))
			ret = 0; /* Connection is going away already. */
		else {
			list_del_init(&p->list);
			ret = -ECONNRESET;
		}
		spin_unlock_irq(&priv->lock);
		if (ret) {
			ib_destroy_qp(p->qp);
			kfree(p);
			return ret;
		}
		return 0;
	default:
		return 0;
	}
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

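/*
 * Receive completion handler, called from the device's main completion
 * path for work requests tagged with IPOIB_CM_OP_SRQ.  On success the
 * full skb is swapped out of the SRQ ring for a fresh one and handed up
 * the stack; in all cases the ring slot is reposted at 'repost'.
 */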
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->cm.srq_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++priv->stats.rx_dropped;
		goto repost;
	}

	if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
		p = wc->qp->qp_context;
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do
			 * not re-add it if it has been removed. */
			if (!list_empty(&p->list))
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
			queue_delayed_work(ipoib_workqueue,
					   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++priv->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++priv->stats.rx_packets;
	priv->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_rx_ni(skb);

repost:
	if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_cm_post_receive failed "
			   "for buf %d\n", wr_id);
}

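/*
 * Post one send WR on a connection's RC QP.  The tx_sge/tx_wr template
 * in priv is shared with the datagram path (callers are presumably
 * serialized by tx_lock), so only the address, length and wr_id are
 * filled in here; the CM wr_id is just the ring index, with no
 * IPOIB_CM_OP_SRQ bit set.
 */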
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr             = addr;
	priv->tx_sge.length           = len;

	priv->tx_wr.wr_id             = wr_id;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (tx->tx_head - tx->tx_tail == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			set_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		}
	}
}

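/*
 * Send completion handler for a connection's private send CQ.  Frees
 * the skb, advances the tail, and wakes the net queue once the ring
 * drains to half full.  A hard error (anything but success or a flush)
 * tears the connection down and queues it for the reap task.
 */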
static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx,
				  struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++priv->stats.tx_packets;
	priv->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags)) &&
	    tx->tx_head - tx->tx_tail <= ipoib_sendq_size >> 1) {
		clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		netif_wake_queue(dev);
	}

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		/* queue would be re-started anyway when TX is destroyed,
		 * but it makes sense to do it ASAP here. */
		if (test_and_clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags))
			netif_wake_queue(dev);

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

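/*
 * Completion callback for the per-connection send CQ.  The CQ is
 * re-armed before draining so that a completion arriving after the
 * final empty poll still raises a new event; polling repeats while
 * full batches of IPOIB_NUM_WC entries keep coming back.
 */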
static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
{
	struct ipoib_cm_tx *tx = tx_ptr;
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, tx->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_cm_handle_tx_wc(tx->dev, tx, tx->ibwc + i);
	} while (n == IPOIB_NUM_WC);
}

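/*
 * Start listening for connection requests.  The service ID is the
 * IPoIB CM IETF ID ORed with our UD QPN, so a peer that learned our
 * QPN from the IPoIB hardware address knows which ID to connect to.
 */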
int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		priv->cm.id = NULL;
		return ret;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		ib_destroy_cm_id(priv->cm.id);
		priv->cm.id = NULL;
		return ret;
	}
	return 0;
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;
	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_del_init(&p->list);
		spin_unlock_irq(&priv->lock);
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
		spin_lock_irq(&priv->lock);
	}
	spin_unlock_irq(&priv->lock);

	cancel_delayed_work(&priv->cm.stale_task);
}

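/*
 * Active (send) side REP handler: validate the peer's advertised
 * receive buffer size against our MTU, walk the QP through RTR and
 * RTS, flush any packets that were queued on the neigh while the
 * connection was being set up, and finally confirm with an RTU.
 */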
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
			   p->mtu, priv->dev->mtu);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ib_cq *cq)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {};
	attr.recv_cq = priv->cq;
	attr.srq = priv->cm.srq;
	attr.cap.max_send_wr = ipoib_sendq_size;
	attr.cap.max_send_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = cq;
	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path              = pathrec;
	req.alternate_path            = NULL;
	req.service_id                = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num                    = qp->qp_num;
	req.qp_type                   = qp->qp_type;
	req.private_data              = &data;
	req.private_data_len          = sizeof data;
	req.flow_control              = 0;

	req.starting_psn              = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources       = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout  = 20;
	req.retry_count               = 0; /* RFC draft warns against retries */
	req.rnr_retry_count           = 0; /* RFC draft warns against retries */
	req.max_cm_retries            = 15;
	req.srq                       = 1;
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

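/*
 * Bring up the active side of a connection: tx ring, private send CQ,
 * RC QP, CM ID, then fire off the REQ.  The ib_create_cq() call here
 * passes the new comp_vector argument as 0 rather than spreading
 * connections across the HCA's event vectors.
 */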
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			     GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

	p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
			     ipoib_sendq_size + 1, 0);
	if (IS_ERR(p->cq)) {
		ret = PTR_ERR(p->cq);
		ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
		goto err_cq;
	}

	ret = ib_req_notify_cq(p->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		ipoib_warn(priv, "failed to request completion notification: %d\n", ret);
		goto err_req_notify;
	}

	p->qp = ipoib_cm_create_tx_qp(p->dev, p->cq);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to INIT: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_req_notify:
err_qp:
	p->qp = NULL;
	ib_destroy_cq(p->cq);
err_cq:
	p->cq = NULL;
err_tx:
	return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->qp)
		ib_destroy_qp(p->qp);

	if (p->cq)
		ib_destroy_cq(p->cq);

	if (test_bit(IPOIB_FLAG_NETIF_STOPPED, &p->flags))
		netif_wake_queue(p->dev);

	if (p->tx_ring) {
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
			ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
					    DMA_TO_DEVICE);
			dev_kfree_skb_any(tx_req->skb);
			++p->tx_tail;
		}

		kfree(p->tx_ring);
	}

	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
		tx->neigh = NULL;
	}
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

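/*
 * Periodic reaper for idle passive connections.  passive_ids is kept in
 * LRU order by the receive path, so scanning from the tail and stopping
 * at the first recently used entry touches only expired connections.
 */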
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_del_init(&p->list);
		spin_unlock_irq(&priv->lock);
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
		spin_lock_irq(&priv->lock);
	}
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		dev->mtu = min(priv->mcast_mtu, dev->mtu);
		ipoib_flush_paths(dev);
		return count;
	}

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

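/*
 * Per-device connected mode init: create the SRQ and its ring of
 * max-size receive buffers, set up the shared receive WR template, and
 * flag connected-mode support in the hardware address (IPOIB_FLAGS_RC).
 */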
int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = IPOIB_CM_RX_SG
		}
	};
	int ret, i;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		ret = PTR_ERR(priv->cm.srq);
		priv->cm.srq = NULL;
		return ret;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ipoib_cm_dev_cleanup(dev);
		return -ENOMEM;
	}

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].lkey = priv->mr->lkey;

	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].length = PAGE_SIZE;
	priv->cm.rx_wr.next = NULL;
	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
	priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
					   priv->cm.srq_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -ENOMEM;
		}
		if (ipoib_cm_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_cm_post_receive failed for buf %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -EIO;
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;
	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->cm.srq_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      priv->cm.srq_ring[i].mapping);
			dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
			priv->cm.srq_ring[i].skb = NULL;
		}
	kfree(priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}