/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/utsname.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_ipath_lkey_table_size = 12;
module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
                   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
                 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_ipath_max_pds = 0xFFFF;
module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_pds,
                 "Maximum number of protection domains to support");

static unsigned int ib_ipath_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_ipath_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqes,
                 "Maximum number of completion queue entries to support");

unsigned int ib_ipath_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
                   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_ipath_max_qps = 16384;
module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_ipath_max_sges = 0x60;
module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_ipath_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
                   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
                 "Maximum number of multicast groups to support");

unsigned int ib_ipath_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
                   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
                 "Maximum number of attached QPs to support");

unsigned int ib_ipath_max_srqs = 1024;
module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_ipath_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
                   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
                   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_ipath_disable_sma;
module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");

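/*
 * Table mapping each QP state to the set of post/process send and
 * receive operations permitted in that state.
 */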
const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = IPATH_POST_RECV_OK,
        [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
        [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
            IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
        [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
            IPATH_POST_SEND_OK,
        [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
        [IB_QPS_ERR] = 0,
};

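/* Per-process user context; currently just wraps the core ib_ucontext. */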
struct ipath_ucontext {
        struct ib_ucontext ibucontext;
};

static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
                                                  *ibucontext)
{
        return container_of(ibucontext, struct ipath_ucontext, ibucontext);
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
        [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
        [IB_WR_SEND] = IB_WC_SEND,
        [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
        [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
static __be64 sys_image_guid;

/**
 * ipath_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 */
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                memcpy(sge->vaddr, data, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }
}

/**
 * ipath_skip_sge - skip over SGE memory (XXX: almost a duplicate of ipath_copy_sge)
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
}

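/*
 * Generate a flushed completion entry (IB_WC_WR_FLUSH_ERR) for a work
 * request posted while the QP is in the SQE or ERR state.
 */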
static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
{
        struct ib_wc wc;

        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wr->wr_id;
        wc.status = IB_WC_WR_FLUSH_ERR;
        wc.opcode = ib_ipath_wc_opcode[wr->opcode];
        wc.qp = &qp->ibqp;
        ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the ipath_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sg_list = ss->sg_list;
        struct ipath_sge sge = ss->sge;
        u8 num_sge = ss->num_sge;
        u32 ndesc = 1;  /* count the header */

        while (length) {
                u32 len = sge.length;

                if (len > length)
                        len = length;
                if (len > sge.sge_length)
                        len = sge.sge_length;
                BUG_ON(len == 0);
                if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
                    (len != length && (len & (sizeof(u32) - 1)))) {
                        ndesc = 0;
                        break;
                }
                ndesc++;
                sge.vaddr += len;
                sge.length -= len;
                sge.sge_length -= len;
                if (sge.sge_length == 0) {
                        if (--num_sge)
                                sge = *sg_list++;
                } else if (sge.length == 0 && sge.mr != NULL) {
                        if (++sge.n >= IPATH_SEGSZ) {
                                if (++sge.m >= sge.mr->mapsz)
                                        break;
                                sge.n = 0;
                        }
                        sge.vaddr =
                                sge.mr->map[sge.m]->segs[sge.n].vaddr;
                        sge.length =
                                sge.mr->map[sge.m]->segs[sge.n].length;
                }
                length -= len;
        }
        return ndesc;
}

/*
 * Copy from the SGEs to the data buffer.
 */
static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
                                u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                memcpy(data, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }
}

/**
 * ipath_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
        struct ipath_swqe *wqe;
        u32 next;
        int i;
        int j;
        int acc;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* Check that state is OK to post send. */
        if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) {
                if (qp->state != IB_QPS_SQE && qp->state != IB_QPS_ERR)
                        goto bail_inval;
                /* C10-96 says generate a flushed completion entry. */
                ipath_flush_wqe(qp, wr);
                ret = 0;
                goto bail;
        }

        /* IB spec says that num_sge == 0 is OK. */
        if (wr->num_sge > qp->s_max_sge)
                goto bail_inval;

        /*
         * Don't allow RDMA reads or atomic operations on UC QPs, or any
         * undefined operations.
         * Make sure the buffer is large enough to hold the result of an
         * atomic operation.
         */
        if (qp->ibqp.qp_type == IB_QPT_UC) {
                if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
                        goto bail_inval;
        } else if (qp->ibqp.qp_type == IB_QPT_UD) {
                /* Check UD opcode */
                if (wr->opcode != IB_WR_SEND &&
                    wr->opcode != IB_WR_SEND_WITH_IMM)
                        goto bail_inval;
                /* Check UD destination address PD */
                if (qp->ibqp.pd != wr->wr.ud.ah->pd)
                        goto bail_inval;
        } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
                goto bail_inval;
        else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
                   (wr->num_sge == 0 ||
                    wr->sg_list[0].length < sizeof(u64) ||
                    wr->sg_list[0].addr & (sizeof(u64) - 1)))
                goto bail_inval;
        else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
                goto bail_inval;

        next = qp->s_head + 1;
        if (next >= qp->s_size)
                next = 0;
        if (next == qp->s_last) {
                ret = -ENOMEM;
                goto bail;
        }

        wqe = get_swqe_ptr(qp, qp->s_head);
        wqe->wr = *wr;
        wqe->length = 0;
        if (wr->num_sge) {
                acc = wr->opcode >= IB_WR_RDMA_READ ?
                        IB_ACCESS_LOCAL_WRITE : 0;
                for (i = 0, j = 0; i < wr->num_sge; i++) {
                        u32 length = wr->sg_list[i].length;
                        int ok;

                        if (length == 0)
                                continue;
                        ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
                                           &wr->sg_list[i], acc);
                        if (!ok)
                                goto bail_inval;
                        wqe->length += length;
                        j++;
                }
                wqe->wr.num_sge = j;
        }
        if (qp->ibqp.qp_type == IB_QPT_UC ||
            qp->ibqp.qp_type == IB_QPT_RC) {
                if (wqe->length > 0x80000000U)
                        goto bail_inval;
        } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
                goto bail_inval;
        wqe->ssn = qp->s_ssn++;
        qp->s_head = next;

        ret = 0;
        goto bail;

bail_inval:
        ret = -EINVAL;
bail:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}

/**
 * ipath_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                           struct ib_send_wr **bad_wr)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        int err = 0;

        for (; wr; wr = wr->next) {
                err = ipath_post_one_send(qp, wr);
                if (err) {
                        *bad_wr = wr;
                        goto bail;
                }
        }

        /* Try to do the send work in the caller's context. */
        ipath_do_send((unsigned long) qp);

bail:
        return err;
}

/**
 * ipath_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_rwq *wq = qp->r_rq.wq;
        unsigned long flags;
        int ret;

        /* Check that state is OK to post receive. */
        if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
                *bad_wr = wr;
                ret = -EINVAL;
                goto bail;
        }

        for (; wr; wr = wr->next) {
                struct ipath_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto bail;
                }

                spin_lock_irqsave(&qp->r_rq.lock, flags);
                next = wq->head + 1;
                if (next >= qp->r_rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}

/**
 * ipath_qp_rcv - process an incoming packet on a QP
 * @dev: the device the packet came on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void ipath_qp_rcv(struct ipath_ibdev *dev,
                         struct ipath_ib_header *hdr, int has_grh,
                         void *data, u32 tlen, struct ipath_qp *qp)
{
        /* Check for valid receive state. */
        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
                dev->n_pkt_drops++;
                return;
        }

        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (ib_ipath_disable_sma)
                        break;
                /* FALLTHROUGH */
        case IB_QPT_UD:
                ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        case IB_QPT_RC:
                ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        case IB_QPT_UC:
                ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        default:
                break;
        }
}

/**
 * ipath_ib_rcv - process an incoming packet
 * @dev: the device pointer
 * @rhdr: the header of the packet
 * @data: the packet data
 * @tlen: the packet length
 *
 * This is called from ipath_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
                  u32 tlen)
{
        struct ipath_ib_header *hdr = rhdr;
        struct ipath_other_headers *ohdr;
        struct ipath_qp *qp;
        u32 qp_num;
        int lnh;
        u8 opcode;
        u16 lid;

        if (unlikely(dev == NULL))
                goto bail;

        if (unlikely(tlen < 24)) {      /* LRH+BTH+CRC */
                dev->rcv_errors++;
                goto bail;
        }

        /* Check for a valid destination LID (see ch. 7.11.1). */
        lid = be16_to_cpu(hdr->lrh[1]);
        if (lid < IPATH_MULTICAST_LID_BASE) {
                lid &= ~((1 << dev->dd->ipath_lmc) - 1);
                if (unlikely(lid != dev->dd->ipath_lid)) {
                        dev->rcv_errors++;
                        goto bail;
                }
        }

        /* Check for GRH */
        lnh = be16_to_cpu(hdr->lrh[0]) & 3;
        if (lnh == IPATH_LRH_BTH)
                ohdr = &hdr->u.oth;
        else if (lnh == IPATH_LRH_GRH)
                ohdr = &hdr->u.l.oth;
        else {
                dev->rcv_errors++;
                goto bail;
        }

        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        dev->opstats[opcode].n_bytes += tlen;
        dev->opstats[opcode].n_packets++;

        /* Get the destination QP number. */
        qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
        if (qp_num == IPATH_MULTICAST_QPN) {
                struct ipath_mcast *mcast;
                struct ipath_mcast_qp *p;

                if (lnh != IPATH_LRH_GRH) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
                if (mcast == NULL) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                dev->n_multicast_rcv++;
                list_for_each_entry_rcu(p, &mcast->qp_list, list)
                        ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
                /*
                 * Notify ipath_multicast_detach() if it is waiting for us
                 * to finish.
                 */
                if (atomic_dec_return(&mcast->refcount) <= 1)
                        wake_up(&mcast->wait);
        } else {
                qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
                if (qp) {
                        dev->n_unicast_rcv++;
                        ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
                                     tlen, qp);
                        /*
                         * Notify ipath_destroy_qp() if it is waiting
                         * for us to finish.
                         */
                        if (atomic_dec_and_test(&qp->refcount))
                                wake_up(&qp->wait);
                } else
                        dev->n_pkt_drops++;
        }

bail:;
}

/**
 * ipath_ib_timer - verbs timer
 * @dev: the device pointer
 *
 * This is called from ipath_do_rcv_timer() at interrupt level to check for
 * QPs which need retransmits and to collect performance numbers.
 */
static void ipath_ib_timer(struct ipath_ibdev *dev)
{
        struct ipath_qp *resend = NULL;
        struct list_head *last;
        struct ipath_qp *qp;
        unsigned long flags;

        if (dev == NULL)
                return;

        spin_lock_irqsave(&dev->pending_lock, flags);
        /* Start filling the next pending queue. */
        if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
                dev->pending_index = 0;
        /* Save any requests still in the new queue, they have timed out. */
        last = &dev->pending[dev->pending_index];
        while (!list_empty(last)) {
                qp = list_entry(last->next, struct ipath_qp, timerwait);
                list_del_init(&qp->timerwait);
                qp->timer_next = resend;
                resend = qp;
                atomic_inc(&qp->refcount);
        }
        last = &dev->rnrwait;
        if (!list_empty(last)) {
                qp = list_entry(last->next, struct ipath_qp, timerwait);
                if (--qp->s_rnr_timeout == 0) {
                        do {
                                list_del_init(&qp->timerwait);
                                tasklet_hi_schedule(&qp->s_task);
                                if (list_empty(last))
                                        break;
                                qp = list_entry(last->next, struct ipath_qp,
                                                timerwait);
                        } while (qp->s_rnr_timeout == 0);
                }
        }
        /*
         * We should only be in the started state if pma_sample_start != 0
         */
        if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
            --dev->pma_sample_start == 0) {
                dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
                ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
                                        &dev->ipath_rword,
                                        &dev->ipath_spkts,
                                        &dev->ipath_rpkts,
                                        &dev->ipath_xmit_wait);
        }
        if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
                if (dev->pma_sample_interval == 0) {
                        u64 ta, tb, tc, td, te;

                        dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
                        ipath_snapshot_counters(dev->dd, &ta, &tb,
                                                &tc, &td, &te);

                        dev->ipath_sword = ta - dev->ipath_sword;
                        dev->ipath_rword = tb - dev->ipath_rword;
                        dev->ipath_spkts = tc - dev->ipath_spkts;
                        dev->ipath_rpkts = td - dev->ipath_rpkts;
                        dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
                } else
                        dev->pma_sample_interval--;
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        /* XXX What if timer fires again while this is running? */
        for (qp = resend; qp != NULL; qp = qp->timer_next) {
                spin_lock_irqsave(&qp->s_lock, flags);
                if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
                        dev->n_timeouts++;
                        ipath_restart_rc(qp, qp->s_last_psn + 1);
                }
                spin_unlock_irqrestore(&qp->s_lock, flags);

                /* Notify ipath_destroy_qp() if it is waiting. */
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}

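/*
 * Advance an SGE state by @length bytes, stepping to the next SGE or the
 * next memory-region segment as each one is consumed.
 */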
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        sge->vaddr += length;
        sge->length -= length;
        sge->sge_length -= length;
        if (sge->sge_length == 0) {
                if (--ss->num_sge)
                        *sge = *ss->sg_list++;
        } else if (sge->length == 0 && sge->mr != NULL) {
                if (++sge->n >= IPATH_SEGSZ) {
                        if (++sge->m >= sge->mr->mapsz)
                                return;
                        sge->n = 0;
                }
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
        }
}

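/*
 * Helpers for assembling 32-bit words from byte-unaligned source data
 * when copying to the PIO buffer, defined per endianness so that "upper"
 * always refers to the bytes at higher memory addresses.
 * get_upper_bits() extracts the bytes above a byte offset,
 * set_upper_bits() positions bytes at a byte offset within the word
 * being assembled, and clear_upper_bytes(data, n, off) keeps only the
 * low n bytes of data, placed at byte offset off, clearing the rest.
 * For example, on little-endian, clear_upper_bytes(0xAABBCCDD, 2, 1)
 * keeps 0xCCDD and shifts it up one byte, yielding 0x00CCDD00.
 */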
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#endif

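/*
 * Copy data from an SGE list to a PIO buffer one dword at a time,
 * handling byte-unaligned source addresses and lengths by accumulating
 * partial words.  The final dword is held back and written as the
 * "trigger" word, with write-combining flushes around it when @flush_wc
 * is set.
 */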
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
                    u32 length, unsigned flush_wc)
{
        u32 extra = 0;
        u32 data = 0;
        u32 last;

        while (1) {
                u32 len = ss->sge.length;
                u32 off;

                if (len > length)
                        len = length;
                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;
                BUG_ON(len == 0);
                /* If the source address is not aligned, try to align it. */
                off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
                if (off) {
                        u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
                                            ~(sizeof(u32) - 1));
                        u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
                        u32 y;

                        y = sizeof(u32) - off;
                        if (len > y)
                                len = y;
                        if (len + extra >= sizeof(u32)) {
                                data |= set_upper_bits(v, extra *
                                                       BITS_PER_BYTE);
                                len = sizeof(u32) - extra;
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                __raw_writel(data, piobuf);
                                piobuf++;
                                extra = 0;
                                data = 0;
                        } else {
                                /* Clear unused upper bytes */
                                data |= clear_upper_bytes(v, len, extra);
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                extra += len;
                        }
                } else if (extra) {
                        /* Source address is aligned. */
                        u32 *addr = (u32 *) ss->sge.vaddr;
                        int shift = extra * BITS_PER_BYTE;
                        int ushift = 32 - shift;
                        u32 l = len;

                        while (l >= sizeof(u32)) {
                                u32 v = *addr;

                                data |= set_upper_bits(v, shift);
                                __raw_writel(data, piobuf);
                                data = get_upper_bits(v, ushift);
                                piobuf++;
                                addr++;
                                l -= sizeof(u32);
                        }
                        /*
                         * We still have 'l' bytes (less than a dword)
                         * left over.
                         */
                        if (l) {
                                u32 v = *addr;

                                if (l + extra >= sizeof(u32)) {
                                        data |= set_upper_bits(v, shift);
                                        len -= l + extra - sizeof(u32);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        __raw_writel(data, piobuf);
                                        piobuf++;
                                        extra = 0;
                                        data = 0;
                                } else {
                                        /* Clear unused upper bytes */
                                        data |= clear_upper_bytes(v, l,
                                                                  extra);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        extra += l;
                                }
                        } else if (len == length) {
                                last = data;
                                break;
                        }
                } else if (len == length) {
                        u32 w;

                        /*
                         * Need to round up for the last dword in the
                         * packet.
                         */
                        w = (len + 3) >> 2;
                        __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
                        piobuf += w - 1;
                        last = ((u32 *) ss->sge.vaddr)[w - 1];
                        break;
                } else {
                        u32 w = len >> 2;

                        __iowrite32_copy(piobuf, ss->sge.vaddr, w);
                        piobuf += w;

                        extra = len & (sizeof(u32) - 1);
                        if (extra) {
                                u32 v = ((u32 *) ss->sge.vaddr)[w];

                                /* Clear unused upper bytes */
                                data = clear_upper_bytes(v, extra, 0);
                        }
                }
                update_sge(ss, len);
                length -= len;
        }
        /* Update address before sending packet. */
        update_sge(ss, length);
        if (flush_wc) {
                /* must flush early everything before trigger word */
                ipath_flush_wc();
                __raw_writel(last, piobuf);
                /* be sure trigger word is written */
                ipath_flush_wc();
        } else
                __raw_writel(last, piobuf);
}

/*
 * Convert IB rate to delay multiplier.
 */
unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return 8;
        case IB_RATE_5_GBPS:   return 4;
        case IB_RATE_10_GBPS:  return 2;
        case IB_RATE_20_GBPS:  return 1;
        default:               return 0;
        }
}

/*
 * Convert delay multiplier to IB rate.
 */
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
{
        switch (mult) {
        case 8:  return IB_RATE_2_5_GBPS;
        case 4:  return IB_RATE_5_GBPS;
        case 2:  return IB_RATE_10_GBPS;
        case 1:  return IB_RATE_20_GBPS;
        default: return IB_RATE_PORT_CURRENT;
        }
}

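/*
 * Allocate a send DMA request from the per-device free list, or return
 * NULL if the list is empty; put_txreq() returns one to the list.
 * The free list is protected by pending_lock.
 */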
static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
{
        struct ipath_verbs_txreq *tx = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        if (!list_empty(&dev->txreq_free)) {
                struct list_head *l = dev->txreq_free.next;

                list_del(l);
                tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);
        return tx;
}

static inline void put_txreq(struct ipath_ibdev *dev,
                             struct ipath_verbs_txreq *tx)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        list_add(&tx->txreq.list, &dev->txreq_free);
        spin_unlock_irqrestore(&dev->pending_lock, flags);
}

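/*
 * Completion callback for a send DMA request: generate a completion
 * queue entry if one is needed, free any bounce buffer, return the
 * request to the free list, and drop the QP reference taken when the
 * request was submitted.
 */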
static void sdma_complete(void *cookie, int status)
{
        struct ipath_verbs_txreq *tx = cookie;
        struct ipath_qp *qp = tx->qp;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);

        /* Generate a completion queue entry if needed */
        if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
                enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
                        IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;

                ipath_send_complete(qp, tx->wqe, ibs);
        }

        if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
                kfree(tx->txreq.map_addr);
        put_txreq(dev, tx);

        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/*
 * Compute the number of clock cycles of delay before sending the next packet.
 * The multipliers reflect the number of clocks for the fastest rate so
 * one tick at 4xDDR is 8 ticks at 1xSDR.
 * If the destination port will take longer to receive a packet than
 * the outgoing link can send it, we need to delay sending the next packet
 * by the difference in time it takes the receiver to receive and the sender
 * to send this packet.
 * Note that this delay is always correct for UC and RC but not always
 * optimal for UD. For UD, the destination HCA can be different for each
 * packet, in which case, we could send packets to a different destination
 * while "waiting" for the delay. The overhead for doing this without
 * HW support is more than just paying the cost of delaying some packets
 * unnecessarily.
 */
static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
{
        return (rcv_mult > snd_mult) ?
                (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}
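
/*
 * For example, per ipath_ib_rate_to_mult() above, 4xDDR (20 Gbps) has
 * mult 1 and 1xSDR (2.5 Gbps) has mult 8, so a packet of plen 64 sent
 * at 4xDDR to a 1xSDR receiver is delayed (64 * (8 - 1) + 1) >> 1 = 224
 * clocks before the next packet.
 */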
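/*
 * Send a packet using the send DMA engine: resend a previously
 * constructed packet if one is pending, map the header and payload for
 * SDMA when the payload fits in the descriptor queue and is aligned,
 * and otherwise fall back to copying everything into a bounce buffer.
 */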
static int ipath_verbs_send_dma(struct ipath_qp *qp,
                                struct ipath_ib_header *hdr, u32 hdrwords,
                                struct ipath_sge_state *ss, u32 len,
                                u32 plen, u32 dwords)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_devdata *dd = dev->dd;
        struct ipath_verbs_txreq *tx;
        u32 *piobuf;
        u32 control;
        u32 ndesc;
        int ret;

        tx = qp->s_tx;
        if (tx) {
                qp->s_tx = NULL;
                /* resend previously constructed packet */
                ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
                if (ret)
                        qp->s_tx = tx;
                goto bail;
        }

        tx = get_txreq(dev);
        if (!tx) {
                ret = -EBUSY;
                goto bail;
        }

        /*
         * Get the saved delay count we computed for the previous packet
         * and save the delay count for this packet to be used next time
         * we get here.
         */
        control = qp->s_pkt_delay;
        qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

        tx->qp = qp;
        atomic_inc(&qp->refcount);
        tx->wqe = qp->s_wqe;
        tx->txreq.callback = sdma_complete;
        tx->txreq.callback_cookie = tx;
        tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
                IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
        if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
                tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;

        /* VL15 packets bypass credit check */
        if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
                control |= 1ULL << 31;
                tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
        }

        if (len) {
                /*
                 * Don't try to DMA if it takes more descriptors than
                 * the queue holds.
                 */
                ndesc = ipath_count_sge(ss, len);
                if (ndesc >= dd->ipath_sdma_descq_cnt)
                        ndesc = 0;
        } else
                ndesc = 1;
        if (ndesc) {
                tx->hdr.pbc[0] = cpu_to_le32(plen);
                tx->hdr.pbc[1] = cpu_to_le32(control);
                memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
                tx->txreq.sg_count = ndesc;
                tx->map_len = (hdrwords + 2) << 2;
                tx->txreq.map_addr = &tx->hdr;
                ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
                if (ret) {
                        /* save ss and length in dwords */
                        tx->ss = ss;
                        tx->len = dwords;
                        qp->s_tx = tx;
                }
                goto bail;
        }

        /* Allocate a buffer and copy the header and payload to it. */
        tx->map_len = (plen + 1) << 2;
        piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
        if (unlikely(piobuf == NULL)) {
                ret = -EBUSY;
                goto err_tx;
        }
        tx->txreq.map_addr = piobuf;
        tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
        tx->txreq.sg_count = 1;

        *piobuf++ = (__force u32) cpu_to_le32(plen);
        *piobuf++ = (__force u32) cpu_to_le32(control);
        memcpy(piobuf, hdr, hdrwords << 2);
        ipath_copy_from_sge(piobuf + hdrwords, ss, len);

        ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
        /*
         * If we couldn't queue the DMA request, save the info
         * and try again later rather than destroying the
         * buffer and undoing the side effects of the copy.
         */
        if (ret) {
                tx->ss = NULL;
                tx->len = 0;
                qp->s_tx = tx;
        }
        dev->n_unaligned++;
        goto bail;

err_tx:
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
        put_txreq(dev, tx);
bail:
        return ret;
}

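/*
 * Send a packet by programmed I/O: write the PBC control qword, then
 * the header and payload directly to a PIO send buffer, with
 * write-combining flushes around the trigger word where the hardware
 * requires them.
 */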
static int ipath_verbs_send_pio(struct ipath_qp *qp,
                                struct ipath_ib_header *ibhdr, u32 hdrwords,
                                struct ipath_sge_state *ss, u32 len,
                                u32 plen, u32 dwords)
{
        struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
        u32 *hdr = (u32 *) ibhdr;
        u32 __iomem *piobuf;
        unsigned flush_wc;
        u32 control;
        int ret;

        piobuf = ipath_getpiobuf(dd, plen, NULL);
        if (unlikely(piobuf == NULL)) {
                ret = -EBUSY;
                goto bail;
        }

        /*
         * Get the saved delay count we computed for the previous packet
         * and save the delay count for this packet to be used next time
         * we get here.
         */
        control = qp->s_pkt_delay;
        qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

        /* VL15 packets bypass credit check */
        if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
                control |= 1ULL << 31;

        /*
         * Write the length to the control qword plus any needed flags.
         * We have to flush after the PBC for correctness on some cpus
         * or WC buffer can be written out of order.
         */
        writeq(((u64) control << 32) | plen, piobuf);
        piobuf += 2;

        flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
        if (len == 0) {
                /*
                 * If there is just the header portion, must flush before
                 * writing last word of header for correctness, and after
                 * the last header word (trigger word).
                 */
                if (flush_wc) {
                        ipath_flush_wc();
                        __iowrite32_copy(piobuf, hdr, hdrwords - 1);
                        ipath_flush_wc();
                        __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
                        ipath_flush_wc();
                } else
                        __iowrite32_copy(piobuf, hdr, hdrwords);
                goto done;
        }

        if (flush_wc)
                ipath_flush_wc();
        __iowrite32_copy(piobuf, hdr, hdrwords);
        piobuf += hdrwords;

        /* The common case is aligned and contained in one segment. */
        if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
                   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
                u32 *addr = (u32 *) ss->sge.vaddr;

                /* Update address before sending packet. */
                update_sge(ss, len);
                if (flush_wc) {
                        __iowrite32_copy(piobuf, addr, dwords - 1);
                        /* must flush early everything before trigger word */
                        ipath_flush_wc();
                        __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
                        /* be sure trigger word is written */
                        ipath_flush_wc();
                } else
                        __iowrite32_copy(piobuf, addr, dwords);
                goto done;
        }
        copy_io(piobuf, ss, len, flush_wc);
done:
        if (qp->s_wqe)
                ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
        ret = 0;
bail:
        return ret;
}

/**
 * ipath_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 */
int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
                     u32 hdrwords, struct ipath_sge_state *ss, u32 len)
{
        struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
        u32 plen;
        int ret;
        u32 dwords = (len + 3) >> 2;

        /*
         * Calculate the send buffer trigger address.
         * The +1 counts for the pbc control dword following the pbc length.
         */
        plen = hdrwords + dwords + 1;

        /*
         * VL15 packets (IB_QPT_SMI) will always use PIO, so we
         * can defer SDMA restart until link goes ACTIVE without
         * worrying about just how we got there.
         */
        if (qp->ibqp.qp_type == IB_QPT_SMI)
                ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
                                           plen, dwords);
        /* All non-VL15 packets are dropped if link is not ACTIVE */
        else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
                if (qp->s_wqe)
                        ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
                ret = 0;
        } else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
                ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
                                           plen, dwords);
        else
                ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
                                           plen, dwords);

        return ret;
}

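/*
 * Take a snapshot of the chip's send/receive word and packet counters
 * and of the send-stall counter; used for performance sampling.
 */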
int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
                            u64 *rwords, u64 *spkts, u64 *rpkts,
                            u64 *xmit_wait)
{
        int ret;

        if (!(dd->ipath_flags & IPATH_INITTED)) {
                /* no hardware, freeze, etc. */
                ret = -EINVAL;
                goto bail;
        }
        *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
        *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
        *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
        *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
        *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

        ret = 0;

bail:
        return ret;
}

/**
 * ipath_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_get_counters(struct ipath_devdata *dd,
                       struct ipath_verbs_counters *cntrs)
{
        struct ipath_cregs const *crp = dd->ipath_cregs;
        int ret;

        if (!(dd->ipath_flags & IPATH_INITTED)) {
                /* no hardware, freeze, etc. */
                ret = -EINVAL;
                goto bail;
        }
        cntrs->symbol_error_counter =
                ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
        cntrs->link_error_recovery_counter =
                ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
        /*
         * The link downed counter counts when the other side downs the
         * connection.  We add in the number of times we downed the link
         * due to local link integrity errors to compensate.
         */
        cntrs->link_downed_counter =
                ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
        cntrs->port_rcv_errors =
                ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
                ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
                ipath_snap_cntr(dd, crp->cr_portovflcnt) +
                ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
                ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
                ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
                ipath_snap_cntr(dd, crp->cr_erricrccnt) +
                ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
                ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
                ipath_snap_cntr(dd, crp->cr_badformatcnt) +
                dd->ipath_rxfc_unsupvl_errs;
        if (crp->cr_rxotherlocalphyerrcnt)
                cntrs->port_rcv_errors +=
                        ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
        if (crp->cr_rxvlerrcnt)
                cntrs->port_rcv_errors +=
                        ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
        cntrs->port_rcv_remphys_errors =
                ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
        cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
        cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
        cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
        cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
        cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
        cntrs->local_link_integrity_errors =
                crp->cr_locallinkintegrityerrcnt ?
                ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
                ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
                 dd->ipath_lli_errs : dd->ipath_lli_errors);
        cntrs->excessive_buffer_overrun_errors =
                crp->cr_excessbufferovflcnt ?
                ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
                dd->ipath_overrun_thresh_errs;
        cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
                ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;

        ret = 0;

bail:
        return ret;
}

/**
 * ipath_ib_piobufavail - callback when a PIO buffer is available
 * @dev: the device pointer
1400  *
1401  * This is called from ipath_intr() at interrupt level when a PIO buffer is
1402  * available after ipath_verbs_send() returned an error that no buffers were
1403  * available.  Return 1 if we consumed all the PIO buffers and we still have
1404  * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
1405  * return zero).
1406  */
1407 int ipath_ib_piobufavail(struct ipath_ibdev *dev)
1408 {
1409         struct ipath_qp *qp;
1410         unsigned long flags;
1411
1412         if (dev == NULL)
1413                 goto bail;
1414
1415         spin_lock_irqsave(&dev->pending_lock, flags);
1416         while (!list_empty(&dev->piowait)) {
1417                 qp = list_entry(dev->piowait.next, struct ipath_qp,
1418                                 piowait);
1419                 list_del_init(&qp->piowait);
1420                 clear_bit(IPATH_S_BUSY, &qp->s_busy);
1421                 tasklet_hi_schedule(&qp->s_task);
1422         }
1423         spin_unlock_irqrestore(&dev->pending_lock, flags);
1424
1425 bail:
1426         return 0;
1427 }
1428
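/**
 * ipath_query_device - fill in ib_device_attr for this HCA
 * @ibdev: the IB device to query
 * @props: filled in with the device limits, which mostly reflect the
 *	ib_ipath_max_* module parameters
 */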
static int ipath_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id = dev->dd->ipath_vendorid;
	props->vendor_part_id = dev->dd->ipath_deviceid;
	props->hw_ver = dev->dd->ipath_pcirev;

	props->sys_image_guid = dev->sys_image_guid;

	props->max_mr_size = ~0ull;
	props->max_qp = ib_ipath_max_qps;
	props->max_qp_wr = ib_ipath_max_qp_wrs;
	props->max_sge = ib_ipath_max_sges;
	props->max_cq = ib_ipath_max_cqs;
	props->max_ah = ib_ipath_max_ahs;
	props->max_cqe = ib_ipath_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_fmr = dev->lk_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = ib_ipath_max_pds;
	props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = ib_ipath_max_srqs;
	props->max_srq_wr = ib_ipath_max_srq_wrs;
	props->max_srq_sge = ib_ipath_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = ipath_get_npkeys(dev->dd);
	props->max_mcast_grp = ib_ipath_max_mcast_grps;
	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}
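
/*
 * For illustration only (not part of this driver): userspace sees the
 * limits reported above through libibverbs, e.g.:
 *
 *	struct ibv_device_attr attr;
 *
 *	if (!ibv_query_device(ctx, &attr))
 *		printf("max_qp=%d max_cqe=%d\n", attr.max_qp, attr.max_cqe);
 */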

/*
 * Map the InfiniPath hardware link-training states (the link training
 * state field of ipath_lastibcstat, masked by ibcs_lts_mask) to the
 * PortPhysicalState values reported in the IB PortInfo attribute.
 */
const u8 ipath_cvt_physportstate[32] = {
	[INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}

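/**
 * ipath_query_port - fill in ib_port_attr for the (single) physical port
 * @ibdev: the IB device
 * @port: the port number (this driver only has port 1)
 * @props: filled in with LID, MTU, link and physical state, etc.
 */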
static int ipath_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_devdata *dd = dev->dd;
	enum ib_mtu mtu;
	u16 lid = dd->ipath_lid;
	u64 ibcstat;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = dd->ipath_lmc;
	props->sm_lid = dev->sm_lid;
	props->sm_sl = dev->sm_sl;
	ibcstat = dd->ipath_lastibcstat;
	/* Map the link state to IB portinfo values. */
	props->state = ipath_ib_linkstate(dd, ibcstat) + 1;

	/* See phys_state_show() */
	props->phys_state = /* MEA: assumes shift == 0 */
		ipath_cvt_physportstate[dd->ipath_lastibcstat &
		dd->ibcs_lts_mask];
	props->port_cap_flags = dev->port_cap_flags;
	props->gid_tbl_len = 1;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = ipath_get_npkeys(dd);
	props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
		dev->z_pkey_violations;
	props->qkey_viol_cntr = dev->qkey_violations;
	props->active_width = dd->ipath_link_width_active;
	/* See rate_show() */
	props->active_speed = dd->ipath_link_speed_active;
	props->max_vl_num = 1;		/* VLCap = VL0 */
	props->init_type_reply = 0;

	props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = dev->subnet_timeout;

	return 0;
}

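/**
 * ipath_modify_device - change the node description or system image GUID
 * @device: the IB device to modify
 * @device_modify_mask: which attributes to change
 * @device_modify: the new values
 *
 * Only IB_DEVICE_MODIFY_SYS_IMAGE_GUID and IB_DEVICE_MODIFY_NODE_DESC
 * are supported.
 */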
static int ipath_modify_device(struct ib_device *device,
			       int device_modify_mask,
			       struct ib_device_modify *device_modify)
{
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
		memcpy(device->node_desc, device_modify->node_desc, 64);

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		to_idev(device)->sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);

	ret = 0;

bail:
	return ret;
}

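/**
 * ipath_modify_port - update port capability flags and handle resets
 * @ibdev: the IB device
 * @port: the port number (unused; there is only one port)
 * @port_modify_mask: IB_PORT_SHUTDOWN and/or IB_PORT_RESET_QKEY_CNTR
 * @props: capability bits to set and clear
 */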
static int ipath_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	dev->port_cap_flags |= props->set_port_cap_mask;
	dev->port_cap_flags &= ~props->clr_port_cap_mask;
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		dev->qkey_violations = 0;
	return 0;
}

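/**
 * ipath_query_gid - return the single GID for the port
 * @ibdev: the IB device
 * @port: the port number
 * @index: the GID index (only index 0 is supported)
 * @gid: filled in with the subnet prefix and the device GUID
 */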
static int ipath_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= 1) {
		ret = -EINVAL;
		goto bail;
	}
	gid->global.subnet_prefix = dev->gid_prefix;
	gid->global.interface_id = dev->dd->ipath_guid;

	ret = 0;

bail:
	return ret;
}

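/**
 * ipath_alloc_pd - allocate a protection domain
 * @ibdev: the IB device
 * @context: unused by this driver
 * @udata: non-NULL iff the PD is being allocated from userspace
 */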
static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary.  Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_ipath_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}

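/**
 * ipath_dealloc_pd - free a protection domain from ipath_alloc_pd()
 * @ibpd: the PD to free
 */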
static int ipath_dealloc_pd(struct ib_pd *ibpd)
{
	struct ipath_pd *pd = to_ipd(ibpd);
	struct ipath_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}

/**
 * ipath_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah;
	struct ib_ah *ret;
	struct ipath_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
	    ah_attr->dlid != IPATH_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (ah_attr->dlid == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > pd->device->phys_port_cnt) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
	ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);

	ret = &ah->ibah;

bail:
	return ret;
}

/**
 * ipath_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int ipath_destroy_ah(struct ib_ah *ibah)
{
	struct ipath_ibdev *dev = to_idev(ibah->device);
	struct ipath_ah *ah = to_iah(ibah);
	unsigned long flags;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}

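/**
 * ipath_query_ah - return the attributes an address handle was created with
 * @ibah: the AH to query
 * @ah_attr: filled in with the stored attributes
 */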
static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;
	ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);

	return 0;
}

/**
 * ipath_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}

/**
 * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}

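/**
 * ipath_query_pkey - return a PKEY from the port 0 PKEY table
 * @ibdev: the IB device
 * @port: the port number
 * @index: the PKEY table index
 * @pkey: filled in with the PKEY
 *
 * Returns -EINVAL if @index is out of range.
 */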
static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= ipath_get_npkeys(dev->dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = ipath_get_pkey(dev->dd, index);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the InfiniPath driver
 */
static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct ipath_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int ipath_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

static int ipath_verbs_register_sysfs(struct ib_device *dev);

static void __verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/* Handle verbs layer timeouts. */
	ipath_ib_timer(dd->verbs_dev);

	mod_timer(&dd->verbs_timer, jiffies + 1);
}

static int enable_timer(struct ipath_devdata *dd)
{
	/*
	 * Early chips had a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 dd->ipath_gpio_mask);
	}

	init_timer(&dd->verbs_timer);
	dd->verbs_timer.function = __verbs_timer;
	dd->verbs_timer.data = (unsigned long)dd;
	dd->verbs_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_timer);

	return 0;
}

static int disable_timer(struct ipath_devdata *dd)
{
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		/* Disable GPIO bit 2 interrupt */
		dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 dd->ipath_gpio_mask);
		/*
		 * We might want to undo changes to debugportselect,
		 * but how?
		 */
	}

	del_timer_sync(&dd->verbs_timer);

	return 0;
}

/**
 * ipath_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 *
 * Returns 0 on success, a negative errno on failure; on success
 * dd->verbs_dev points at the newly allocated ipath_ibdev.
 */
int ipath_register_ib_device(struct ipath_devdata *dd)
{
	struct ipath_verbs_counters cntrs;
	struct ipath_ibdev *idev;
	struct ib_device *dev;
	struct ipath_verbs_txreq *tx;
	unsigned i;
	int ret;

	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
	if (idev == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	dev = &idev->ibdev;

	if (dd->ipath_sdma_descq_cnt) {
		tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
			     GFP_KERNEL);
		if (tx == NULL) {
			ret = -ENOMEM;
			goto err_tx;
		}
	} else
		tx = NULL;
	idev->txreq_bufs = tx;

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&idev->n_pds_lock);
	spin_lock_init(&idev->n_ahs_lock);
	spin_lock_init(&idev->n_cqs_lock);
	spin_lock_init(&idev->n_qps_lock);
	spin_lock_init(&idev->n_srqs_lock);
	spin_lock_init(&idev->n_mcast_grps_lock);

	spin_lock_init(&idev->qp_table.lock);
	spin_lock_init(&idev->lk_table.lock);
	idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);

	ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
	if (ret)
		goto err_qp;

	/*
	 * The top ib_ipath_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
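	/*
	 * For example, with the default lkey_table_size of 12, a 32-bit
	 * LKEY is split as bits [31:20] table index, bits [19:8]
	 * generation tag, and bits [7:0] user-owned.
	 */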
	idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
	idev->lk_table.table = kzalloc(idev->lk_table.max *
				       sizeof(*idev->lk_table.table),
				       GFP_KERNEL);
	if (idev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	INIT_LIST_HEAD(&idev->pending_mmaps);
	spin_lock_init(&idev->pending_lock);
	idev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&idev->mmap_offset_lock);
	INIT_LIST_HEAD(&idev->pending[0]);
	INIT_LIST_HEAD(&idev->pending[1]);
	INIT_LIST_HEAD(&idev->pending[2]);
	INIT_LIST_HEAD(&idev->piowait);
	INIT_LIST_HEAD(&idev->rnrwait);
	INIT_LIST_HEAD(&idev->txreq_free);
	idev->pending_index = 0;
	idev->port_cap_flags =
		IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
	if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
		idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	ipath_get_counters(dd, &cntrs);
	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
	idev->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	idev->z_link_downed_counter = cntrs.link_downed_counter;
	idev->z_port_rcv_errors = cntrs.port_rcv_errors;
	idev->z_port_rcv_remphys_errors =
		cntrs.port_rcv_remphys_errors;
	idev->z_port_xmit_discards = cntrs.port_xmit_discards;
	idev->z_port_xmit_data = cntrs.port_xmit_data;
	idev->z_port_rcv_data = cntrs.port_rcv_data;
	idev->z_port_xmit_packets = cntrs.port_xmit_packets;
	idev->z_port_rcv_packets = cntrs.port_rcv_packets;
	idev->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	idev->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	idev->z_vl15_dropped = cntrs.vl15_dropped;

	for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
		list_add(&tx->txreq.list, &idev->txreq_free);

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!sys_image_guid)
		sys_image_guid = dd->ipath_guid;
	idev->sys_image_guid = sys_image_guid;
	idev->ib_unit = dd->ipath_unit;
	idev->dd = dd;

	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
	dev->owner = THIS_MODULE;
	dev->node_guid = dd->ipath_guid;
	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_POLL_CQ)		|
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_POST_SEND)		|
		(1ull << IB_USER_VERBS_CMD_POST_RECV)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = 1;
	dev->dma_device = &dd->pcidev->dev;
	dev->query_device = ipath_query_device;
	dev->modify_device = ipath_modify_device;
	dev->query_port = ipath_query_port;
	dev->modify_port = ipath_modify_port;
	dev->query_pkey = ipath_query_pkey;
	dev->query_gid = ipath_query_gid;
	dev->alloc_ucontext = ipath_alloc_ucontext;
	dev->dealloc_ucontext = ipath_dealloc_ucontext;
	dev->alloc_pd = ipath_alloc_pd;
	dev->dealloc_pd = ipath_dealloc_pd;
	dev->create_ah = ipath_create_ah;
	dev->destroy_ah = ipath_destroy_ah;
	dev->query_ah = ipath_query_ah;
	dev->create_srq = ipath_create_srq;
	dev->modify_srq = ipath_modify_srq;
	dev->query_srq = ipath_query_srq;
	dev->destroy_srq = ipath_destroy_srq;
	dev->create_qp = ipath_create_qp;
	dev->modify_qp = ipath_modify_qp;
	dev->query_qp = ipath_query_qp;
	dev->destroy_qp = ipath_destroy_qp;
	dev->post_send = ipath_post_send;
	dev->post_recv = ipath_post_receive;
	dev->post_srq_recv = ipath_post_srq_receive;
	dev->create_cq = ipath_create_cq;
	dev->destroy_cq = ipath_destroy_cq;
	dev->resize_cq = ipath_resize_cq;
	dev->poll_cq = ipath_poll_cq;
	dev->req_notify_cq = ipath_req_notify_cq;
	dev->get_dma_mr = ipath_get_dma_mr;
	dev->reg_phys_mr = ipath_reg_phys_mr;
	dev->reg_user_mr = ipath_reg_user_mr;
	dev->dereg_mr = ipath_dereg_mr;
	dev->alloc_fmr = ipath_alloc_fmr;
	dev->map_phys_fmr = ipath_map_phys_fmr;
	dev->unmap_fmr = ipath_unmap_fmr;
	dev->dealloc_fmr = ipath_dealloc_fmr;
	dev->attach_mcast = ipath_multicast_attach;
	dev->detach_mcast = ipath_multicast_detach;
	dev->process_mad = ipath_process_mad;
	dev->mmap = ipath_mmap;
	dev->dma_ops = &ipath_dma_mapping_ops;

	snprintf(dev->node_desc, sizeof(dev->node_desc),
		 IPATH_IDSTR " %s", init_utsname()->nodename);

	ret = ib_register_device(dev);
	if (ret)
		goto err_reg;

	if (ipath_verbs_register_sysfs(dev))
		goto err_class;

	enable_timer(dd);

	goto bail;

err_class:
	ib_unregister_device(dev);
err_reg:
	kfree(idev->lk_table.table);
err_lk:
	kfree(idev->qp_table.table);
err_qp:
	kfree(idev->txreq_bufs);
err_tx:
	ib_dealloc_device(dev);
	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	idev = NULL;

bail:
	dd->verbs_dev = idev;
	return ret;
}

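/**
 * ipath_unregister_ib_device - unregister from the infiniband core
 * @dev: the verbs device to tear down
 *
 * Frees the QP, LKEY and txreq tables and complains about any work
 * still pending at unregister time.
 */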
void ipath_unregister_ib_device(struct ipath_ibdev *dev)
{
	struct ib_device *ibdev = &dev->ibdev;

	disable_timer(dev->dd);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->pending[0]) ||
	    !list_empty(&dev->pending[1]) ||
	    !list_empty(&dev->pending[2]))
		ipath_dev_err(dev->dd, "pending list not empty!\n");
	if (!list_empty(&dev->piowait))
		ipath_dev_err(dev->dd, "piowait list not empty!\n");
	if (!list_empty(&dev->rnrwait))
		ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
	if (!ipath_mcast_tree_empty())
		ipath_dev_err(dev->dd, "multicast table memory leak!\n");
	/*
	 * Note that ipath_unregister_ib_device() can be called before all
	 * the QPs are destroyed!
	 */
	ipath_free_all_qps(&dev->qp_table);
	kfree(dev->qp_table.table);
	kfree(dev->lk_table.table);
	kfree(dev->txreq_bufs);
	ib_dealloc_device(ibdev);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct ipath_ibdev *dev =
		container_of(device, struct ipath_ibdev, ibdev.dev);

	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct ipath_ibdev *dev =
		container_of(device, struct ipath_ibdev, ibdev.dev);
	int ret;

	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
	if (ret < 0)
		goto bail;
	strcat(buf, "\n");
	ret = strlen(buf);

bail:
	return ret;
}

static ssize_t show_stats(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ipath_ibdev *dev =
		container_of(device, struct ipath_ibdev, ibdev.dev);
	int i;
	int len;

	len = sprintf(buf,
		      "RC resends  %d\n"
		      "RC no QACK  %d\n"
		      "RC ACKs     %d\n"
		      "RC SEQ NAKs %d\n"
		      "RC RDMA seq %d\n"
		      "RC RNR NAKs %d\n"
		      "RC OTH NAKs %d\n"
		      "RC timeouts %d\n"
		      "RC RDMA dup %d\n"
		      "RC stalls   %d\n"
		      "piobuf wait %d\n"
		      "no piobuf   %d\n"
		      "unaligned   %d\n"
		      "PKT drops   %d\n"
		      "WQE errs    %d\n",
		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
		      dev->n_other_naks, dev->n_timeouts,
		      dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
		      dev->n_no_piobuf, dev->n_unaligned,
		      dev->n_pkt_drops, dev->n_wqe_errs);
	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
		const struct ipath_opcode_stats *si = &dev->opstats[i];

		if (!si->n_packets && !si->n_bytes)
			continue;
		len += sprintf(buf + len, "%02x %llu/%llu\n", i,
			       (unsigned long long) si->n_packets,
			       (unsigned long long) si->n_bytes);
	}
	return len;
}

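/* Read-only attributes exposed through the IB device's sysfs class device. */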
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);

static struct device_attribute *ipath_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_stats
};

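/**
 * ipath_verbs_register_sysfs - create the sysfs attribute files
 * @dev: the IB device
 *
 * Returns 0 on success, non-zero if any attribute file could not be
 * created.
 */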
static int ipath_verbs_register_sysfs(struct ib_device *dev)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
		if (device_create_file(&dev->dev,
				       ipath_class_attributes[i])) {
			ret = 1;
			goto bail;
		}

	ret = 0;

bail:
	return ret;
}