/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/utsname.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_ipath_lkey_table_size = 12;
module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
static unsigned int ib_ipath_max_pds = 0xFFFF;
module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_ipath_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_ipath_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_ipath_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_ipath_max_qps = 16384;
module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_ipath_max_sges = 0x60;
module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_ipath_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_ipath_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_ipath_max_srqs = 1024;
module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_ipath_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_ipath_disable_sma;
module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");

const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_INIT] = IPATH_POST_RECV_OK,
	[IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
	[IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
	[IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK,
	[IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
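/*
 * Example of how this table is consulted (see ipath_post_one_send() and
 * ipath_post_receive() below): a post is accepted only if the bit for
 * that operation is set for the current QP state, e.g.
 *	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK))
 *		the work request is rejected;
 * so a QP in RTS may post and process both sends and receives, while a
 * QP in RTR may only post and process receives.
 */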
struct ipath_ucontext {
struct ib_ucontext ibucontext;

static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
return container_of(ibucontext, struct ipath_ucontext, ibucontext);

 * Translate ib_wr_opcode into ib_wc_opcode.
const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
[IB_WR_SEND] = IB_WC_SEND,
[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD

static __be64 sys_image_guid;

 * ipath_copy_sge - copy data to SGE memory
 * @data: the data to copy
 * @length: the length of the data
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
struct ipath_sge *sge = &ss->sge;
u32 len = sge->length;
if (len > sge->sge_length)
len = sge->sge_length;
memcpy(sge->vaddr, data, len);
sge->sge_length -= len;
if (sge->sge_length == 0) {
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr != NULL) {
if (++sge->n >= IPATH_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
sge->mr->map[sge->m]->segs[sge->n].vaddr;
sge->mr->map[sge->m]->segs[sge->n].length;

 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @length: the number of bytes to skip
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
struct ipath_sge *sge = &ss->sge;
u32 len = sge->length;
if (len > sge->sge_length)
len = sge->sge_length;
sge->sge_length -= len;
if (sge->sge_length == 0) {
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr != NULL) {
if (++sge->n >= IPATH_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
sge->mr->map[sge->m]->segs[sge->n].vaddr;
sge->mr->map[sge->m]->segs[sge->n].length;

static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
memset(&wc, 0, sizeof(wc));
wc.wr_id = wr->wr_id;
wc.status = IB_WC_WR_FLUSH_ERR;
wc.opcode = ib_ipath_wc_opcode[wr->opcode];
ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);

 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the ipath_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
struct ipath_sge *sg_list = ss->sg_list;
struct ipath_sge sge = ss->sge;
u8 num_sge = ss->num_sge;
u32 ndesc = 1;	/* count the header */
u32 len = sge.length;
if (len > sge.sge_length)
len = sge.sge_length;
if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
(len != length && (len & (sizeof(u32) - 1)))) {
sge.sge_length -= len;
if (sge.sge_length == 0) {
} else if (sge.length == 0 && sge.mr != NULL) {
if (++sge.n >= IPATH_SEGSZ) {
if (++sge.m >= sge.mr->mapsz)
sge.mr->map[sge.m]->segs[sge.n].vaddr;
sge.mr->map[sge.m]->segs[sge.n].length;

 * Copy from the SGEs to the data buffer.
static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
struct ipath_sge *sge = &ss->sge;
u32 len = sge->length;
if (len > sge->sge_length)
len = sge->sge_length;
memcpy(data, sge->vaddr, len);
sge->sge_length -= len;
if (sge->sge_length == 0) {
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr != NULL) {
if (++sge->n >= IPATH_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
sge->mr->map[sge->m]->segs[sge->n].vaddr;
sge->mr->map[sge->m]->segs[sge->n].length;

 * ipath_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
struct ipath_swqe *wqe;
spin_lock_irqsave(&qp->s_lock, flags);
/* Check that state is OK to post send. */
if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) {
if (qp->state != IB_QPS_SQE && qp->state != IB_QPS_ERR)
/* C10-96 says generate a flushed completion entry. */
ipath_flush_wqe(qp, wr);
/* IB spec says that num_sge == 0 is OK. */
if (wr->num_sge > qp->s_max_sge)
 * Don't allow RDMA reads or atomic operations on UC or
 * undefined operations.
 * Make sure buffer is large enough to hold the result for atomics.
if (qp->ibqp.qp_type == IB_QPT_UC) {
if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
} else if (qp->ibqp.qp_type == IB_QPT_UD) {
/* Check UD opcode */
if (wr->opcode != IB_WR_SEND &&
wr->opcode != IB_WR_SEND_WITH_IMM)
/* Check UD destination address PD */
if (qp->ibqp.pd != wr->wr.ud.ah->pd)
} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
wr->sg_list[0].length < sizeof(u64) ||
wr->sg_list[0].addr & (sizeof(u64) - 1)))
else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
next = qp->s_head + 1;
if (next >= qp->s_size)
if (next == qp->s_last) {
wqe = get_swqe_ptr(qp, qp->s_head);
acc = wr->opcode >= IB_WR_RDMA_READ ?
IB_ACCESS_LOCAL_WRITE : 0;
for (i = 0, j = 0; i < wr->num_sge; i++) {
u32 length = wr->sg_list[i].length;
ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
&wr->sg_list[i], acc);
wqe->length += length;
if (qp->ibqp.qp_type == IB_QPT_UC ||
qp->ibqp.qp_type == IB_QPT_RC) {
if (wqe->length > 0x80000000U)
} else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
wqe->ssn = qp->s_ssn++;
spin_unlock_irqrestore(&qp->s_lock, flags);

 * ipath_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 * This may be called from interrupt context.
static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
struct ipath_qp *qp = to_iqp(ibqp);
for (; wr; wr = wr->next) {
err = ipath_post_one_send(qp, wr);
/* Try to do the send work in the caller's context. */
ipath_do_send((unsigned long) qp);

 * ipath_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 * This may be called from interrupt context.
static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
struct ipath_qp *qp = to_iqp(ibqp);
struct ipath_rwq *wq = qp->r_rq.wq;
/* Check that state is OK to post receive. */
if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
for (; wr; wr = wr->next) {
struct ipath_rwqe *wqe;
if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
spin_lock_irqsave(&qp->r_rq.lock, flags);
if (next >= qp->r_rq.size)
if (next == wq->tail) {
spin_unlock_irqrestore(&qp->r_rq.lock, flags);
wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
wqe->wr_id = wr->wr_id;
wqe->num_sge = wr->num_sge;
for (i = 0; i < wr->num_sge; i++)
wqe->sg_list[i] = wr->sg_list[i];
/* Make sure queue entry is written before the head index. */
spin_unlock_irqrestore(&qp->r_rq.lock, flags);

 * ipath_qp_rcv - processing an incoming packet on a QP
 * @dev: the device the packet came on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 * This is called from ipath_ib_rcv() to process an incoming packet
 * Called at interrupt level.
static void ipath_qp_rcv(struct ipath_ibdev *dev,
struct ipath_ib_header *hdr, int has_grh,
void *data, u32 tlen, struct ipath_qp *qp)
/* Check for valid receive state. */
if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
switch (qp->ibqp.qp_type) {
if (ib_ipath_disable_sma)
ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);

 * ipath_ib_rcv - process an incoming packet
 * @arg: the device pointer
 * @rhdr: the header of the packet
 * @data: the packet data
 * @tlen: the packet length
 * This is called from ipath_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
struct ipath_ib_header *hdr = rhdr;
struct ipath_other_headers *ohdr;
if (unlikely(dev == NULL))
if (unlikely(tlen < 24)) {	/* LRH+BTH+CRC */
/* Check for a valid destination LID (see ch. 7.11.1). */
lid = be16_to_cpu(hdr->lrh[1]);
if (lid < IPATH_MULTICAST_LID_BASE) {
lid &= ~((1 << dev->dd->ipath_lmc) - 1);
if (unlikely(lid != dev->dd->ipath_lid)) {
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
if (lnh == IPATH_LRH_BTH)
else if (lnh == IPATH_LRH_GRH)
ohdr = &hdr->u.l.oth;
opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
dev->opstats[opcode].n_bytes += tlen;
dev->opstats[opcode].n_packets++;
/* Get the destination QP number. */
qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
if (qp_num == IPATH_MULTICAST_QPN) {
struct ipath_mcast *mcast;
struct ipath_mcast_qp *p;
if (lnh != IPATH_LRH_GRH) {
mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
dev->n_multicast_rcv++;
list_for_each_entry_rcu(p, &mcast->qp_list, list)
ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
 * Notify ipath_multicast_detach() if it is waiting for us
if (atomic_dec_return(&mcast->refcount) <= 1)
wake_up(&mcast->wait);
qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
dev->n_unicast_rcv++;
ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
 * Notify ipath_destroy_qp() if it is waiting
if (atomic_dec_and_test(&qp->refcount))

 * ipath_ib_timer - verbs timer
 * @arg: the device pointer
 * This is called from ipath_do_rcv_timer() at interrupt level to check for
 * QPs which need retransmits and to collect performance numbers.
static void ipath_ib_timer(struct ipath_ibdev *dev)
struct ipath_qp *resend = NULL;
struct list_head *last;
spin_lock_irqsave(&dev->pending_lock, flags);
/* Start filling the next pending queue. */
if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
dev->pending_index = 0;
/* Save any requests still in the new queue, they have timed out. */
last = &dev->pending[dev->pending_index];
while (!list_empty(last)) {
qp = list_entry(last->next, struct ipath_qp, timerwait);
list_del_init(&qp->timerwait);
qp->timer_next = resend;
atomic_inc(&qp->refcount);
last = &dev->rnrwait;
if (!list_empty(last)) {
qp = list_entry(last->next, struct ipath_qp, timerwait);
if (--qp->s_rnr_timeout == 0) {
list_del_init(&qp->timerwait);
tasklet_hi_schedule(&qp->s_task);
if (list_empty(last))
qp = list_entry(last->next, struct ipath_qp,
} while (qp->s_rnr_timeout == 0);
 * We should only be in the started state if pma_sample_start != 0
if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
--dev->pma_sample_start == 0) {
dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
&dev->ipath_xmit_wait);
if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
if (dev->pma_sample_interval == 0) {
u64 ta, tb, tc, td, te;
dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
ipath_snapshot_counters(dev->dd, &ta, &tb,
dev->ipath_sword = ta - dev->ipath_sword;
dev->ipath_rword = tb - dev->ipath_rword;
dev->ipath_spkts = tc - dev->ipath_spkts;
dev->ipath_rpkts = td - dev->ipath_rpkts;
dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
dev->pma_sample_interval--;
spin_unlock_irqrestore(&dev->pending_lock, flags);
/* XXX What if timer fires again while this is running? */
for (qp = resend; qp != NULL; qp = qp->timer_next) {
spin_lock_irqsave(&qp->s_lock, flags);
if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
ipath_restart_rc(qp, qp->s_last_psn + 1);
spin_unlock_irqrestore(&qp->s_lock, flags);
/* Notify ipath_destroy_qp() if it is waiting. */
if (atomic_dec_and_test(&qp->refcount))

static void update_sge(struct ipath_sge_state *ss, u32 length)
struct ipath_sge *sge = &ss->sge;
sge->vaddr += length;
sge->length -= length;
sge->sge_length -= length;
if (sge->sge_length == 0) {
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr != NULL) {
if (++sge->n >= IPATH_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
sge->length = sge->mr->map[sge->m]->segs[sge->n].length;

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
return data >> shift;

static inline u32 set_upper_bits(u32 data, u32 shift)
return data << shift;

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);

static inline u32 get_upper_bits(u32 data, u32 shift)
return data << shift;

static inline u32 set_upper_bits(u32 data, u32 shift)
return data >> shift;

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
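/*
 * Worked example for the little-endian variants above (illustrative
 * values, not part of the original source):
 *	get_upper_bits(0x12345678, 8)       == 0x00123456
 *	set_upper_bits(0x00000056, 24)      == 0x56000000
 *	clear_upper_bytes(0x12345678, 2, 0) == 0x00005678
 * i.e. clear_upper_bytes() keeps only the low n bytes, positioned at
 * byte offset off, which copy_io() below uses to build the final,
 * partially filled dword of a packet.
 */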
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
u32 length, unsigned flush_wc)
u32 len = ss->sge.length;
if (len > ss->sge.sge_length)
len = ss->sge.sge_length;
/* If the source address is not aligned, try to align it. */
off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
y = sizeof(u32) - off;
if (len + extra >= sizeof(u32)) {
data |= set_upper_bits(v, extra *
len = sizeof(u32) - extra;
__raw_writel(data, piobuf);
/* Clear unused upper bytes */
data |= clear_upper_bytes(v, len, extra);
/* Source address is aligned. */
u32 *addr = (u32 *) ss->sge.vaddr;
int shift = extra * BITS_PER_BYTE;
int ushift = 32 - shift;
while (l >= sizeof(u32)) {
data |= set_upper_bits(v, shift);
__raw_writel(data, piobuf);
data = get_upper_bits(v, ushift);
 * We still have 'extra' number of bytes leftover.
if (l + extra >= sizeof(u32)) {
data |= set_upper_bits(v, shift);
len -= l + extra - sizeof(u32);
__raw_writel(data, piobuf);
/* Clear unused upper bytes */
data |= clear_upper_bytes(v, l,
} else if (len == length) {
} else if (len == length) {
 * Need to round up for the last dword in the
__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
last = ((u32 *) ss->sge.vaddr)[w - 1];
__iowrite32_copy(piobuf, ss->sge.vaddr, w);
extra = len & (sizeof(u32) - 1);
u32 v = ((u32 *) ss->sge.vaddr)[w];
/* Clear unused upper bytes */
data = clear_upper_bytes(v, extra, 0);
/* Update address before sending packet. */
update_sge(ss, length);
/* must flush early everything before trigger word */
__raw_writel(last, piobuf);
/* be sure trigger word is written */
__raw_writel(last, piobuf);

 * Convert IB rate to delay multiplier.
unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
case IB_RATE_2_5_GBPS: return 8;
case IB_RATE_5_GBPS: return 4;
case IB_RATE_10_GBPS: return 2;
case IB_RATE_20_GBPS: return 1;

 * Convert delay multiplier to IB rate
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
case 8: return IB_RATE_2_5_GBPS;
case 4: return IB_RATE_5_GBPS;
case 2: return IB_RATE_10_GBPS;
case 1: return IB_RATE_20_GBPS;
default: return IB_RATE_PORT_CURRENT;
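/*
 * Example: the multiplier is the relative time to send one dword
 * compared with the fastest rate (4X DDR, 20 Gbps, mult 1), so
 * IB_RATE_5_GBPS maps to 4 and ipath_mult_to_ib_rate(4) maps back to
 * IB_RATE_5_GBPS; an unknown multiplier falls back to
 * IB_RATE_PORT_CURRENT.
 */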
static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
struct ipath_verbs_txreq *tx = NULL;
spin_lock_irqsave(&dev->pending_lock, flags);
if (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
spin_unlock_irqrestore(&dev->pending_lock, flags);

static inline void put_txreq(struct ipath_ibdev *dev,
struct ipath_verbs_txreq *tx)
unsigned long flags;
spin_lock_irqsave(&dev->pending_lock, flags);
list_add(&tx->txreq.list, &dev->txreq_free);
spin_unlock_irqrestore(&dev->pending_lock, flags);

static void sdma_complete(void *cookie, int status)
struct ipath_verbs_txreq *tx = cookie;
struct ipath_qp *qp = tx->qp;
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
/* Generate a completion queue entry if needed */
if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
ipath_send_complete(qp, tx->wqe, ibs);
if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
kfree(tx->txreq.map_addr);
if (atomic_dec_and_test(&qp->refcount))

 * Compute the number of clock cycles of delay before sending the next packet.
 * The multipliers reflect the number of clocks for the fastest rate so
 * one tick at 4xDDR is 8 ticks at 1xSDR.
 * If the destination port will take longer to receive a packet than
 * the outgoing link can send it, we need to delay sending the next packet
 * by the difference in time it takes the receiver to receive and the sender
 * to send this packet.
 * Note that this delay is always correct for UC and RC but not always
 * optimal for UD. For UD, the destination HCA can be different for each
 * packet, in which case, we could send packets to a different destination
 * while "waiting" for the delay. The overhead for doing this without
 * HW support is more than just paying the cost of delaying some packets
static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
return (rcv_mult > snd_mult) ?
(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
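/*
 * Worked example (illustrative values): for plen == 535 dwords sent
 * from a 4X DDR port (snd_mult == 1) to a 1X DDR destination
 * (rcv_mult == 4), the next packet is delayed by
 *	(535 * (4 - 1) + 1) >> 1 == 803
 * ticks; if the receiver is at least as fast as the sender the delay
 * is zero.
 */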
static int ipath_verbs_send_dma(struct ipath_qp *qp,
struct ipath_ib_header *hdr, u32 hdrwords,
struct ipath_sge_state *ss, u32 len,
u32 plen, u32 dwords)
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ipath_devdata *dd = dev->dd;
struct ipath_verbs_txreq *tx;
/* resend previously constructed packet */
ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
tx = get_txreq(dev);
 * Get the saved delay count we computed for the previous packet
 * and save the delay count for this packet to be used next time
control = qp->s_pkt_delay;
qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
atomic_inc(&qp->refcount);
tx->wqe = qp->s_wqe;
tx->txreq.callback = sdma_complete;
tx->txreq.callback_cookie = tx;
tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
/* VL15 packets bypass credit check */
if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
control |= 1ULL << 31;
tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
 * Don't try to DMA if it takes more descriptors than
ndesc = ipath_count_sge(ss, len);
if (ndesc >= dd->ipath_sdma_descq_cnt)
tx->hdr.pbc[0] = cpu_to_le32(plen);
tx->hdr.pbc[1] = cpu_to_le32(control);
memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
tx->txreq.sg_count = ndesc;
tx->map_len = (hdrwords + 2) << 2;
tx->txreq.map_addr = &tx->hdr;
ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
/* save ss and length in dwords */
/* Allocate a buffer and copy the header and payload to it. */
tx->map_len = (plen + 1) << 2;
piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
if (unlikely(piobuf == NULL)) {
tx->txreq.map_addr = piobuf;
tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
tx->txreq.sg_count = 1;
*piobuf++ = (__force u32) cpu_to_le32(plen);
*piobuf++ = (__force u32) cpu_to_le32(control);
memcpy(piobuf, hdr, hdrwords << 2);
ipath_copy_from_sge(piobuf + hdrwords, ss, len);
ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
 * If we couldn't queue the DMA request, save the info
 * and try again later rather than destroying the
 * buffer and undoing the side effects of the copy.
if (atomic_dec_and_test(&qp->refcount))

static int ipath_verbs_send_pio(struct ipath_qp *qp,
struct ipath_ib_header *ibhdr, u32 hdrwords,
struct ipath_sge_state *ss, u32 len,
u32 plen, u32 dwords)
struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
u32 *hdr = (u32 *) ibhdr;
u32 __iomem *piobuf;
piobuf = ipath_getpiobuf(dd, plen, NULL);
if (unlikely(piobuf == NULL)) {
 * Get the saved delay count we computed for the previous packet
 * and save the delay count for this packet to be used next time
control = qp->s_pkt_delay;
qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
/* VL15 packets bypass credit check */
if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
control |= 1ULL << 31;
 * Write the length to the control qword plus any needed flags.
 * We have to flush after the PBC for correctness on some cpus
 * or WC buffer can be written out of order.
writeq(((u64) control << 32) | plen, piobuf);
flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
 * If there is just the header portion, must flush before
 * writing last word of header for correctness, and after
 * the last header word (trigger word).
__iowrite32_copy(piobuf, hdr, hdrwords - 1);
__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
__iowrite32_copy(piobuf, hdr, hdrwords);
__iowrite32_copy(piobuf, hdr, hdrwords);
/* The common case is aligned and contained in one segment. */
if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
!((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
u32 *addr = (u32 *) ss->sge.vaddr;
/* Update address before sending packet. */
update_sge(ss, len);
__iowrite32_copy(piobuf, addr, dwords - 1);
/* must flush early everything before trigger word */
__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
/* be sure trigger word is written */
__iowrite32_copy(piobuf, addr, dwords);
copy_io(piobuf, ss, len, flush_wc);
ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);

 * ipath_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
u32 hdrwords, struct ipath_sge_state *ss, u32 len)
struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
u32 dwords = (len + 3) >> 2;
 * Calculate the send buffer trigger address.
 * The +1 counts for the pbc control dword following the pbc length.
plen = hdrwords + dwords + 1;
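/*
 * Example (illustrative): a 14-dword header with a 256-byte payload
 * gives dwords == (256 + 3) >> 2 == 64, so plen == 14 + 64 + 1 == 79
 * dwords, the "+ 1" accounting for the PBC control dword.
 */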
 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
 * can defer SDMA restart until link goes ACTIVE without
 * worrying about just how we got there.
if (qp->ibqp.qp_type == IB_QPT_SMI)
ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
/* All non-VL15 packets are dropped if link is not ACTIVE */
else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
} else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,

int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
u64 *rwords, u64 *spkts, u64 *rpkts,
if (!(dd->ipath_flags & IPATH_INITTED)) {
/* no hardware, freeze, etc. */
*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

 * ipath_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 * Return the counters needed by recv_pma_get_portcounters().
int ipath_get_counters(struct ipath_devdata *dd,
struct ipath_verbs_counters *cntrs)
struct ipath_cregs const *crp = dd->ipath_cregs;
if (!(dd->ipath_flags & IPATH_INITTED)) {
/* no hardware, freeze, etc. */
cntrs->symbol_error_counter =
ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
cntrs->link_error_recovery_counter =
ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
 * The link downed counter counts when the other side downs the
 * connection. We add in the number of times we downed the link
 * due to local link integrity errors to compensate.
cntrs->link_downed_counter =
ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
cntrs->port_rcv_errors =
ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
ipath_snap_cntr(dd, crp->cr_portovflcnt) +
ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
ipath_snap_cntr(dd, crp->cr_erricrccnt) +
ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
ipath_snap_cntr(dd, crp->cr_badformatcnt) +
dd->ipath_rxfc_unsupvl_errs;
if (crp->cr_rxotherlocalphyerrcnt)
cntrs->port_rcv_errors +=
ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
if (crp->cr_rxvlerrcnt)
cntrs->port_rcv_errors +=
ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
cntrs->port_rcv_remphys_errors =
ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
cntrs->local_link_integrity_errors =
crp->cr_locallinkintegrityerrcnt ?
ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
dd->ipath_lli_errs : dd->ipath_lli_errors);
cntrs->excessive_buffer_overrun_errors =
crp->cr_excessbufferovflcnt ?
ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
dd->ipath_overrun_thresh_errs;
cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;

 * ipath_ib_piobufavail - callback when a PIO buffer is available
 * @arg: the device pointer
 * This is called from ipath_intr() at interrupt level when a PIO buffer is
 * available after ipath_verbs_send() returned an error that no buffers were
 * available. Return 1 if we consumed all the PIO buffers and we still have
 * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
int ipath_ib_piobufavail(struct ipath_ibdev *dev)
struct ipath_qp *qp;
unsigned long flags;
spin_lock_irqsave(&dev->pending_lock, flags);
while (!list_empty(&dev->piowait)) {
qp = list_entry(dev->piowait.next, struct ipath_qp,
list_del_init(&qp->piowait);
clear_bit(IPATH_S_BUSY, &qp->s_busy);
tasklet_hi_schedule(&qp->s_task);
spin_unlock_irqrestore(&dev->pending_lock, flags);

static int ipath_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
struct ipath_ibdev *dev = to_idev(ibdev);
memset(props, 0, sizeof(*props));
props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID;
props->page_size_cap = PAGE_SIZE;
props->vendor_id = dev->dd->ipath_vendorid;
props->vendor_part_id = dev->dd->ipath_deviceid;
props->hw_ver = dev->dd->ipath_pcirev;
props->sys_image_guid = dev->sys_image_guid;
props->max_mr_size = ~0ull;
props->max_qp = ib_ipath_max_qps;
props->max_qp_wr = ib_ipath_max_qp_wrs;
props->max_sge = ib_ipath_max_sges;
props->max_cq = ib_ipath_max_cqs;
props->max_ah = ib_ipath_max_ahs;
props->max_cqe = ib_ipath_max_cqes;
props->max_mr = dev->lk_table.max;
props->max_fmr = dev->lk_table.max;
props->max_map_per_fmr = 32767;
props->max_pd = ib_ipath_max_pds;
props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
props->max_qp_init_rd_atom = 255;
/* props->max_res_rd_atom */
props->max_srq = ib_ipath_max_srqs;
props->max_srq_wr = ib_ipath_max_srq_wrs;
props->max_srq_sge = ib_ipath_max_srq_sges;
/* props->local_ca_ack_delay */
props->atomic_cap = IB_ATOMIC_GLOB;
props->max_pkeys = ipath_get_npkeys(dev->dd);
props->max_mcast_grp = ib_ipath_max_mcast_grps;
props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
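/*
 * For example, with the default module parameters above this reports
 * max_mcast_qp_attach == 16 and max_mcast_grp == 16384, so
 * max_total_mcast_qp_attach == 16 * 16384 == 262144.
 */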
const u8 ipath_cvt_physportstate[32] = {
[INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
[INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
IB_PHYSPORTSTATE_CFG_TRAIN,
[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
IB_PHYSPORTSTATE_CFG_TRAIN,
[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
IB_PHYSPORTSTATE_CFG_TRAIN,
[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN

u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);

static int ipath_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
struct ipath_ibdev *dev = to_idev(ibdev);
struct ipath_devdata *dd = dev->dd;
u16 lid = dd->ipath_lid;
memset(props, 0, sizeof(*props));
props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
props->lmc = dd->ipath_lmc;
props->sm_lid = dev->sm_lid;
props->sm_sl = dev->sm_sl;
ibcstat = dd->ipath_lastibcstat;
/* map LinkState to IB portinfo values. */
props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
/* See phys_state_show() */
props->phys_state = /* MEA: assumes shift == 0 */
ipath_cvt_physportstate[dd->ipath_lastibcstat &
props->port_cap_flags = dev->port_cap_flags;
props->gid_tbl_len = 1;
props->max_msg_sz = 0x80000000;
props->pkey_tbl_len = ipath_get_npkeys(dd);
props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
dev->z_pkey_violations;
props->qkey_viol_cntr = dev->qkey_violations;
props->active_width = dd->ipath_link_width_active;
/* See rate_show() */
props->active_speed = dd->ipath_link_speed_active;
props->max_vl_num = 1;	/* VLCap = VL0 */
props->init_type_reply = 0;
props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
switch (dd->ipath_ibmtu) {
props->active_mtu = mtu;
props->subnet_timeout = dev->subnet_timeout;

static int ipath_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify)
if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
IB_DEVICE_MODIFY_NODE_DESC)) {
if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
memcpy(device->node_desc, device_modify->node_desc, 64);
if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
to_idev(device)->sys_image_guid =
cpu_to_be64(device_modify->sys_image_guid);

static int ipath_modify_port(struct ib_device *ibdev,
u8 port, int port_modify_mask,
struct ib_port_modify *props)
struct ipath_ibdev *dev = to_idev(ibdev);
dev->port_cap_flags |= props->set_port_cap_mask;
dev->port_cap_flags &= ~props->clr_port_cap_mask;
if (port_modify_mask & IB_PORT_SHUTDOWN)
ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
dev->qkey_violations = 0;

static int ipath_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid)
struct ipath_ibdev *dev = to_idev(ibdev);
gid->global.subnet_prefix = dev->gid_prefix;
gid->global.interface_id = dev->dd->ipath_guid;

static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
struct ipath_ibdev *dev = to_idev(ibdev);
struct ipath_pd *pd;
 * This is actually totally arbitrary. Some correctness tests
 * assume there's a maximum number of PDs that can be allocated.
 * We don't actually have this limit, but we fail the test if
 * we allow allocations of more than we report for this value.
pd = kmalloc(sizeof *pd, GFP_KERNEL);
ret = ERR_PTR(-ENOMEM);
spin_lock(&dev->n_pds_lock);
if (dev->n_pds_allocated == ib_ipath_max_pds) {
spin_unlock(&dev->n_pds_lock);
ret = ERR_PTR(-ENOMEM);
dev->n_pds_allocated++;
spin_unlock(&dev->n_pds_lock);
/* ib_alloc_pd() will initialize pd->ibpd. */
pd->user = udata != NULL;

static int ipath_dealloc_pd(struct ib_pd *ibpd)
struct ipath_pd *pd = to_ipd(ibpd);
struct ipath_ibdev *dev = to_idev(ibpd->device);
spin_lock(&dev->n_pds_lock);
dev->n_pds_allocated--;
spin_unlock(&dev->n_pds_lock);

 * ipath_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 * This may be called from interrupt context.
static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
struct ipath_ah *ah;
struct ipath_ibdev *dev = to_idev(pd->device);
unsigned long flags;
/* A multicast address requires a GRH (see ch. 8.4.1). */
if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
ah_attr->dlid != IPATH_PERMISSIVE_LID &&
!(ah_attr->ah_flags & IB_AH_GRH)) {
ret = ERR_PTR(-EINVAL);
if (ah_attr->dlid == 0) {
ret = ERR_PTR(-EINVAL);
if (ah_attr->port_num < 1 ||
ah_attr->port_num > pd->device->phys_port_cnt) {
ret = ERR_PTR(-EINVAL);
ah = kmalloc(sizeof *ah, GFP_ATOMIC);
ret = ERR_PTR(-ENOMEM);
spin_lock_irqsave(&dev->n_ahs_lock, flags);
if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
ret = ERR_PTR(-ENOMEM);
dev->n_ahs_allocated++;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
/* ib_create_ah() will initialize ah->ibah. */
ah->attr = *ah_attr;
ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
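/*
 * Note that the static rate is stored internally as the delay
 * multiplier (e.g. IB_RATE_10_GBPS is kept as 2); ipath_query_ah()
 * below converts it back with ipath_mult_to_ib_rate().
 */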
 * ipath_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 * This may be called from interrupt context.
static int ipath_destroy_ah(struct ib_ah *ibah)
struct ipath_ibdev *dev = to_idev(ibah->device);
struct ipath_ah *ah = to_iah(ibah);
unsigned long flags;
spin_lock_irqsave(&dev->n_ahs_lock, flags);
dev->n_ahs_allocated--;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
struct ipath_ah *ah = to_iah(ibah);
*ah_attr = ah->attr;
ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);

 * ipath_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
unsigned ipath_get_npkeys(struct ipath_devdata *dd)
return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);

 * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
ret = dd->ipath_pd[0]->port_pkeys[index];

static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
struct ipath_ibdev *dev = to_idev(ibdev);
if (index >= ipath_get_npkeys(dev->dd)) {
*pkey = ipath_get_pkey(dev->dd, index);

 * ipath_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the InfiniPath driver
static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
struct ipath_ucontext *context;
struct ib_ucontext *ret;
context = kmalloc(sizeof *context, GFP_KERNEL);
ret = ERR_PTR(-ENOMEM);
ret = &context->ibucontext;

static int ipath_dealloc_ucontext(struct ib_ucontext *context)
kfree(to_iucontext(context));

static int ipath_verbs_register_sysfs(struct ib_device *dev);

static void __verbs_timer(unsigned long arg)
struct ipath_devdata *dd = (struct ipath_devdata *) arg;
/* Handle verbs layer timeouts. */
ipath_ib_timer(dd->verbs_dev);
mod_timer(&dd->verbs_timer, jiffies + 1);

static int enable_timer(struct ipath_devdata *dd)
 * Early chips had a design flaw where the chip and kernel idea
 * of the tail register don't always agree, and therefore we won't
 * get an interrupt on the next packet received.
 * If the board supports per packet receive interrupts, use it.
 * Otherwise, the timer function periodically checks for packets
 * to cover this case.
 * Either way, the timer is needed for verbs layer related
if (dd->ipath_flags & IPATH_GPIO_INTR) {
ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
0x2074076542310ULL);
/* Enable GPIO bit 2 interrupt */
dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
dd->ipath_gpio_mask);
init_timer(&dd->verbs_timer);
dd->verbs_timer.function = __verbs_timer;
dd->verbs_timer.data = (unsigned long)dd;
dd->verbs_timer.expires = jiffies + 1;
add_timer(&dd->verbs_timer);

static int disable_timer(struct ipath_devdata *dd)
/* Disable GPIO bit 2 interrupt */
if (dd->ipath_flags & IPATH_GPIO_INTR) {
/* Disable GPIO bit 2 interrupt */
dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
dd->ipath_gpio_mask);
 * We might want to undo changes to debugportselect,
del_timer_sync(&dd->verbs_timer);

 * ipath_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return the allocated ipath_ibdev pointer or NULL on error.
int ipath_register_ib_device(struct ipath_devdata *dd)
struct ipath_verbs_counters cntrs;
struct ipath_ibdev *idev;
struct ib_device *dev;
struct ipath_verbs_txreq *tx;
idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
if (dd->ipath_sdma_descq_cnt) {
tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
idev->txreq_bufs = tx;
/* Only need to initialize non-zero fields. */
spin_lock_init(&idev->n_pds_lock);
spin_lock_init(&idev->n_ahs_lock);
spin_lock_init(&idev->n_cqs_lock);
spin_lock_init(&idev->n_qps_lock);
spin_lock_init(&idev->n_srqs_lock);
spin_lock_init(&idev->n_mcast_grps_lock);
spin_lock_init(&idev->qp_table.lock);
spin_lock_init(&idev->lk_table.lock);
idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
/* Set the prefix to the default value (see ch. 4.1.1) */
idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
 * The top ib_ipath_lkey_table_size bits are used to index the
 * table. The lower 8 bits can be owned by the user (copied from
 * the LKEY). The remaining bits act as a generation number or tag.
idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
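/*
 * Example: with the default ib_ipath_lkey_table_size of 12 this gives
 * a table of 1 << 12 == 4096 entries; per the comment above, the
 * generation bits make a recycled index yield a different LKEY value.
 */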
idev->lk_table.table = kzalloc(idev->lk_table.max *
sizeof(*idev->lk_table.table),
if (idev->lk_table.table == NULL) {
INIT_LIST_HEAD(&idev->pending_mmaps);
spin_lock_init(&idev->pending_lock);
idev->mmap_offset = PAGE_SIZE;
spin_lock_init(&idev->mmap_offset_lock);
INIT_LIST_HEAD(&idev->pending[0]);
INIT_LIST_HEAD(&idev->pending[1]);
INIT_LIST_HEAD(&idev->pending[2]);
INIT_LIST_HEAD(&idev->piowait);
INIT_LIST_HEAD(&idev->rnrwait);
INIT_LIST_HEAD(&idev->txreq_free);
idev->pending_index = 0;
idev->port_cap_flags =
IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
/* Snapshot current HW counters to "clear" them. */
ipath_get_counters(dd, &cntrs);
idev->z_symbol_error_counter = cntrs.symbol_error_counter;
idev->z_link_error_recovery_counter =
cntrs.link_error_recovery_counter;
idev->z_link_downed_counter = cntrs.link_downed_counter;
idev->z_port_rcv_errors = cntrs.port_rcv_errors;
idev->z_port_rcv_remphys_errors =
cntrs.port_rcv_remphys_errors;
idev->z_port_xmit_discards = cntrs.port_xmit_discards;
idev->z_port_xmit_data = cntrs.port_xmit_data;
idev->z_port_rcv_data = cntrs.port_rcv_data;
idev->z_port_xmit_packets = cntrs.port_xmit_packets;
idev->z_port_rcv_packets = cntrs.port_rcv_packets;
idev->z_local_link_integrity_errors =
cntrs.local_link_integrity_errors;
idev->z_excessive_buffer_overrun_errors =
cntrs.excessive_buffer_overrun_errors;
idev->z_vl15_dropped = cntrs.vl15_dropped;
for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
list_add(&tx->txreq.list, &idev->txreq_free);
 * The system image GUID is supposed to be the same for all
 * IB HCAs in a single system but since there can be other
 * device types in the system, we can't be sure this is unique.
if (!sys_image_guid)
sys_image_guid = dd->ipath_guid;
idev->sys_image_guid = sys_image_guid;
idev->ib_unit = dd->ipath_unit;

strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
dev->owner = THIS_MODULE;
dev->node_guid = dd->ipath_guid;
dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
dev->uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
(1ull << IB_USER_VERBS_CMD_POST_RECV) |
(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
dev->num_comp_vectors = 1;
dev->dma_device = &dd->pcidev->dev;
dev->query_device = ipath_query_device;
dev->modify_device = ipath_modify_device;
dev->query_port = ipath_query_port;
dev->modify_port = ipath_modify_port;
dev->query_pkey = ipath_query_pkey;
dev->query_gid = ipath_query_gid;
dev->alloc_ucontext = ipath_alloc_ucontext;
dev->dealloc_ucontext = ipath_dealloc_ucontext;
dev->alloc_pd = ipath_alloc_pd;
dev->dealloc_pd = ipath_dealloc_pd;
dev->create_ah = ipath_create_ah;
dev->destroy_ah = ipath_destroy_ah;
dev->query_ah = ipath_query_ah;
dev->create_srq = ipath_create_srq;
dev->modify_srq = ipath_modify_srq;
dev->query_srq = ipath_query_srq;
dev->destroy_srq = ipath_destroy_srq;
dev->create_qp = ipath_create_qp;
dev->modify_qp = ipath_modify_qp;
dev->query_qp = ipath_query_qp;
dev->destroy_qp = ipath_destroy_qp;
dev->post_send = ipath_post_send;
dev->post_recv = ipath_post_receive;
dev->post_srq_recv = ipath_post_srq_receive;
dev->create_cq = ipath_create_cq;
dev->destroy_cq = ipath_destroy_cq;
dev->resize_cq = ipath_resize_cq;
dev->poll_cq = ipath_poll_cq;
dev->req_notify_cq = ipath_req_notify_cq;
dev->get_dma_mr = ipath_get_dma_mr;
dev->reg_phys_mr = ipath_reg_phys_mr;
dev->reg_user_mr = ipath_reg_user_mr;
dev->dereg_mr = ipath_dereg_mr;
dev->alloc_fmr = ipath_alloc_fmr;
dev->map_phys_fmr = ipath_map_phys_fmr;
dev->unmap_fmr = ipath_unmap_fmr;
dev->dealloc_fmr = ipath_dealloc_fmr;
dev->attach_mcast = ipath_multicast_attach;
dev->detach_mcast = ipath_multicast_detach;
dev->process_mad = ipath_process_mad;
dev->mmap = ipath_mmap;
dev->dma_ops = &ipath_dma_mapping_ops;

snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s", init_utsname()->nodename);

ret = ib_register_device(dev);
if (ipath_verbs_register_sysfs(dev))
ib_unregister_device(dev);
kfree(idev->lk_table.table);
kfree(idev->qp_table.table);
kfree(idev->txreq_bufs);
ib_dealloc_device(dev);
ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
dd->verbs_dev = idev;

void ipath_unregister_ib_device(struct ipath_ibdev *dev)
struct ib_device *ibdev = &dev->ibdev;
disable_timer(dev->dd);
ib_unregister_device(ibdev);
if (!list_empty(&dev->pending[0]) ||
!list_empty(&dev->pending[1]) ||
!list_empty(&dev->pending[2]))
ipath_dev_err(dev->dd, "pending list not empty!\n");
if (!list_empty(&dev->piowait))
ipath_dev_err(dev->dd, "piowait list not empty!\n");
if (!list_empty(&dev->rnrwait))
ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
if (!ipath_mcast_tree_empty())
ipath_dev_err(dev->dd, "multicast table memory leak!\n");
 * Note that ipath_unregister_ib_device() can be called before all
 * the QPs are destroyed!
ipath_free_all_qps(&dev->qp_table);
kfree(dev->qp_table.table);
kfree(dev->lk_table.table);
kfree(dev->txreq_bufs);
ib_dealloc_device(ibdev);

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
struct ipath_ibdev *dev =
container_of(device, struct ipath_ibdev, ibdev.dev);
return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
struct ipath_ibdev *dev =
container_of(device, struct ipath_ibdev, ibdev.dev);
ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);

static ssize_t show_stats(struct device *device, struct device_attribute *attr,
struct ipath_ibdev *dev =
container_of(device, struct ipath_ibdev, ibdev.dev);
dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
dev->n_other_naks, dev->n_timeouts,
dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
dev->n_no_piobuf, dev->n_unaligned,
dev->n_pkt_drops, dev->n_wqe_errs);
for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
const struct ipath_opcode_stats *si = &dev->opstats[i];
if (!si->n_packets && !si->n_bytes)
len += sprintf(buf + len, "%02x %llu/%llu\n", i,
(unsigned long long) si->n_packets,
(unsigned long long) si->n_bytes);

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);

static struct device_attribute *ipath_class_attributes[] = {

static int ipath_verbs_register_sysfs(struct ib_device *dev)
for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
if (device_create_file(&dev->dev,
ipath_class_attributes[i])) {