/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/utsname.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ips_common.h"
/* Not static, because we don't want the compiler removing it */
const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;

static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_ipath_lkey_table_size = 12;
module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

unsigned int ib_ipath_debug; /* debug mask */
module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "Verbs debug mask");
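/*
 * Illustration only (values are hypothetical): these parameters can be
 * set at load time, e.g. "modprobe ib_ipath qp_table_size=521".
 * qp_table_size is a hash table size (a prime works well), and
 * lkey_table_size must satisfy 1 <= n <= 23 as noted above.
 */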
MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@pathscale.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");
const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = IPATH_POST_RECV_OK,
	[IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
	[IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
	[IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK,
	[IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
	[IB_QPS_ERR] = 0,
};
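/*
 * The table above is indexed by the current QP state and gates every
 * post and receive path; the pattern used throughout this file is:
 *
 *	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))
 *		return -EINVAL;
 *
 * RESET and ERR permit no operations at all.
 */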
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
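/*
 * When a send work request completes, the completion entry's opcode is
 * derived from the original work request opcode via this table, e.g.
 * wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode].  Note that both
 * immediate-data variants collapse onto the plain completion opcode.
 */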
/*
 * System image GUID.
 */
static __be64 sys_image_guid;
/**
 * ipath_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 */
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
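/*
 * The else-if branch above walks a memory region's segment list: each
 * map entry holds IPATH_SEGSZ (vaddr, length) segments, so the walk
 * advances segment index sge->n within map sge->m and moves to the
 * next map once all IPATH_SEGSZ segments of the current one are used.
 */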
/**
 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length > sge->sge_length) {
		length -= sge->sge_length;
		ss->sge = *ss->sg_list++;
	}
	while (length) {
		u32 len = sge->length;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}
/**
 * ipath_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			   struct ib_send_wr **bad_wr)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	int err = 0;

	/* Check that state is OK to post send. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		switch (qp->ibqp.qp_type) {
		case IB_QPT_UC:
		case IB_QPT_RC:
			err = ipath_post_ruc_send(qp, wr);
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			err = ipath_post_ud_send(qp, wr);
			break;

		default:
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
	}

bail:
	return err;
}
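/*
 * Error semantics: WRs are posted one at a time, so on failure *bad_wr
 * points at the first WR that was rejected; any WRs ahead of it in the
 * list have already been queued and will still execute.
 */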
/**
 * ipath_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i, j;

		if (wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = qp->r_rq.head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == qp->r_rq.tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head);
		wqe->wr_id = wr->wr_id;
		wqe->sg_list[0].mr = NULL;
		wqe->sg_list[0].vaddr = NULL;
		wqe->sg_list[0].length = 0;
		wqe->sg_list[0].sge_length = 0;
		wqe->length = 0;
		for (i = 0, j = 0; i < wr->num_sge; i++) {
			/* Check LKEY */
			if (to_ipd(qp->ibqp.pd)->user &&
			    wr->sg_list[i].lkey == 0) {
				spin_unlock_irqrestore(&qp->r_rq.lock,
						       flags);
				*bad_wr = wr;
				ret = -EINVAL;
				goto bail;
			}
			if (wr->sg_list[i].length == 0)
				continue;
			if (!ipath_lkey_ok(
				    &to_idev(qp->ibqp.device)->lk_table,
				    &wqe->sg_list[j], &wr->sg_list[i],
				    IB_ACCESS_LOCAL_WRITE)) {
				spin_unlock_irqrestore(&qp->r_rq.lock,
						       flags);
				*bad_wr = wr;
				ret = -EINVAL;
				goto bail;
			}
			wqe->length += wr->sg_list[i].length;
			j++;
		}
		wqe->num_sge = j;
		qp->r_rq.head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
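/*
 * The receive queue above is a circular buffer: advancing head into
 * tail (next == tail) means the ring is full, so one slot is always
 * left unused to distinguish a full ring from an empty one.
 */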
/**
 * ipath_qp_rcv - processing an incoming packet on a QP
 * @dev: the device the packet came on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void ipath_qp_rcv(struct ipath_ibdev *dev,
			 struct ipath_ib_header *hdr, int has_grh,
			 void *data, u32 tlen, struct ipath_qp *qp)
{
	/* Check for valid receive state. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
		dev->n_pkt_drops++;
		return;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}
}
/**
 * ipath_ib_rcv - process an incoming packet
 * @arg: the device pointer
 * @rhdr: the header of the packet
 * @data: the packet data
 * @tlen: the packet length
 *
 * This is called from ipath_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
{
	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
	struct ipath_ib_header *hdr = rhdr;
	struct ipath_other_headers *ohdr;
	struct ipath_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	if (unlikely(dev == NULL))
		goto bail;

	if (unlikely(tlen < 24)) {	/* LRH+BTH+CRC */
		dev->rcv_errors++;
		goto bail;
	}

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < IPS_MULTICAST_LID_BASE) {
		lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
		if (unlikely(lid != ipath_layer_get_lid(dev->dd))) {
			dev->rcv_errors++;
			goto bail;
		}
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == IPS_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == IPS_LRH_GRH)
		ohdr = &hdr->u.l.oth;
	else {
		dev->rcv_errors++;
		goto bail;
	}

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	dev->opstats[opcode].n_bytes += tlen;
	dev->opstats[opcode].n_packets++;

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & IPS_QPN_MASK;
	if (qp_num == IPS_MULTICAST_QPN) {
		struct ipath_mcast *mcast;
		struct ipath_mcast_qp *p;

		mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
		if (mcast == NULL) {
			dev->n_pkt_drops++;
			goto bail;
		}
		dev->n_multicast_rcv++;
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			ipath_qp_rcv(dev, hdr, lnh == IPS_LRH_GRH, data,
				     tlen, p->qp);
		/*
		 * Notify ipath_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
		if (qp) {
			dev->n_unicast_rcv++;
			ipath_qp_rcv(dev, hdr, lnh == IPS_LRH_GRH, data,
				     tlen, qp);
			/*
			 * Notify ipath_destroy_qp() if it is waiting
			 * for us to finish.
			 */
			if (atomic_dec_and_test(&qp->refcount))
				wake_up(&qp->wait);
		} else
			dev->n_pkt_drops++;
	}

bail:;
}
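/*
 * Reference counting protocol: ipath_lookup_qpn() and
 * ipath_mcast_find() return their object with a reference held; the
 * receive path drops that reference when done and wakes any thread
 * sleeping in ipath_destroy_qp() or ipath_multicast_detach().
 */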
/**
 * ipath_ib_timer - verbs timer
 * @arg: the device pointer
 *
 * This is called from ipath_do_rcv_timer() at interrupt level to check for
 * QPs which need retransmits and to collect performance numbers.
 */
static void ipath_ib_timer(void *arg)
{
	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
	struct ipath_qp *resend = NULL;
	struct list_head *last;
	struct ipath_qp *qp;
	unsigned long flags;

	if (dev == NULL)
		return;

	spin_lock_irqsave(&dev->pending_lock, flags);
	/* Start filling the next pending queue. */
	if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
		dev->pending_index = 0;
	/* Save any requests still in the new queue, they have timed out. */
	last = &dev->pending[dev->pending_index];
	while (!list_empty(last)) {
		qp = list_entry(last->next, struct ipath_qp, timerwait);
		list_del_init(&qp->timerwait);
		qp->timer_next = resend;
		resend = qp;
		atomic_inc(&qp->refcount);
	}
	last = &dev->rnrwait;
	if (!list_empty(last)) {
		qp = list_entry(last->next, struct ipath_qp, timerwait);
		if (--qp->s_rnr_timeout == 0) {
			do {
				list_del_init(&qp->timerwait);
				tasklet_hi_schedule(&qp->s_task);
				if (list_empty(last))
					break;
				qp = list_entry(last->next, struct ipath_qp,
						timerwait);
			} while (qp->s_rnr_timeout == 0);
		}
	}
	/*
	 * We should only be in the started state if pma_sample_start != 0
	 */
	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
	    --dev->pma_sample_start == 0) {
		dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
		ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword,
					      &dev->ipath_rword,
					      &dev->ipath_spkts,
					      &dev->ipath_rpkts,
					      &dev->ipath_xmit_wait);
	}
	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
		if (dev->pma_sample_interval == 0) {
			u64 ta, tb, tc, td, te;

			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
			ipath_layer_snapshot_counters(dev->dd, &ta, &tb,
						      &tc, &td, &te);

			dev->ipath_sword = ta - dev->ipath_sword;
			dev->ipath_rword = tb - dev->ipath_rword;
			dev->ipath_spkts = tc - dev->ipath_spkts;
			dev->ipath_rpkts = td - dev->ipath_rpkts;
			dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
		} else
			dev->pma_sample_interval--;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/* XXX What if timer fires again while this is running? */
	for (qp = resend; qp != NULL; qp = qp->timer_next) {
		struct ib_wc wc;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
			dev->n_timeouts++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
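/*
 * The pending[] lists behave like a coarse timer wheel: new RC
 * requests are queued on the current pending list, and each timer tick
 * rotates pending_index, so anything still queued on the list about to
 * be reused has waited a full rotation and is treated as timed out and
 * scheduled for retransmission above.
 */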
/**
 * ipath_ib_piobufavail - callback when a PIO buffer is available
 * @arg: the device pointer
 *
 * This is called from ipath_intr() at interrupt level when a PIO buffer is
 * available after ipath_verbs_send() returned an error that no buffers were
 * available. Return 1 if we consumed all the PIO buffers and we still have
 * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
 * return zero).
 */
static int ipath_ib_piobufavail(void *arg)
{
	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
	struct ipath_qp *qp;
	unsigned long flags;

	if (dev == NULL)
		goto bail;

	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(&dev->piowait)) {
		qp = list_entry(dev->piowait.next, struct ipath_qp,
				piowait);
		list_del_init(&qp->piowait);
		tasklet_hi_schedule(&qp->s_task);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

bail:
	return 0;
}
static int ipath_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	u32 vendor, boardrev, majrev, minrev;

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID;
	ipath_layer_query_device(dev->dd, &vendor, &boardrev,
				 &majrev, &minrev);
	props->vendor_id = vendor;
	props->vendor_part_id = boardrev;
	props->hw_ver = boardrev << 16 | majrev << 8 | minrev;

	props->sys_image_guid = dev->sys_image_guid;

	props->max_mr_size = ~0ull;
	props->max_qp = 0xffff;
	props->max_qp_wr = 0xffff;
	props->max_sge = 255;
	props->max_cq = 0xffff;
	props->max_cqe = 0xffff;
	props->max_mr = 0xffff;
	props->max_pd = 0xffff;
	props->max_qp_rd_atom = 1;
	props->max_qp_init_rd_atom = 1;
	/* props->max_res_rd_atom */
	props->max_srq = 0xffff;
	props->max_srq_wr = 0xffff;
	props->max_srq_sge = 255;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_HCA;
	props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
	props->max_mcast_grp = 0xffff;
	props->max_mcast_qp_attach = 0xffff;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}
const u8 ipath_cvt_physportstate[16] = {
	[INFINIPATH_IBCS_LT_STATE_DISABLED] = 3,
	[INFINIPATH_IBCS_LT_STATE_LINKUP] = 5,
	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2,
	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2,
	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1,
	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1,
	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4,
	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4,
	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4,
	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4,
	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6,
	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6,
	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
};
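/*
 * The values above are IB PortPhysicalState codes: 1 = Sleep,
 * 2 = Polling, 3 = Disabled, 4 = PortConfigurationTraining,
 * 5 = LinkUp, 6 = LinkErrorRecovery.
 */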
static int ipath_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	enum ib_mtu mtu;
	u16 lid = ipath_layer_get_lid(dev->dd);
	u64 ibcstat;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = dev->mkeyprot_resv_lmc & 7;
	props->sm_lid = dev->sm_lid;
	props->sm_sl = dev->sm_sl;
	ibcstat = ipath_layer_get_lastibcstat(dev->dd);
	props->state = ((ibcstat >> 4) & 0x3) + 1;
	/* See phys_state_show() */
	props->phys_state = ipath_cvt_physportstate[
		ipath_layer_get_lastibcstat(dev->dd) & 0xf];
	props->port_cap_flags = dev->port_cap_flags;
	props->gid_tbl_len = 1;
	props->max_msg_sz = 4096;
	props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd);
	props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) -
		dev->z_pkey_violations;
	props->qkey_viol_cntr = dev->qkey_violations;
	props->active_width = IB_WIDTH_4X;
	/* See rate_show() */
	props->active_speed = 1; /* SDR: 2.5 Gb/s per lane, 10 Gb/s at 4X */
	props->max_vl_num = 1;	/* VLCap = VL0 */
	props->init_type_reply = 0;

	props->max_mtu = IB_MTU_4096;
	switch (ipath_layer_get_ibmtu(dev->dd)) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = dev->subnet_timeout;

	return 0;
}
static int ipath_modify_device(struct ib_device *device,
			       int device_modify_mask,
			       struct ib_device_modify *device_modify)
{
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
		memcpy(device->node_desc, device_modify->node_desc, 64);

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		to_idev(device)->sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);

	ret = 0;

bail:
	return ret;
}
static int ipath_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	dev->port_cap_flags |= props->set_port_cap_mask;
	dev->port_cap_flags &= ~props->clr_port_cap_mask;
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		dev->qkey_violations = 0;

	return 0;
}
static int ipath_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= 1) {
		ret = -EINVAL;
		goto bail;
	}
	gid->global.subnet_prefix = dev->gid_prefix;
	gid->global.interface_id = ipath_layer_get_guid(dev->dd);

	ret = 0;

bail:
	return ret;
}
static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct ipath_pd *pd;
	struct ib_pd *ret;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}
static int ipath_dealloc_pd(struct ib_pd *ibpd)
{
	struct ipath_pd *pd = to_ipd(ibpd);

	kfree(pd);

	return 0;
}
/**
 * ipath_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah;
	struct ib_ah *ret;

	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
	    ah_attr->dlid != IPS_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (ah_attr->dlid == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > pd->device->phys_port_cnt) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;

	ret = &ah->ibah;

bail:
	return ret;
}
/**
 * ipath_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int ipath_destroy_ah(struct ib_ah *ibah)
{
	struct ipath_ah *ah = to_iah(ibah);

	kfree(ah);

	return 0;
}
static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}
static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= ipath_layer_get_npkeys(dev->dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = ipath_layer_get_pkey(dev->dd, index);
	ret = 0;

bail:
	return ret;
}
/**
 * ipath_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the InfiniPath driver
 */
static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct ipath_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int ipath_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}
static int ipath_verbs_register_sysfs(struct ib_device *dev);
/**
 * ipath_register_ib_device - register our device with the infiniband core
 * @unit: the device number to register
 * @dd: the device data structure
 * Return the allocated ipath_ibdev pointer or NULL on error.
 */
static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
{
	struct ipath_ibdev *idev;
	struct ib_device *dev;
	int ret;

	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
	if (idev == NULL)
		goto bail;

	dev = &idev->ibdev;

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&idev->qp_table.lock);
	spin_lock_init(&idev->lk_table.lock);
	idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);

	ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
	if (ret)
		goto err_qp;

	/*
	 * The top ib_ipath_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
	idev->lk_table.table = kzalloc(idev->lk_table.max *
				       sizeof(*idev->lk_table.table),
				       GFP_KERNEL);
	if (idev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
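
	/*
	 * Worked example: with the default lkey_table_size of 12 the
	 * table has 1 << 12 = 4096 entries, so bits 31..20 of a 32-bit
	 * LKEY index the table, bits 7..0 are caller-owned, and the
	 * bits in between serve as the generation tag.
	 */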
	spin_lock_init(&idev->pending_lock);
	INIT_LIST_HEAD(&idev->pending[0]);
	INIT_LIST_HEAD(&idev->pending[1]);
	INIT_LIST_HEAD(&idev->pending[2]);
	INIT_LIST_HEAD(&idev->piowait);
	INIT_LIST_HEAD(&idev->rnrwait);
	idev->pending_index = 0;
	idev->port_cap_flags =
		IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
	idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	/* [4] is the last slot of the five-entry array; [5] was out of bounds */
	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
	idev->link_width_enabled = 3;	/* 1x or 4x */
	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!sys_image_guid)
		sys_image_guid = ipath_layer_get_guid(dd);
	idev->sys_image_guid = sys_image_guid;
	idev->ib_unit = unit;
	idev->dd = dd;

	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
	dev->owner = THIS_MODULE;
	dev->node_guid = ipath_layer_get_guid(dd);
	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
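	/*
	 * uverbs_cmd_mask is a bitmap with one bit per userspace verbs
	 * command, advertising which commands this driver implements;
	 * commands whose bit is clear are rejected by the uverbs core
	 * before they reach the driver.
	 */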
	dev->node_type = IB_NODE_CA;
	dev->phys_port_cnt = 1;
	dev->dma_device = ipath_layer_get_device(dd);
	dev->class_dev.dev = dev->dma_device;
	dev->query_device = ipath_query_device;
	dev->modify_device = ipath_modify_device;
	dev->query_port = ipath_query_port;
	dev->modify_port = ipath_modify_port;
	dev->query_pkey = ipath_query_pkey;
	dev->query_gid = ipath_query_gid;
	dev->alloc_ucontext = ipath_alloc_ucontext;
	dev->dealloc_ucontext = ipath_dealloc_ucontext;
	dev->alloc_pd = ipath_alloc_pd;
	dev->dealloc_pd = ipath_dealloc_pd;
	dev->create_ah = ipath_create_ah;
	dev->destroy_ah = ipath_destroy_ah;
	dev->query_ah = ipath_query_ah;
	dev->create_srq = ipath_create_srq;
	dev->modify_srq = ipath_modify_srq;
	dev->query_srq = ipath_query_srq;
	dev->destroy_srq = ipath_destroy_srq;
	dev->create_qp = ipath_create_qp;
	dev->modify_qp = ipath_modify_qp;
	dev->query_qp = ipath_query_qp;
	dev->destroy_qp = ipath_destroy_qp;
	dev->post_send = ipath_post_send;
	dev->post_recv = ipath_post_receive;
	dev->post_srq_recv = ipath_post_srq_receive;
	dev->create_cq = ipath_create_cq;
	dev->destroy_cq = ipath_destroy_cq;
	dev->resize_cq = ipath_resize_cq;
	dev->poll_cq = ipath_poll_cq;
	dev->req_notify_cq = ipath_req_notify_cq;
	dev->get_dma_mr = ipath_get_dma_mr;
	dev->reg_phys_mr = ipath_reg_phys_mr;
	dev->reg_user_mr = ipath_reg_user_mr;
	dev->dereg_mr = ipath_dereg_mr;
	dev->alloc_fmr = ipath_alloc_fmr;
	dev->map_phys_fmr = ipath_map_phys_fmr;
	dev->unmap_fmr = ipath_unmap_fmr;
	dev->dealloc_fmr = ipath_dealloc_fmr;
	dev->attach_mcast = ipath_multicast_attach;
	dev->detach_mcast = ipath_multicast_detach;
	dev->process_mad = ipath_process_mad;

	snprintf(dev->node_desc, sizeof(dev->node_desc),
		 IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
	ret = ib_register_device(dev);
	if (ret)
		goto err_reg;

	if (ipath_verbs_register_sysfs(dev))
		goto err_class;

	ipath_layer_enable_timer(dd);

	goto bail;

err_class:
	ib_unregister_device(dev);
err_reg:
	kfree(idev->lk_table.table);
err_lk:
	kfree(idev->qp_table.table);
err_qp:
	ib_dealloc_device(dev);
	_VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n",
		     unit, -ret);
	idev = NULL;

bail:
	return idev;
}
static void ipath_unregister_ib_device(void *arg)
{
	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
	struct ib_device *ibdev = &dev->ibdev;

	ipath_layer_disable_timer(dev->dd);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->pending[0]) ||
	    !list_empty(&dev->pending[1]) ||
	    !list_empty(&dev->pending[2]))
		_VERBS_ERROR("ipath%d pending list not empty!\n",
			     dev->ib_unit);
	if (!list_empty(&dev->piowait))
		_VERBS_ERROR("ipath%d piowait list not empty!\n",
			     dev->ib_unit);
	if (!list_empty(&dev->rnrwait))
		_VERBS_ERROR("ipath%d rnrwait list not empty!\n",
			     dev->ib_unit);
	if (!ipath_mcast_tree_empty())
		_VERBS_ERROR("ipath%d multicast table memory leak!\n",
			     dev->ib_unit);
	/*
	 * Note that ipath_unregister_ib_device() can be called before all
	 * the QPs are destroyed!
	 */
	ipath_free_all_qps(&dev->qp_table);
	kfree(dev->qp_table.table);
	kfree(dev->lk_table.table);
	ib_dealloc_device(ibdev);
}
static int __init ipath_verbs_init(void)
{
	return ipath_verbs_register(ipath_register_ib_device,
				    ipath_unregister_ib_device,
				    ipath_ib_piobufavail, ipath_ib_rcv,
				    ipath_ib_timer);
}

static void __exit ipath_verbs_cleanup(void)
{
	ipath_verbs_unregister();
}
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct ipath_ibdev *dev =
		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
	u32 vendor, boardrev, majrev, minrev;

	ipath_layer_query_device(dev->dd, &vendor, &boardrev,
				 &majrev, &minrev);
	return sprintf(buf, "%u.%u\n", majrev, minrev);
}
static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct ipath_ibdev *dev =
		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
	int ret;

	ret = ipath_layer_get_boardname(dev->dd, buf, 128);
	if (ret < 0)
		goto bail;
	strcat(buf, "\n");
	ret = strlen(buf);

bail:
	return ret;
}
static ssize_t show_stats(struct class_device *cdev, char *buf)
{
	struct ipath_ibdev *dev =
		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
	int i;
	int len;

	len = sprintf(buf,
		      "RC resends  %d\n"
		      "RC QACKs    %d\n"
		      "RC ACKs     %d\n"
		      "RC SEQ NAKs %d\n"
		      "RC RDMA seq %d\n"
		      "RC RNR NAKs %d\n"
		      "RC OTH NAKs %d\n"
		      "RC timeouts %d\n"
		      "RC RDMA dup %d\n"
		      "piobuf wait %d\n"
		      "no piobuf   %d\n"
		      "PKT drops   %d\n"
		      "WQE errs    %d\n",
		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
		      dev->n_other_naks, dev->n_timeouts,
		      dev->n_rdma_dup_busy, dev->n_piowait,
		      dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
		const struct ipath_opcode_stats *si = &dev->opstats[i];

		if (!si->n_packets && !si->n_bytes)
			continue;
		len += sprintf(buf + len, "%02x %llu/%llu\n", i,
			       (unsigned long long) si->n_packets,
			       (unsigned long long) si->n_bytes);
	}
	return len;
}
static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);

static struct class_device_attribute *ipath_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_hca_type,
	&class_device_attr_board_id,
	&class_device_attr_stats
};
static int ipath_verbs_register_sysfs(struct ib_device *dev)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
		if (class_device_create_file(&dev->class_dev,
					     ipath_class_attributes[i])) {
			ret = 1;
			goto bail;
		}

	ret = 0;

bail:
	return ret;
}
module_init(ipath_verbs_init);
module_exit(ipath_verbs_cleanup);