headers: remove sched.h from interrupt.h
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 4b05029..7420715 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * SOFTWARE.
  */
 
+#include <linux/sched.h>
 #include <rdma/ib_smi.h>
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_kernel.h"
 
 /**
  * ipath_ud_loopback - handle send on loopback QPs
- * @sqp: the QP
- * @ss: the SGE state
- * @length: the length of the data to send
- * @wr: the work request
- * @wc: the work completion entry
+ * @sqp: the sending QP
+ * @swqe: the send work request
  *
- * This is called from ipath_post_ud_send() to forward a WQE addressed
+ * This is called from ipath_make_ud_req() to forward a WQE addressed
  * to the same HCA.
+ * Note that the receive interrupt handler may be calling ipath_ud_rcv()
+ * while this is being called.
  */
-static void ipath_ud_loopback(struct ipath_qp *sqp,
-                             struct ipath_sge_state *ss,
-                             u32 length, struct ib_send_wr *wr,
-                             struct ib_wc *wc)
+static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
 {
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_qp *qp;
@@ -60,11 +57,19 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
        struct ipath_srq *srq;
        struct ipath_sge_state rsge;
        struct ipath_sge *sge;
+       struct ipath_rwq *wq;
        struct ipath_rwqe *wqe;
+       void (*handler)(struct ib_event *, void *);
+       struct ib_wc wc;
+       u32 tail;
+       u32 rlen;
+       u32 length;
 
-       qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
-       if (!qp)
-               return;
+       qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
+       if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
+               dev->n_pkt_drops++;
+               goto done;
+       }
 
        /*
         * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
@@ -72,66 +77,90 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
        if (unlikely(qp->ibqp.qp_num &&
-                    ((int) wr->wr.ud.remote_qkey < 0
-                     ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
+                    ((int) swqe->wr.wr.ud.remote_qkey < 0 ?
+                     sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
                /* XXX OK to lose a count once in a while. */
                dev->qkey_violations++;
                dev->n_pkt_drops++;
-               goto done;
+               goto drop;
        }
 
        /*
  * A GRH is expected to precede the data even if not
         * present on the wire.
         */
-       wc->byte_len = length + sizeof(struct ib_grh);
+       length = swqe->length;
+       memset(&wc, 0, sizeof wc);
+       wc.byte_len = length + sizeof(struct ib_grh);
 
-       if (wr->opcode == IB_WR_SEND_WITH_IMM) {
-               wc->wc_flags = IB_WC_WITH_IMM;
-               wc->imm_data = wr->imm_data;
-       } else {
-               wc->wc_flags = 0;
-               wc->imm_data = 0;
+       if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+               wc.wc_flags = IB_WC_WITH_IMM;
+               wc.ex.imm_data = swqe->wr.ex.imm_data;
        }
 
        /*
-        * Get the next work request entry to find where to put the data.
-        * Note that it is safe to drop the lock after changing rq->tail
-        * since ipath_post_receive() won't fill the empty slot.
+        * This would be a lot simpler if we could call ipath_get_rwqe()
+        * but that uses state that the receive interrupt handler uses,
+        * so we would need to lock out receive interrupts while doing
+        * local loopback.
         */
        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
+               handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
+               handler = NULL;
                rq = &qp->r_rq;
        }
+
+       /*
+        * Get the next work request entry to find where to put the data.
+        * Note that it is safe to drop the lock after changing rq->tail
+        * since ipath_post_receive() won't fill the empty slot.
+        */
        spin_lock_irqsave(&rq->lock, flags);
-       if (rq->tail == rq->head) {
+       wq = rq->wq;
+       tail = wq->tail;
+       /* Validate tail before using it since it is user writable. */
+       if (tail >= rq->size)
+               tail = 0;
+       if (unlikely(tail == wq->head)) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
-               goto done;
+               goto drop;
+       }
+       wqe = get_rwqe_ptr(rq, tail);
+       rsge.sg_list = qp->r_ud_sg_list;
+       if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
+               spin_unlock_irqrestore(&rq->lock, flags);
+               dev->n_pkt_drops++;
+               goto drop;
        }
        /* Silently drop packets which are too big. */
-       wqe = get_rwqe_ptr(rq, rq->tail);
-       if (wc->byte_len > wqe->length) {
+       if (wc.byte_len > rlen) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
-               goto done;
+               goto drop;
        }
-       wc->wr_id = wqe->wr_id;
-       rsge.sge = wqe->sg_list[0];
-       rsge.sg_list = wqe->sg_list + 1;
-       rsge.num_sge = wqe->num_sge;
-       if (++rq->tail >= rq->size)
-               rq->tail = 0;
-       if (srq && srq->ibsrq.event_handler) {
+       if (++tail >= rq->size)
+               tail = 0;
+       wq->tail = tail;
+       wc.wr_id = wqe->wr_id;
+       if (handler) {
                u32 n;
 
-               if (rq->head < rq->tail)
-                       n = rq->size + rq->head - rq->tail;
+               /*
+                * Validate the head pointer value and compute
+                * the number of remaining WQEs.
+                */
+               n = wq->head;
+               if (n >= rq->size)
+                       n = 0;
+               if (n < tail)
+                       n += rq->size - tail;
                else
-                       n = rq->head - rq->tail;
+                       n -= tail;
                if (n < srq->limit) {
                        struct ib_event ev;
 
@@ -140,32 +169,34 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-                       srq->ibsrq.event_handler(&ev,
-                                                srq->ibsrq.srq_context);
+                       handler(&ev, srq->ibsrq.srq_context);
                } else
                        spin_unlock_irqrestore(&rq->lock, flags);
        } else
                spin_unlock_irqrestore(&rq->lock, flags);
-       ah_attr = &to_iah(wr->wr.ud.ah)->attr;
+
+       ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
        if (ah_attr->ah_flags & IB_AH_GRH) {
                ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
-               wc->wc_flags |= IB_WC_GRH;
+               wc.wc_flags |= IB_WC_GRH;
        } else
                ipath_skip_sge(&rsge, sizeof(struct ib_grh));
-       sge = &ss->sge;
+       sge = swqe->sg_list;
        while (length) {
                u32 len = sge->length;
 
                if (len > length)
                        len = length;
+               if (len > sge->sge_length)
+                       len = sge->sge_length;
                BUG_ON(len == 0);
                ipath_copy_sge(&rsge, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
+                       if (--swqe->wr.num_sge)
+                               sge++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
@@ -179,237 +210,186 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
                }
                length -= len;
        }
-       wc->status = IB_WC_SUCCESS;
-       wc->opcode = IB_WC_RECV;
-       wc->vendor_err = 0;
-       wc->qp_num = qp->ibqp.qp_num;
-       wc->src_qp = sqp->ibqp.qp_num;
+       wc.status = IB_WC_SUCCESS;
+       wc.opcode = IB_WC_RECV;
+       wc.qp = &qp->ibqp;
+       wc.src_qp = sqp->ibqp.qp_num;
        /* XXX do we know which pkey matched? Only needed for GSI. */
-       wc->pkey_index = 0;
-       wc->slid = ipath_layer_get_lid(dev->dd) |
+       wc.pkey_index = 0;
+       wc.slid = dev->dd->ipath_lid |
                (ah_attr->src_path_bits &
-                ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
-       wc->sl = ah_attr->sl;
-       wc->dlid_path_bits =
-               ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+                ((1 << dev->dd->ipath_lmc) - 1));
+       wc.sl = ah_attr->sl;
+       wc.dlid_path_bits =
+               ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
+       wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
-                      wr->send_flags & IB_SEND_SOLICITED);
-
-done:
+       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+                      swqe->wr.send_flags & IB_SEND_SOLICITED);
+drop:
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
+done:;
 }
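
The tail and head handling above is defensive because the receive
queue indices now live in user-mapped memory (struct ipath_rwq), so
every index is clamped before use and before any wraparound math.  A
standalone sketch of the same pattern, with hypothetical types rather
than the driver's:

#include <stdint.h>

/* Hypothetical ring with consumer/producer indices the user can write. */
struct ring {
	uint32_t size;			/* slot count, fixed at creation */
	volatile uint32_t head;		/* producer index, user writable */
	volatile uint32_t tail;		/* consumer index, user writable */
};

/* Load and validate the tail; returns size when the ring is empty. */
static uint32_t ring_consume(struct ring *rq)
{
	uint32_t tail = rq->tail;

	if (tail >= rq->size)		/* clamp before use */
		tail = 0;
	if (tail == rq->head)		/* nothing queued */
		return rq->size;
	return tail;
}

/*
 * Entries still queued after advancing past `tail'; mirrors the
 * SRQ-limit computation in ipath_ud_loopback() above.
 */
static uint32_t ring_remaining(struct ring *rq, uint32_t tail)
{
	uint32_t n = rq->head;

	if (n >= rq->size)		/* head is user writable too */
		n = 0;
	return n < tail ? n + rq->size - tail : n - tail;
}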
 
 /**
- * ipath_post_ud_send - post a UD send on QP
+ * ipath_make_ud_req - construct a UD request packet
  * @qp: the QP
- * @wr: the work request
  *
- * Note that we actually send the data as it is posted instead of putting
- * the request into a ring buffer.  If we wanted to use a ring buffer,
- * we would need to save a reference to the destination address in the SWQE.
+ * Return 1 if constructed; otherwise, return 0.
  */
-int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
+int ipath_make_ud_req(struct ipath_qp *qp)
 {
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_other_headers *ohdr;
        struct ib_ah_attr *ah_attr;
-       struct ipath_sge_state ss;
-       struct ipath_sge *sg_list;
-       struct ib_wc wc;
-       u32 hwords;
+       struct ipath_swqe *wqe;
+       unsigned long flags;
        u32 nwords;
-       u32 len;
        u32 extra_bytes;
        u32 bth0;
        u16 lrh0;
        u16 lid;
-       int i;
-       int ret;
+       int ret = 0;
+       int next_cur;
 
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
-               ret = 0;
-               goto bail;
-       }
+       spin_lock_irqsave(&qp->s_lock, flags);
 
-       /* IB spec says that num_sge == 0 is OK. */
-       if (wr->num_sge > qp->s_max_sge) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (wr->num_sge > 1) {
-               sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
-                                 GFP_ATOMIC);
-               if (!sg_list) {
-                       ret = -ENOMEM;
+       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
+               if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
                        goto bail;
-               }
-       } else
-               sg_list = NULL;
-
-       /* Check the buffer to send. */
-       ss.sg_list = sg_list;
-       ss.sge.mr = NULL;
-       ss.sge.vaddr = NULL;
-       ss.sge.length = 0;
-       ss.sge.sge_length = 0;
-       ss.num_sge = 0;
-       len = 0;
-       for (i = 0; i < wr->num_sge; i++) {
-               /* Check LKEY */
-               if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
-                       ret = -EINVAL;
+               /* We are in the error state, flush the work request. */
+               if (qp->s_last == qp->s_head)
                        goto bail;
-               }
-
-               if (wr->sg_list[i].length == 0)
-                       continue;
-               if (!ipath_lkey_ok(&dev->lk_table, ss.num_sge ?
-                                  sg_list + ss.num_sge - 1 : &ss.sge,
-                                  &wr->sg_list[i], 0)) {
-                       ret = -EINVAL;
+               /* If DMAs are in progress, we can't flush immediately. */
+               if (atomic_read(&qp->s_dma_busy)) {
+                       qp->s_flags |= IPATH_S_WAIT_DMA;
                        goto bail;
                }
-               len += wr->sg_list[i].length;
-               ss.num_sge++;
+               wqe = get_swqe_ptr(qp, qp->s_last);
+               ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+               goto done;
        }
-       /* Check for invalid packet size. */
-       if (len > ipath_layer_get_ibmtu(dev->dd)) {
-               ret = -EINVAL;
+
+       if (qp->s_cur == qp->s_head)
                goto bail;
-       }
-       extra_bytes = (4 - len) & 3;
-       nwords = (len + extra_bytes) >> 2;
+
+       wqe = get_swqe_ptr(qp, qp->s_cur);
+       next_cur = qp->s_cur + 1;
+       if (next_cur >= qp->s_size)
+               next_cur = 0;
 
        /* Construct the header. */
-       ah_attr = &to_iah(wr->wr.ud.ah)->attr;
-       if (ah_attr->dlid == 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE) {
-               if (ah_attr->dlid != IPS_PERMISSIVE_LID)
+       ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
+       if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
+               if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
                        dev->n_multicast_xmit++;
                else
                        dev->n_unicast_xmit++;
        } else {
                dev->n_unicast_xmit++;
-               lid = ah_attr->dlid &
-                       ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
-               if (unlikely(lid == ipath_layer_get_lid(dev->dd))) {
+               lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
+               if (unlikely(lid == dev->dd->ipath_lid)) {
                        /*
-                        * Pass in an uninitialized ib_wc to save stack
-                        * space.
+                        * If DMAs are in progress, we can't generate
+                        * a completion for the loopback packet since
+                        * it would be out of order.
+                        * XXX Instead of waiting, we could queue a
+                        * zero length descriptor so we get a callback.
                         */
-                       ipath_ud_loopback(qp, &ss, len, wr, &wc);
+                       if (atomic_read(&qp->s_dma_busy)) {
+                               qp->s_flags |= IPATH_S_WAIT_DMA;
+                               goto bail;
+                       }
+                       qp->s_cur = next_cur;
+                       spin_unlock_irqrestore(&qp->s_lock, flags);
+                       ipath_ud_loopback(qp, wqe);
+                       spin_lock_irqsave(&qp->s_lock, flags);
+                       ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
                        goto done;
                }
        }
+
+       qp->s_cur = next_cur;
+       extra_bytes = -wqe->length & 3;
+       nwords = (wqe->length + extra_bytes) >> 2;
+
+       /* Header size in 32-bit words: LRH+BTH+DETH = (8+12+8)/4. */
+       qp->s_hdrwords = 7;
+       qp->s_cur_size = wqe->length;
+       qp->s_cur_sge = &qp->s_sge;
+       qp->s_dmult = ah_attr->static_rate;
+       qp->s_wqe = wqe;
+       qp->s_sge.sge = wqe->sg_list[0];
+       qp->s_sge.sg_list = wqe->sg_list + 1;
+       qp->s_sge.num_sge = wqe->wr.num_sge;
+
        if (ah_attr->ah_flags & IB_AH_GRH) {
                /* Header size in 32-bit words. */
-               hwords = 17;
-               lrh0 = IPS_LRH_GRH;
+               qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
+                                                &ah_attr->grh,
+                                                qp->s_hdrwords, nwords);
+               lrh0 = IPATH_LRH_GRH;
                ohdr = &qp->s_hdr.u.l.oth;
-               qp->s_hdr.u.l.grh.version_tclass_flow =
-                       cpu_to_be32((6 << 28) |
-                                   (ah_attr->grh.traffic_class << 20) |
-                                   ah_attr->grh.flow_label);
-               qp->s_hdr.u.l.grh.paylen =
-                       cpu_to_be16(((wr->opcode ==
-                                     IB_WR_SEND_WITH_IMM ? 6 : 5) +
-                                    nwords + SIZE_OF_CRC) << 2);
-               /* next_hdr is defined by C8-7 in ch. 8.4.1 */
-               qp->s_hdr.u.l.grh.next_hdr = 0x1B;
-               qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
-               /* The SGID is 32-bit aligned. */
-               qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
-                       dev->gid_prefix;
-               qp->s_hdr.u.l.grh.sgid.global.interface_id =
-                       ipath_layer_get_guid(dev->dd);
-               qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
                /*
                 * Don't worry about sending to locally attached multicast
         * QPs.  It is unspecified by the spec what happens.
                 */
        } else {
                /* Header size in 32-bit words. */
-               hwords = 7;
-               lrh0 = IPS_LRH_BTH;
+               lrh0 = IPATH_LRH_BTH;
                ohdr = &qp->s_hdr.u.oth;
        }
-       if (wr->opcode == IB_WR_SEND_WITH_IMM) {
-               ohdr->u.ud.imm_data = wr->imm_data;
-               wc.imm_data = wr->imm_data;
-               hwords += 1;
+       if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+               qp->s_hdrwords++;
+               ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
                bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
-       } else if (wr->opcode == IB_WR_SEND) {
-               wc.imm_data = 0;
+       } else
                bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
-       } else {
-               ret = -EINVAL;
-               goto bail;
-       }
        lrh0 |= ah_attr->sl << 4;
        if (qp->ibqp.qp_type == IB_QPT_SMI)
                lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
-       qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
-       lid = ipath_layer_get_lid(dev->dd);
+       qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
+                                          SIZE_OF_CRC);
+       lid = dev->dd->ipath_lid;
        if (lid) {
                lid |= ah_attr->src_path_bits &
-                       ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+                       ((1 << dev->dd->ipath_lmc) - 1);
                qp->s_hdr.lrh[3] = cpu_to_be16(lid);
        } else
                qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
-       if (wr->send_flags & IB_SEND_SOLICITED)
+       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                bth0 |= 1 << 23;
        bth0 |= extra_bytes << 20;
-       bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPS_DEFAULT_P_KEY :
-               ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+       bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
+               ipath_get_pkey(dev->dd, qp->s_pkey_index);
        ohdr->bth[0] = cpu_to_be32(bth0);
        /*
         * Use the multicast QP if the destination LID is a multicast LID.
         */
-       ohdr->bth[1] = ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
-               ah_attr->dlid != IPS_PERMISSIVE_LID ?
-               __constant_cpu_to_be32(IPS_MULTICAST_QPN) :
-               cpu_to_be32(wr->wr.ud.remote_qpn);
-       /* XXX Could lose a PSN count but not worth locking */
-       ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPS_PSN_MASK);
+       ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
+               ah_attr->dlid != IPATH_PERMISSIVE_LID ?
+               cpu_to_be32(IPATH_MULTICAST_QPN) :
+               cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
+       ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
        /*
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
-       ohdr->u.ud.deth[0] = cpu_to_be32((int)wr->wr.ud.remote_qkey < 0 ?
-                                        qp->qkey : wr->wr.ud.remote_qkey);
+       ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
+                                        qp->qkey : wqe->wr.wr.ud.remote_qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
-       if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
-                            len, &ss))
-               dev->n_no_piobuf++;
 
 done:
-       /* Queue the completion status entry. */
-       if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
-           (wr->send_flags & IB_SEND_SIGNALED)) {
-               wc.wr_id = wr->wr_id;
-               wc.status = IB_WC_SUCCESS;
-               wc.vendor_err = 0;
-               wc.opcode = IB_WC_SEND;
-               wc.byte_len = len;
-               wc.qp_num = qp->ibqp.qp_num;
-               wc.src_qp = 0;
-               wc.wc_flags = 0;
-               /* XXX initialize other fields? */
-               ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
-       }
-       kfree(sg_list);
-
-       ret = 0;
+       ret = 1;
+       goto unlock;
 
 bail:
+       qp->s_flags &= ~IPATH_S_BUSY;
+unlock:
+       spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
 }
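
A brief aside on the header math above: BTH dword 0 packs the opcode,
the solicited-event bit, the pad count, and the P_Key into a single
32-bit word, and the pad count itself comes from rounding the payload
up to a 4-byte boundary with -length & 3.  A standalone sketch of that
packing (hypothetical helper, not part of this patch or the driver):

#include <stdint.h>

/*
 * Hypothetical illustration of the BTH dword-0 layout used above:
 * opcode in bits 31:24, solicited event in bit 23, pad count in
 * bits 21:20, P_Key in bits 15:0.  The driver converts the result
 * with cpu_to_be32() before it goes on the wire.
 */
static uint32_t make_bth0(uint8_t opcode, int solicited,
			  uint32_t payload_len, uint16_t pkey)
{
	/* Bytes needed to round the payload up to a 4-byte boundary. */
	uint32_t extra_bytes = -payload_len & 3;
	uint32_t bth0 = (uint32_t)opcode << 24;

	if (solicited)
		bth0 |= 1 << 23;
	bth0 |= extra_bytes << 20;
	bth0 |= pkey;
	return bth0;
}

For example, a 513-byte payload gives extra_bytes = 3, so
nwords = (513 + 3) >> 2 = 129 dwords, matching the nwords computation
in ipath_make_ud_req() above.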
 
@@ -433,13 +413,9 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        int opcode;
        u32 hdrsize;
        u32 pad;
-       unsigned long flags;
        struct ib_wc wc;
        u32 qkey;
        u32 src_qp;
-       struct ipath_rq *rq;
-       struct ipath_srq *srq;
-       struct ipath_rwqe *wqe;
        u16 dlid;
        int header_in_data;
 
@@ -458,8 +434,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                 * the eager header buffer size to 56 bytes so the last 12
                 * bytes of the IB header is in the data buffer.
                 */
-               header_in_data =
-                       ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+               header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
                if (header_in_data) {
                        qkey = be32_to_cpu(((__be32 *) data)[1]);
                        src_qp = be32_to_cpu(((__be32 *) data)[2]);
@@ -469,7 +444,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                        src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
                }
        }
-       src_qp &= IPS_QPN_MASK;
+       src_qp &= IPATH_QPN_MASK;
 
        /*
         * Check that the permissive LID is only used on QP0
@@ -497,6 +472,28 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                }
        }
 
+       /*
+        * The opcode is in the low byte when it's in network order
+        * (top byte when in host order).
+        */
+       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+       if (qp->ibqp.qp_num > 1 &&
+           opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+               if (header_in_data) {
+                       wc.ex.imm_data = *(__be32 *) data;
+                       data += sizeof(__be32);
+               } else
+                       wc.ex.imm_data = ohdr->u.ud.imm_data;
+               wc.wc_flags = IB_WC_WITH_IMM;
+               hdrsize += sizeof(u32);
+       } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+               wc.ex.imm_data = 0;
+               wc.wc_flags = 0;
+       } else {
+               dev->n_pkt_drops++;
+               goto bail;
+       }
+
        /* Get the number of bytes the message was padded by. */
        pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
        if (unlikely(tlen < (hdrsize + pad + 4))) {
@@ -524,42 +521,11 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        wc.byte_len = tlen + sizeof(struct ib_grh);
 
        /*
-        * The opcode is in the low byte when its in network order
-        * (top byte when in host order).
-        */
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-       if (qp->ibqp.qp_num > 1 &&
-           opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
-               if (header_in_data) {
-                       wc.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else
-                       wc.imm_data = ohdr->u.ud.imm_data;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               hdrsize += sizeof(u32);
-       } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
-               wc.imm_data = 0;
-               wc.wc_flags = 0;
-       } else {
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-
-       /*
         * Get the next work request entry to find where to put the data.
-        * Note that it is safe to drop the lock after changing rq->tail
-        * since ipath_post_receive() won't fill the empty slot.
         */
-       if (qp->ibqp.srq) {
-               srq = to_isrq(qp->ibqp.srq);
-               rq = &srq->rq;
-       } else {
-               srq = NULL;
-               rq = &qp->r_rq;
-       }
-       spin_lock_irqsave(&rq->lock, flags);
-       if (rq->tail == rq->head) {
-               spin_unlock_irqrestore(&rq->lock, flags);
+       if (qp->r_flags & IPATH_R_REUSE_SGE)
+               qp->r_flags &= ~IPATH_R_REUSE_SGE;
+       else if (!ipath_get_rwqe(qp, 0)) {
                /*
                 * Count VL15 packets dropped due to no receive buffer.
                 * Otherwise, count them as buffer overruns since usually,
@@ -573,39 +539,11 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                goto bail;
        }
        /* Silently drop packets which are too big. */
-       wqe = get_rwqe_ptr(rq, rq->tail);
-       if (wc.byte_len > wqe->length) {
-               spin_unlock_irqrestore(&rq->lock, flags);
+       if (wc.byte_len > qp->r_len) {
+               qp->r_flags |= IPATH_R_REUSE_SGE;
                dev->n_pkt_drops++;
                goto bail;
        }
-       wc.wr_id = wqe->wr_id;
-       qp->r_sge.sge = wqe->sg_list[0];
-       qp->r_sge.sg_list = wqe->sg_list + 1;
-       qp->r_sge.num_sge = wqe->num_sge;
-       if (++rq->tail >= rq->size)
-               rq->tail = 0;
-       if (srq && srq->ibsrq.event_handler) {
-               u32 n;
-
-               if (rq->head < rq->tail)
-                       n = rq->size + rq->head - rq->tail;
-               else
-                       n = rq->head - rq->tail;
-               if (n < srq->limit) {
-                       struct ib_event ev;
-
-                       srq->limit = 0;
-                       spin_unlock_irqrestore(&rq->lock, flags);
-                       ev.device = qp->ibqp.device;
-                       ev.element.srq = qp->ibqp.srq;
-                       ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-                       srq->ibsrq.event_handler(&ev,
-                                                srq->ibsrq.srq_context);
-               } else
-                       spin_unlock_irqrestore(&rq->lock, flags);
-       } else
-               spin_unlock_irqrestore(&rq->lock, flags);
        if (has_grh) {
                ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
                               sizeof(struct ib_grh));
@@ -614,10 +552,13 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
        ipath_copy_sge(&qp->r_sge, data,
                       wc.byte_len - sizeof(struct ib_grh));
+       if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
+               goto bail;
+       wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.vendor_err = 0;
-       wc.qp_num = qp->ibqp.qp_num;
+       wc.qp = &qp->ibqp;
        wc.src_qp = src_qp;
        /* XXX do we know which pkey matched? Only needed for GSI. */
        wc.pkey_index = 0;
@@ -627,12 +568,13 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
         */
-       wc.dlid_path_bits = dlid >= IPS_MULTICAST_LID_BASE ? 0 :
-               dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+       wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
+               dlid & ((1 << dev->dd->ipath_lmc) - 1);
+       wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       (ohdr->bth[0] &
-                       __constant_cpu_to_be32(1 << 23)) != 0);
+                       cpu_to_be32(1 << 23)) != 0);
 
 bail:;
 }
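
To complement the receive path above: the opcode and pad count are
unpacked from the same BTH dword that the send side builds, after a
network-to-host byte swap.  A standalone sketch (userspace C with a
hypothetical struct; the kernel code uses be32_to_cpu() rather than
ntohl()):

#include <arpa/inet.h>
#include <stdint.h>

/* Hypothetical mirror of the BTH parsing done in ipath_ud_rcv(). */
struct bth0_fields {
	uint8_t opcode;		/* bits 31:24 once in host order */
	uint8_t pad;		/* bits 21:20, 0..3 pad bytes */
	uint16_t pkey;		/* bits 15:0 */
};

static struct bth0_fields parse_bth0(uint32_t bth0_wire)
{
	uint32_t bth0 = ntohl(bth0_wire);	/* network -> host order */
	struct bth0_fields f;

	f.opcode = bth0 >> 24;
	f.pad = (bth0 >> 20) & 3;
	f.pkey = bth0 & 0xffff;
	return f;
}

The driver applies exactly this kind of unpacking and then refuses the
packet when tlen < hdrsize + pad + 4, i.e. when the claimed length
cannot also cover the pad bytes and the 4-byte ICRC.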