IB/ipath: Flush RWQEs if access error or invalid error seen
authorBryan O'Sullivan <bos@pathscale.com>
Thu, 28 Sep 2006 16:00:14 +0000 (09:00 -0700)
committerRoland Dreier <rolandd@cisco.com>
Thu, 28 Sep 2006 18:16:55 +0000 (11:16 -0700)
If the receiver goes into the error state, we need to flush the
posted receive WQEs, and complete any partially received RWQE
(tracked via the new r_wrid_valid flag) with the appropriate
error status instead of silently dropping it.

Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/ipath/ipath_qp.c
drivers/infiniband/hw/ipath/ipath_rc.c
drivers/infiniband/hw/ipath/ipath_ruc.c
drivers/infiniband/hw/ipath/ipath_verbs.h

index ecfaca7..46c1c89 100644 (file)
@@ -335,6 +335,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
+       qp->r_wrid_valid = 0;
        qp->s_rnr_timeout = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
@@ -353,12 +354,13 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 /**
  * ipath_error_qp - put a QP into an error state
  * @qp: the QP to put into an error state
+ * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
  * QP s_lock should be held and interrupts disabled.
  */
 
-void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 {
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
@@ -374,7 +376,6 @@ void ipath_error_qp(struct ipath_qp *qp)
                list_del_init(&qp->piowait);
        spin_unlock(&dev->pending_lock);
 
-       wc.status = IB_WC_WR_FLUSH_ERR;
        wc.vendor_err = 0;
        wc.byte_len = 0;
        wc.imm_data = 0;
@@ -386,6 +387,12 @@ void ipath_error_qp(struct ipath_qp *qp)
        wc.sl = 0;
        wc.dlid_path_bits = 0;
        wc.port_num = 0;
+       if (qp->r_wrid_valid) {
+               qp->r_wrid_valid = 0;
+               wc.status = err;
+               ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+       }
+       wc.status = IB_WC_WR_FLUSH_ERR;
 
        while (qp->s_last != qp->s_head) {
                struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
@@ -502,7 +509,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                break;
 
        case IB_QPS_ERR:
-               ipath_error_qp(qp);
+               ipath_error_qp(qp, IB_WC_GENERAL_ERR);
                break;
 
        default:
index 595941b..a504cf6 100644 (file)
@@ -1293,6 +1293,14 @@ done:
        return 1;
 }
 
+static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
+{
+       spin_lock_irq(&qp->s_lock);
+       qp->state = IB_QPS_ERR;
+       ipath_error_qp(qp, err);
+       spin_unlock_irq(&qp->s_lock);
+}
+
 /**
  * ipath_rc_rcv - process an incoming RC packet
  * @dev: the device this packet came in on
@@ -1385,8 +1393,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                 */
                if (qp->r_ack_state >= OP(COMPARE_SWAP))
                        goto send_ack;
-               /* XXX Flush WQEs */
-               qp->state = IB_QPS_ERR;
+               ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
                qp->r_ack_state = OP(SEND_ONLY);
                qp->r_nak_state = IB_NAK_INVALID_REQUEST;
                qp->r_ack_psn = qp->r_psn;
@@ -1492,9 +1499,9 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                        goto nack_inv;
                ipath_copy_sge(&qp->r_sge, data, tlen);
                qp->r_msn++;
-               if (opcode == OP(RDMA_WRITE_LAST) ||
-                   opcode == OP(RDMA_WRITE_ONLY))
+               if (!qp->r_wrid_valid)
                        break;
+               qp->r_wrid_valid = 0;
                wc.wr_id = qp->r_wr_id;
                wc.status = IB_WC_SUCCESS;
                wc.opcode = IB_WC_RECV;
@@ -1685,8 +1692,7 @@ nack_acc:
         * is pending though.
         */
        if (qp->r_ack_state < OP(COMPARE_SWAP)) {
-               /* XXX Flush WQEs */
-               qp->state = IB_QPS_ERR;
+               ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
                qp->r_ack_state = OP(RDMA_WRITE_ONLY);
                qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
                qp->r_ack_psn = qp->r_psn;
index 17ae23f..f753051 100644 (file)
@@ -229,6 +229,7 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
                }
        }
        spin_unlock_irqrestore(&rq->lock, flags);
+       qp->r_wrid_valid = 1;
 
 bail:
        return ret;
index 3597d36..8039f6e 100644 (file)
@@ -365,6 +365,7 @@ struct ipath_qp {
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 r_reuse_sge;         /* for UC receive errors */
        u8 r_sge_inx;           /* current index into sg_list */
+       u8 r_wrid_valid;        /* r_wrid set but CQ entry not yet made */
        u8 qp_access_flags;
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_retry_cnt;         /* number of times to retry */
@@ -639,6 +640,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 
 int ipath_destroy_qp(struct ib_qp *ibqp);
 
+void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
+
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata);