include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 1226113..d8f4bb8 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -35,6 +35,9 @@
  *
  */
 
+#include <linux/delay.h>
+#include <linux/gfp.h>
+
 #include "c2.h"
 #include "c2_vq.h"
 #include "c2_status.h"
@@ -119,7 +122,7 @@ void c2_set_qp_state(struct c2_qp *qp, int c2_state)
        int new_state = to_ib_state(c2_state);
 
        pr_debug("%s: qp[%p] state modify %s --> %s\n",
-              __FUNCTION__,
+              __func__,
                qp,
                to_ib_state_str(qp->state),
                to_ib_state_str(new_state));
@@ -139,7 +142,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
        int err;
 
        pr_debug("%s:%d qp=%p, %s --> %s\n",
-               __FUNCTION__, __LINE__,
+               __func__, __LINE__,
                qp,
                to_ib_state_str(qp->state),
                to_ib_state_str(attr->qp_state));
@@ -159,8 +162,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
 
        if (attr_mask & IB_QP_STATE) {
                /* Ensure the state is valid */
-               if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
-                       return -EINVAL;
+               if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
+                       err = -EINVAL;
+                       goto bail0;
+               }
 
                wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
 
@@ -182,9 +187,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
                if (attr->cur_qp_state != IB_QPS_RTR &&
                    attr->cur_qp_state != IB_QPS_RTS &&
                    attr->cur_qp_state != IB_QPS_SQD &&
-                   attr->cur_qp_state != IB_QPS_SQE)
-                       return -EINVAL;
-               else
+                   attr->cur_qp_state != IB_QPS_SQE) {
+                       err = -EINVAL;
+                       goto bail0;
+               } else
                        wr.next_qp_state =
                            cpu_to_be32(to_c2_state(attr->cur_qp_state));
 
@@ -219,7 +225,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
                qp->state = next_state;
 #ifdef DEBUG
        else
-               pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err);
+               pr_debug("%s: c2_errno=%d\n", __func__, err);
 #endif
        /*
         * If we're going to error and generating the event here, then
@@ -238,7 +244,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
        vq_req_free(c2dev, vq_req);
 
        pr_debug("%s:%d qp=%p, cur_state=%s\n",
-               __FUNCTION__, __LINE__,
+               __func__, __LINE__,
                qp,
                to_ib_state_str(qp->state));
        return err;
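
The switch from early "return -EINVAL" to "goto bail0" in the hunks above matters because c2_qp_modify() has already allocated a verbs request by the time these checks run (the vq_req released by vq_req_free() just above); bailing through the common exit path frees it instead of leaking it. A minimal sketch of the same unwind shape, using hypothetical thing_* helpers rather than the driver's vq_req API:

/* Hypothetical example: thing_req/thing_alloc/thing_free stand in for the
 * driver's request handling; the goto-based unwind is the point. */
static int modify_thing(struct thing_dev *dev, int new_state, int max_state)
{
        struct thing_req *req;
        int err = 0;

        req = thing_alloc(dev);         /* resource acquired before validation */
        if (!req)
                return -ENOMEM;         /* nothing to undo yet, plain return is fine */

        if (new_state < 0 || new_state > max_state) {
                err = -EINVAL;
                goto bail0;             /* an early return here would leak req */
        }

        err = thing_send(dev, req);     /* the real work */

bail0:
        thing_free(dev, req);           /* the single exit path owns the cleanup */
        return err;
}
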
@@ -501,6 +507,7 @@ int c2_alloc_qp(struct c2_dev *c2dev,
        qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
        qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
        qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
+       init_waitqueue_head(&qp->wait);
 
        /* Initialize the SQ MQ */
        q_size = be32_to_cpu(reply->sq_depth);
@@ -562,6 +569,32 @@ int c2_alloc_qp(struct c2_dev *c2dev,
        return err;
 }
 
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+       if (send_cq == recv_cq)
+               spin_lock_irq(&send_cq->lock);
+       else if (send_cq > recv_cq) {
+               spin_lock_irq(&send_cq->lock);
+               spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+       } else {
+               spin_lock_irq(&recv_cq->lock);
+               spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+       }
+}
+
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+       if (send_cq == recv_cq)
+               spin_unlock_irq(&send_cq->lock);
+       else if (send_cq > recv_cq) {
+               spin_unlock(&recv_cq->lock);
+               spin_unlock_irq(&send_cq->lock);
+       } else {
+               spin_unlock(&send_cq->lock);
+               spin_unlock_irq(&recv_cq->lock);
+       }
+}
+
 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 {
        struct c2_cq *send_cq;
@@ -574,15 +607,9 @@ void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
         * Lock CQs here, so that CQ polling code can do QP lookup
         * without taking a lock.
         */
-       spin_lock_irq(&send_cq->lock);
-       if (send_cq != recv_cq)
-               spin_lock(&recv_cq->lock);
-
+       c2_lock_cqs(send_cq, recv_cq);
        c2_free_qpn(c2dev, qp->qpn);
-
-       if (send_cq != recv_cq)
-               spin_unlock(&recv_cq->lock);
-       spin_unlock_irq(&send_cq->lock);
+       c2_unlock_cqs(send_cq, recv_cq);
 
        /*
         * Destory qp in the rnic...
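
A note on the new c2_lock_cqs()/c2_unlock_cqs() helpers used here: when the two CQs differ, the lock at the higher address is always taken first and the pair is released in reverse order, so any two callers acquire the locks in the same global order and cannot deadlock against each other; spin_lock_nested(..., SINGLE_DEPTH_NESTING) only tells lockdep that a second acquisition of the same lock class is intentional. A self-contained sketch of the idiom with a hypothetical struct res (which direction you order by does not matter, as long as every path agrees):

#include <linux/spinlock.h>

/* Hypothetical resource type; only the address-ordered locking is the point. */
struct res {
        spinlock_t lock;
};

static void res_lock_pair(struct res *a, struct res *b)
{
        if (a == b) {
                spin_lock_irq(&a->lock);        /* same object: one lock */
        } else if (a < b) {
                spin_lock_irq(&a->lock);        /* fixed order: lower address first */
                spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&b->lock);
                spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
        }
}

static void res_unlock_pair(struct res *a, struct res *b)
{
        if (a == b) {
                spin_unlock_irq(&a->lock);
        } else if (a < b) {
                spin_unlock(&b->lock);          /* release in reverse order */
                spin_unlock_irq(&a->lock);
        } else {
                spin_unlock(&a->lock);
                spin_unlock_irq(&b->lock);
        }
}
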
@@ -705,10 +732,8 @@ static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
         * cannot get on the bus and the card and system hang in a
         * deadlock -- thus the need for this code. [TOT]
         */
-       while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(0);
-       }
+       while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
+               udelay(10);
 
        __raw_writel(C2_HINT_MAKE(mq_index, shared),
                     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
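
The move from set_current_state()/schedule_timeout(0) to udelay(10) fits the locking added later in this diff: c2_activity() is now called from the post paths with qp->lock held and interrupts disabled, where scheduling is not allowed, so the wait has to be a busy-wait. The in-tree loop spins until bit 31 clears with no bound; a bounded variant, with a hypothetical helper name and retry limit, would look roughly like this:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical bounded poll; the driver itself retries forever. */
static int wait_hint_ready(void __iomem *hint_reg)
{
        int tries = 1000;                       /* ~10 ms worst case at 10 us per try */

        while (readl(hint_reg) & 0x80000000) {  /* bit 31: adapter still busy */
                if (--tries == 0)
                        return -ETIMEDOUT;
                udelay(10);                     /* legal in atomic context, unlike schedule() */
        }
        return 0;
}
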
@@ -766,6 +791,7 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
        struct c2_dev *c2dev = to_c2dev(ibqp->device);
        struct c2_qp *qp = to_c2qp(ibqp);
        union c2wr wr;
+       unsigned long lock_flags;
        int err = 0;
 
        u32 flags;
@@ -773,8 +799,10 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
        u8 actual_sge_count;
        u32 msg_size;
 
-       if (qp->state > IB_QPS_RTS)
-               return -EINVAL;
+       if (qp->state > IB_QPS_RTS) {
+               err = -EINVAL;
+               goto out;
+       }
 
        while (ib_wr) {
 
@@ -786,16 +814,24 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 
                switch (ib_wr->opcode) {
                case IB_WR_SEND:
-                       if (ib_wr->send_flags & IB_SEND_SOLICITED) {
-                               c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
-                               msg_size = sizeof(struct c2wr_send_req);
+               case IB_WR_SEND_WITH_INV:
+                       if (ib_wr->opcode == IB_WR_SEND) {
+                               if (ib_wr->send_flags & IB_SEND_SOLICITED)
+                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
+                               else
+                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
+                               wr.sqwr.send.remote_stag = 0;
                        } else {
-                               c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
-                               msg_size = sizeof(struct c2wr_send_req);
+                               if (ib_wr->send_flags & IB_SEND_SOLICITED)
+                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
+                               else
+                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
+                               wr.sqwr.send.remote_stag =
+                                       cpu_to_be32(ib_wr->ex.invalidate_rkey);
                        }
 
-                       wr.sqwr.send.remote_stag = 0;
-                       msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
+                       msg_size = sizeof(struct c2wr_send_req) +
+                               sizeof(struct c2_data_addr) * ib_wr->num_sge;
                        if (ib_wr->num_sge > qp->send_sgl_depth) {
                                err = -EINVAL;
                                break;
@@ -881,8 +917,10 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                /*
                 * Post the puppy!
                 */
+               spin_lock_irqsave(&qp->lock, lock_flags);
                err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
                if (err) {
+                       spin_unlock_irqrestore(&qp->lock, lock_flags);
                        break;
                }
 
@@ -890,10 +928,12 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                 * Enqueue mq index to activity FIFO.
                 */
                c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
+               spin_unlock_irqrestore(&qp->lock, lock_flags);
 
                ib_wr = ib_wr->next;
        }
 
+out:
        if (err)
                *bad_wr = ib_wr;
        return err;
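
The new qp->lock scope covers both qp_wr_post() and the c2_activity() doorbell, so two threads posting to the same QP cannot interleave one poster's queue update with the other's hint write; spin_lock_irqsave() rather than spin_lock_irq() is used, presumably because the post verbs can be called from contexts that already have interrupts disabled. A toy sketch of the pattern, with a hypothetical queue type and the doorbell reduced to a comment:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical work queue; only the lock scope around post + doorbell matters. */
struct toy_q {
        spinlock_t lock;
        unsigned int head;
        unsigned int depth;
        u32 slot[64];
};

static int toy_post(struct toy_q *q, u32 wqe)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&q->lock, flags);     /* caller may already have IRQs off */
        if (q->head == q->depth) {
                err = -ENOMEM;                  /* queue full */
        } else {
                q->slot[q->head++] = wqe;       /* enqueue the work request... */
                /* ...and ring the doorbell here, still under q->lock */
        }
        spin_unlock_irqrestore(&q->lock, flags);
        return err;
}
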
@@ -905,10 +945,13 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
        struct c2_dev *c2dev = to_c2dev(ibqp->device);
        struct c2_qp *qp = to_c2qp(ibqp);
        union c2wr wr;
+       unsigned long lock_flags;
        int err = 0;
 
-       if (qp->state > IB_QPS_RTS)
-               return -EINVAL;
+       if (qp->state > IB_QPS_RTS) {
+               err = -EINVAL;
+               goto out;
+       }
 
        /*
         * Try and post each work request
@@ -945,8 +988,10 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
                        break;
                }
 
+               spin_lock_irqsave(&qp->lock, lock_flags);
                err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
                if (err) {
+                       spin_unlock_irqrestore(&qp->lock, lock_flags);
                        break;
                }
 
@@ -954,10 +999,12 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
                 * Enqueue mq index to activity FIFO
                 */
                c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
+               spin_unlock_irqrestore(&qp->lock, lock_flags);
 
                ib_wr = ib_wr->next;
        }
 
+out:
        if (err)
                *bad_wr = ib_wr;
        return err;