include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 8bf44da..cc2ddd2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -32,6 +33,7 @@
 
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/qp.h>
+#include <linux/slab.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -64,13 +66,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
 
 static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
 {
-       int offset = n * sizeof (struct mlx4_cqe);
-
-       if (buf->buf.nbufs == 1)
-               return buf->buf.u.direct.buf + offset;
-       else
-               return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-                       (offset & (PAGE_SIZE - 1));
+       return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
 }
 
 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
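
The open-coded buffer walk removed from get_cqe_from_buf() above is now centralized in mlx4_core's mlx4_buf_offset() helper. A minimal sketch of the equivalent lookup, reconstructed from the removed lines and assuming the struct mlx4_buf layout they imply (the in-tree helper may also short-circuit fully contiguous allocations):

/* Hypothetical restatement of what mlx4_buf_offset() computes here:
 * map a byte offset into either the single direct buffer or the
 * per-page list of a multi-page allocation. */
static inline void *cq_buf_offset_sketch(struct mlx4_buf *buf, int offset)
{
	if (buf->nbufs == 1)
		return buf->u.direct.buf + offset;

	return buf->u.page_list[offset >> PAGE_SHIFT].buf +
	       (offset & (PAGE_SIZE - 1));
}
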
@@ -91,6 +87,82 @@ static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
        return get_sw_cqe(cq, cq->mcq.cons_index);
 }
 
+int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+       struct mlx4_ib_cq *mcq = to_mcq(cq);
+       struct mlx4_ib_dev *dev = to_mdev(cq->device);
+
+       return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
+}
+
+static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
+{
+       int err;
+
+       err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+                            PAGE_SIZE * 2, &buf->buf);
+
+       if (err)
+               goto out;
+
+       err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
+                                   &buf->mtt);
+       if (err)
+               goto err_buf;
+
+       err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+       if (err)
+               goto err_mtt;
+
+       return 0;
+
+err_mtt:
+       mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+       mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
+                             &buf->buf);
+
+out:
+       return err;
+}
+
+static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
+{
+       mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+}
+
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
+                              struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
+                              u64 buf_addr, int cqe)
+{
+       int err;
+
+       *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+                           IB_ACCESS_LOCAL_WRITE, 1);
+       if (IS_ERR(*umem))
+               return PTR_ERR(*umem);
+
+       err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
+                           ilog2((*umem)->page_size), &buf->mtt);
+       if (err)
+               goto err_buf;
+
+       err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
+       if (err)
+               goto err_mtt;
+
+       return 0;
+
+err_mtt:
+       mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+       ib_umem_release(*umem);
+
+       return err;
+}
+
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
                                struct ib_ucontext *context,
                                struct ib_udata *udata)
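
The helpers added above factor out CQ buffer setup (mlx4_ib_alloc_cq_buf/mlx4_ib_free_cq_buf for kernel buffers, mlx4_ib_get_cq_umem for user memory) so that create and resize can share them, and mlx4_ib_modify_cq() exposes completion moderation through the standard ib_modify_cq() verb. A hedged consumer-side sketch; the count/period values are illustrative and the period units are typically microseconds:

#include <rdma/ib_verbs.h>

/* Hypothetical ULP helper: generate a completion event only after 16
 * completions or after the moderation timer expires, whichever is first.
 * On mlx4 this routes through mlx4_ib_modify_cq() -> mlx4_cq_modify(). */
static int example_set_cq_moderation(struct ib_cq *cq)
{
	return ib_modify_cq(cq, 16 /* cq_count */, 10 /* cq_period */);
}
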
@@ -98,7 +170,6 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_cq *cq;
        struct mlx4_uar *uar;
-       int buf_size;
        int err;
 
        if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -110,8 +181,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 
        entries      = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
-       buf_size     = entries * sizeof (struct mlx4_cqe);
+       mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
+       cq->resize_buf = NULL;
+       cq->resize_umem = NULL;
 
        if (context) {
                struct mlx4_ib_create_cq ucmd;
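
The sizing above keeps the existing mlx4 convention: round the requested depth plus one reserved slot up to a power of two for the hardware, and report one less than that through ibcq.cqe. A worked example of the arithmetic, values only, not driver code:

/* A request for 100 CQ entries. */
int requested = 100;
int entries   = roundup_pow_of_two(requested + 1);	/* 128 hardware CQEs   */
int reported  = entries - 1;				/* cq->ibcq.cqe == 127 */
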
@@ -121,21 +194,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
                        goto err_cq;
                }
 
-               cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
-                                      IB_ACCESS_LOCAL_WRITE);
-               if (IS_ERR(cq->umem)) {
-                       err = PTR_ERR(cq->umem);
-                       goto err_cq;
-               }
-
-               err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
-                                   ilog2(cq->umem->page_size), &cq->buf.mtt);
+               err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
+                                         ucmd.buf_addr, entries);
                if (err)
-                       goto err_buf;
-
-               err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
-               if (err)
-                       goto err_mtt;
+                       goto err_cq;
 
                err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                          &cq->db);
@@ -144,7 +206,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 
                uar = &to_mucontext(context)->uar;
        } else {
-               err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+               err = mlx4_db_alloc(dev->dev, &cq->db, 1);
                if (err)
                        goto err_cq;
 
@@ -153,25 +215,15 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
                *cq->mcq.set_ci_db = 0;
                *cq->mcq.arm_db    = 0;
 
-               if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) {
-                       err = -ENOMEM;
-                       goto err_db;
-               }
-
-               err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
-                                   &cq->buf.mtt);
+               err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
                if (err)
-                       goto err_buf;
-
-               err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
-               if (err)
-                       goto err_mtt;
+                       goto err_db;
 
                uar = &dev->priv_uar;
        }
 
        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
-                           cq->db.dma, &cq->mcq);
+                           cq->db.dma, &cq->mcq, vector, 0);
        if (err)
                goto err_dbmap;
 
@@ -193,16 +245,14 @@ err_dbmap:
 err_mtt:
        mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
 
-err_buf:
        if (context)
                ib_umem_release(cq->umem);
        else
-               mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
-                             &cq->buf.buf);
+               mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
 
 err_db:
        if (!context)
-               mlx4_ib_db_free(dev, &cq->db);
+               mlx4_db_free(dev->dev, &cq->db);
 
 err_cq:
        kfree(cq);
@@ -210,6 +260,177 @@ err_cq:
        return ERR_PTR(err);
 }
 
+static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+                                 int entries)
+{
+       int err;
+
+       if (cq->resize_buf)
+               return -EBUSY;
+
+       cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+       if (!cq->resize_buf)
+               return -ENOMEM;
+
+       err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+       if (err) {
+               kfree(cq->resize_buf);
+               cq->resize_buf = NULL;
+               return err;
+       }
+
+       cq->resize_buf->cqe = entries - 1;
+
+       return 0;
+}
+
+static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+                                  int entries, struct ib_udata *udata)
+{
+       struct mlx4_ib_resize_cq ucmd;
+       int err;
+
+       if (cq->resize_umem)
+               return -EBUSY;
+
+       if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+               return -EFAULT;
+
+       cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+       if (!cq->resize_buf)
+               return -ENOMEM;
+
+       err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
+                                 &cq->resize_umem, ucmd.buf_addr, entries);
+       if (err) {
+               kfree(cq->resize_buf);
+               cq->resize_buf = NULL;
+               return err;
+       }
+
+       cq->resize_buf->cqe = entries - 1;
+
+       return 0;
+}
+
+static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
+{
+       u32 i;
+
+       i = cq->mcq.cons_index;
+       while (get_sw_cqe(cq, i & cq->ibcq.cqe))
+               ++i;
+
+       return i - cq->mcq.cons_index;
+}
+
+static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
+{
+       struct mlx4_cqe *cqe, *new_cqe;
+       int i;
+
+       i = cq->mcq.cons_index;
+       cqe = get_cqe(cq, i & cq->ibcq.cqe);
+       while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+               new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
+                                          (i + 1) & cq->resize_buf->cqe);
+               memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+               new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
+                       (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
+               cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+       }
+       ++cq->mcq.cons_index;
+}
+
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
+       struct mlx4_ib_cq *cq = to_mcq(ibcq);
+       struct mlx4_mtt mtt;
+       int outst_cqe;
+       int err;
+
+       mutex_lock(&cq->resize_mutex);
+
+       if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       entries = roundup_pow_of_two(entries + 1);
+       if (entries == ibcq->cqe + 1) {
+               err = 0;
+               goto out;
+       }
+
+       if (ibcq->uobject) {
+               err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
+               if (err)
+                       goto out;
+       } else {
+               /* Can't be smaller than the number of outstanding CQEs */
+               outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
+               if (entries < outst_cqe + 1) {
+                       err = 0;
+                       goto out;
+               }
+
+               err = mlx4_alloc_resize_buf(dev, cq, entries);
+               if (err)
+                       goto out;
+       }
+
+       mtt = cq->buf.mtt;
+
+       err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
+       if (err)
+               goto err_buf;
+
+       mlx4_mtt_cleanup(dev->dev, &mtt);
+       if (ibcq->uobject) {
+               cq->buf      = cq->resize_buf->buf;
+               cq->ibcq.cqe = cq->resize_buf->cqe;
+               ib_umem_release(cq->umem);
+               cq->umem     = cq->resize_umem;
+
+               kfree(cq->resize_buf);
+               cq->resize_buf = NULL;
+               cq->resize_umem = NULL;
+       } else {
+               spin_lock_irq(&cq->lock);
+               if (cq->resize_buf) {
+                       mlx4_ib_cq_resize_copy_cqes(cq);
+                       mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+                       cq->buf      = cq->resize_buf->buf;
+                       cq->ibcq.cqe = cq->resize_buf->cqe;
+
+                       kfree(cq->resize_buf);
+                       cq->resize_buf = NULL;
+               }
+               spin_unlock_irq(&cq->lock);
+       }
+
+       goto out;
+
+err_buf:
+       mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
+       if (!ibcq->uobject)
+               mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
+                                   cq->resize_buf->cqe);
+
+       kfree(cq->resize_buf);
+       cq->resize_buf = NULL;
+
+       if (cq->resize_umem) {
+               ib_umem_release(cq->resize_umem);
+               cq->resize_umem = NULL;
+       }
+
+out:
+       mutex_unlock(&cq->resize_mutex);
+       return err;
+}
+
 int mlx4_ib_destroy_cq(struct ib_cq *cq)
 {
        struct mlx4_ib_dev *dev = to_mdev(cq->device);
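
The resize path added above is reached through the ib_resize_cq() verb: for user CQs the replacement buffer comes from the ucmd and is swapped in right after mlx4_cq_resize() succeeds, while for kernel CQs the driver allocates the buffer itself and copies any outstanding CQEs under cq->lock once the hardware posts an MLX4_CQE_OPCODE_RESIZE entry. A hedged consumer-side sketch; the growth policy is illustrative:

/* Hypothetical ULP helper: grow a CQ when the queue depth it serves
 * increases. On mlx4 this ends up in mlx4_ib_resize_cq(). */
static int example_grow_cq(struct ib_cq *cq, int new_depth)
{
	int err;

	err = ib_resize_cq(cq, new_depth);
	if (err)
		return err;

	/* cq->cqe now reports the usable size, which can exceed new_depth
	 * because of the driver's power-of-two rounding. */
	return 0;
}
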
@@ -222,9 +443,8 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
                mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
                ib_umem_release(mcq->umem);
        } else {
-               mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
-                             &mcq->buf.buf);
-               mlx4_ib_db_free(dev, &mcq->db);
+               mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
+               mlx4_db_free(dev->dev, &mcq->db);
        }
 
        kfree(mcq);
@@ -303,6 +523,20 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
        wc->vendor_err = cqe->vendor_err_syndrome;
 }
 
+static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
+{
+       return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4      |
+                                     MLX4_CQE_STATUS_IPV4F     |
+                                     MLX4_CQE_STATUS_IPV4OPT   |
+                                     MLX4_CQE_STATUS_IPV6      |
+                                     MLX4_CQE_STATUS_IPOK)) ==
+               cpu_to_be16(MLX4_CQE_STATUS_IPV4        |
+                           MLX4_CQE_STATUS_IPOK))              &&
+               (status & cpu_to_be16(MLX4_CQE_STATUS_UDP       |
+                                     MLX4_CQE_STATUS_TCP))     &&
+               checksum == cpu_to_be16(0xffff);
+}
+
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
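
mlx4_ib_ipoib_csum_ok() above accepts a receive completion as checksum-verified only when the status word flags a plain IPv4 packet (no fragment, no IP options, not IPv6) with the IP-OK bit set, the packet is TCP or UDP, and the reported L4 checksum is 0xffff. A small illustration, using the same constants, of inputs that pass the check:

/* Example inputs, not driver code: an unfragmented IPv4 TCP frame with
 * good checksums makes mlx4_ib_ipoib_csum_ok() return non-zero. */
__be16 status   = cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			      MLX4_CQE_STATUS_IPOK |
			      MLX4_CQE_STATUS_TCP);
__be16 checksum = cpu_to_be16(0xffff);
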
@@ -313,8 +547,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
        struct mlx4_ib_srq *srq;
        int is_send;
        int is_error;
+       u32 g_mlpath_rqpn;
        u16 wqe_ctr;
 
+repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;
@@ -331,18 +567,40 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;
 
+       if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
+                    is_send)) {
+               printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+               return -EINVAL;
+       }
+
+       /* Resize CQ in progress */
+       if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
+               if (cq->resize_buf) {
+                       struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
+
+                       mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+                       cq->buf      = cq->resize_buf->buf;
+                       cq->ibcq.cqe = cq->resize_buf->cqe;
+
+                       kfree(cq->resize_buf);
+                       cq->resize_buf = NULL;
+               }
+
+               goto repoll;
+       }
+
        if (!*cur_qp ||
-           (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
+           (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
-                                      be32_to_cpu(cqe->my_qpn));
+                                      be32_to_cpu(cqe->vlan_my_qpn));
                if (unlikely(!mqp)) {
                        printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
-                              cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
+                              cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
                        return -EINVAL;
                }
 
@@ -353,8 +611,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 
        if (is_send) {
                wq = &(*cur_qp)->sq;
-               wqe_ctr = be16_to_cpu(cqe->wqe_index);
-               wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+               if (!(*cur_qp)->sq_signal_bits) {
+                       wqe_ctr = be16_to_cpu(cqe->wqe_index);
+                       wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+               }
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        } else if ((*cur_qp)->ibqp.srq) {
@@ -386,6 +646,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                case MLX4_OPCODE_SEND_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                case MLX4_OPCODE_SEND:
+               case MLX4_OPCODE_SEND_INVAL:
                        wc->opcode    = IB_WC_SEND;
                        break;
                case MLX4_OPCODE_RDMA_READ:
@@ -403,34 +664,49 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                case MLX4_OPCODE_BIND_MW:
                        wc->opcode    = IB_WC_BIND_MW;
                        break;
+               case MLX4_OPCODE_LSO:
+                       wc->opcode    = IB_WC_LSO;
+                       break;
+               case MLX4_OPCODE_FMR:
+                       wc->opcode    = IB_WC_FAST_REG_MR;
+                       break;
+               case MLX4_OPCODE_LOCAL_INVAL:
+                       wc->opcode    = IB_WC_LOCAL_INV;
+                       break;
                }
        } else {
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);
 
                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
-                       wc->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
-                       wc->wc_flags = IB_WC_WITH_IMM;
-                       wc->imm_data = cqe->immed_rss_invalid;
+                       wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
+                       wc->wc_flags    = IB_WC_WITH_IMM;
+                       wc->ex.imm_data = cqe->immed_rss_invalid;
+                       break;
+               case MLX4_RECV_OPCODE_SEND_INVAL:
+                       wc->opcode      = IB_WC_RECV;
+                       wc->wc_flags    = IB_WC_WITH_INVALIDATE;
+                       wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
                        break;
                case MLX4_RECV_OPCODE_SEND:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = 0;
                        break;
                case MLX4_RECV_OPCODE_SEND_IMM:
-                       wc->opcode   = IB_WC_RECV;
-                       wc->wc_flags = IB_WC_WITH_IMM;
-                       wc->imm_data = cqe->immed_rss_invalid;
+                       wc->opcode      = IB_WC_RECV;
+                       wc->wc_flags    = IB_WC_WITH_IMM;
+                       wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                }
 
                wc->slid           = be16_to_cpu(cqe->rlid);
-               wc->sl             = cqe->sl >> 4;
-               wc->src_qp         = be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff;
-               wc->dlid_path_bits = (be32_to_cpu(cqe->g_mlpath_rqpn) >> 24) & 0x7f;
-               wc->wc_flags      |= be32_to_cpu(cqe->g_mlpath_rqpn) & 0x80000000 ?
-                       IB_WC_GRH : 0;
-               wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) >> 16;
+               wc->sl             = be16_to_cpu(cqe->sl_vid) >> 12;
+               g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
+               wc->src_qp         = g_mlpath_rqpn & 0xffffff;
+               wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
+               wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
+               wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
+               wc->csum_ok        = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
        }
 
        return 0;
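
The receive-side decode above packs several fields into g_mlpath_rqpn: the source QPN in bits 23..0, the DLID path bits in bits 30..24, and a GRH-present flag in bit 31, while the service level sits in the top four bits of sl_vid. A stand-alone restatement of the extraction, assuming host-order inputs (the names are illustrative):

/* Hypothetical decoder mirroring the shifts and masks used above. */
static void example_decode_rq_cqe(u32 g_mlpath_rqpn, u16 sl_vid,
				  struct ib_wc *wc)
{
	wc->src_qp         = g_mlpath_rqpn & 0xffffff;		/* bits 23..0  */
	wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;	/* bits 30..24 */
	if (g_mlpath_rqpn & 0x80000000)				/* bit 31      */
		wc->wc_flags |= IB_WC_GRH;
	wc->sl = sl_vid >> 12;					/* top 4 bits  */
}
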
@@ -498,7 +774,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-               if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
+               if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                        if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                        ++nfreed;