/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
*/
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
- struct ipath_cq_wc *wc = cq->queue;
+ struct ipath_cq_wc *wc;
unsigned long flags;
u32 head;
u32 next;
* Note that the head pointer might be writable by user processes.
* Take care to verify it is a sane value.
*/
+ wc = cq->queue;
head = wc->head;
if (head >= (unsigned) cq->ibcq.cqe) {
head = cq->ibcq.cqe;
next = 0;
} else
next = head + 1;
if (unlikely(next == wc->tail)) {
spin_unlock_irqrestore(&cq->lock, flags);
/* CQ overflow; IB_EVENT_CQ_ERR reporting elided here */
return;
}
- wc->queue[head] = *entry;
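+ /*
+ * For a CQ mapped into user space, copy the completion into the
+ * user-visible struct ib_uverbs_wc layout, field by field.
+ */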
+ if (cq->ip) {
+ wc->uqueue[head].wr_id = entry->wr_id;
+ wc->uqueue[head].status = entry->status;
+ wc->uqueue[head].opcode = entry->opcode;
+ wc->uqueue[head].vendor_err = entry->vendor_err;
+ wc->uqueue[head].byte_len = entry->byte_len;
+ wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
+ wc->uqueue[head].qp_num = entry->qp->qp_num;
+ wc->uqueue[head].src_qp = entry->src_qp;
+ wc->uqueue[head].wc_flags = entry->wc_flags;
+ wc->uqueue[head].pkey_index = entry->pkey_index;
+ wc->uqueue[head].slid = entry->slid;
+ wc->uqueue[head].sl = entry->sl;
+ wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+ wc->uqueue[head].port_num = entry->port_num;
+ /* Make sure entry is written before the head index. */
+ smp_wmb();
+ } else
+ wc->kqueue[head] = *entry;
wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
struct ipath_cq *cq = to_icq(ibcq);
- struct ipath_cq_wc *wc = cq->queue;
+ struct ipath_cq_wc *wc;
unsigned long flags;
int npolled;
+ u32 tail;
+
+ /* The kernel can only poll a kernel completion queue */
+ if (cq->ip) {
+ npolled = -EINVAL;
+ goto bail;
+ }
spin_lock_irqsave(&cq->lock, flags);
+ wc = cq->queue;
+ tail = wc->tail;
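+ /* Sanity check the tail index before using it to index the queue. */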
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
- if (wc->tail == wc->head)
+ if (tail == wc->head)
break;
- *entry = wc->queue[wc->tail];
- if (wc->tail >= cq->ibcq.cqe)
- wc->tail = 0;
+ /* The kernel doesn't need a RMB since it has the lock. */
+ *entry = wc->kqueue[tail];
+ if (tail >= cq->ibcq.cqe)
+ tail = 0;
else
- wc->tail++;
+ tail++;
}
+ wc->tail = tail;
spin_unlock_irqrestore(&cq->lock, flags);
+bail:
return npolled;
}
*
* Called by ib_create_cq() in the generic verbs code.
*/
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct ipath_cq *cq;
struct ipath_cq_wc *wc;
struct ib_cq *ret;
+ u32 sz;
- if (entries > ib_ipath_max_cqes) {
+ if (entries < 1 || entries > ib_ipath_max_cqes) {
ret = ERR_PTR(-EINVAL);
goto done;
}
- if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
/* Allocate the completion queue structure. */
cq = kmalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
* We need to use vmalloc() in order to support mmap and large
* numbers of entries.
*/
- wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
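+ /*
+ * Allocate one extra entry so that a full queue (next == tail) can be
+ * distinguished from an empty one (head == tail).
+ */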
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+ else
+ sz += sizeof(struct ib_wc) * (entries + 1);
+ wc = vmalloc_user(sz);
if (!wc) {
ret = ERR_PTR(-ENOMEM);
goto bail_cq;
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
- struct ipath_mmap_info *ip;
- __u64 offset = (__u64) wc;
int err;
- err = ib_copy_to_udata(udata, &offset, sizeof(offset));
- if (err) {
- ret = ERR_PTR(err);
+ cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
+ if (!cq->ip) {
+ ret = ERR_PTR(-ENOMEM);
goto bail_wc;
}
- /* Allocate info for ipath_mmap(). */
- ip = kmalloc(sizeof(*ip), GFP_KERNEL);
- if (!ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wc;
+ err = ib_copy_to_udata(udata, &cq->ip->offset,
+ sizeof(cq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
}
- cq->ip = ip;
- ip->context = context;
- ip->obj = wc;
- kref_init(&ip->ref);
- ip->mmap_cnt = 0;
- ip->size = PAGE_ALIGN(sizeof(*wc) +
- sizeof(struct ib_wc) * entries);
- spin_lock_irq(&dev->pending_lock);
- ip->next = dev->pending_mmaps;
- dev->pending_mmaps = ip;
- spin_unlock_irq(&dev->pending_lock);
} else
cq->ip = NULL;
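+ /*
+ * Check and update the CQ count under n_cqs_lock so that concurrent
+ * creators cannot exceed ib_ipath_max_cqs.
+ */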
+ spin_lock(&dev->n_cqs_lock);
+ if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
+ spin_unlock(&dev->n_cqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_cqs_allocated++;
+ spin_unlock(&dev->n_cqs_lock);
+
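+ /*
+ * Now that the CQ is counted, publish the mmap info so that
+ * ipath_mmap() can find it.
+ */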
+ if (cq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
/*
* ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
* The number of entries should be >= the number requested or return
ret = &cq->ibcq;
- dev->n_cqs_allocated++;
goto done;
+bail_ip:
+ kfree(cq->ip);
bail_wc:
vfree(wc);
-
bail_cq:
kfree(cq);
-
done:
return ret;
}
struct ipath_cq *cq = to_icq(ibcq);
tasklet_kill(&cq->comptask);
+ spin_lock(&dev->n_cqs_lock);
dev->n_cqs_allocated--;
+ spin_unlock(&dev->n_cqs_lock);
if (cq->ip)
kref_put(&cq->ip->ref, ipath_release_mmap_info);
else
/**
* ipath_req_notify_cq - change the notification type for a completion queue
* @ibcq: the completion queue
- * @notify: the type of notification to request
+ * @notify_flags: the type of notification to request
*
* Returns 0 for success.
*
* This may be called from interrupt context. Also called by
* ib_req_notify_cq() in the generic verbs code.
*/
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
struct ipath_cq *cq = to_icq(ibcq);
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&cq->lock, flags);
/*
* any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
*/
if (cq->notify != IB_CQ_NEXT_COMP)
- cq->notify = notify;
+ cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
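+ /* Tell the caller if completions are already pending. */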
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->queue->head != cq->queue->tail)
+ ret = 1;
+
spin_unlock_irqrestore(&cq->lock, flags);
- return 0;
+
+ return ret;
}
+/**
+ * ipath_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ * @cqe: the new number of entries to allocate
+ * @udata: used to return the mmap offset of the new queue to user space
+ *
+ * Returns 0 for success.
+ */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
struct ipath_cq *cq = to_icq(ibcq);
- struct ipath_cq_wc *old_wc = cq->queue;
+ struct ipath_cq_wc *old_wc;
struct ipath_cq_wc *wc;
u32 head, tail, n;
int ret;
+ u32 sz;
+
+ if (cqe < 1 || cqe > ib_ipath_max_cqes) {
+ ret = -EINVAL;
+ goto bail;
+ }
/*
* Need to use vmalloc() if we want to support large #s of entries.
*/
- wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
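+ /* As in ipath_create_cq(), allocate one extra entry for the circular buffer. */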
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ else
+ sz += sizeof(struct ib_wc) * (cqe + 1);
+ wc = vmalloc_user(sz);
if (!wc) {
ret = -ENOMEM;
goto bail;
}
- /*
- * Return the address of the WC as the offset to mmap.
- * See ipath_mmap() for details.
- */
+ /* Check that we can write the offset to mmap. */
if (udata && udata->outlen >= sizeof(__u64)) {
- __u64 offset = (__u64) wc;
+ __u64 offset = 0;
ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (ret)
- goto bail;
+ goto bail_free;
}
spin_lock_irq(&cq->lock);
* Make sure head and tail are sane since they
* might be user writable.
*/
+ old_wc = cq->queue;
head = old_wc->head;
if (head > (u32) cq->ibcq.cqe)
head = (u32) cq->ibcq.cqe;
else
n = head - tail;
if (unlikely((u32)cqe < n)) {
- spin_unlock_irq(&cq->lock);
- vfree(wc);
- ret = -EOVERFLOW;
- goto bail;
+ ret = -EINVAL;
+ goto bail_unlock;
}
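+ /* Copy the completions still queued in the old buffer into the new one. */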
for (n = 0; tail != head; n++) {
- wc->queue[n] = old_wc->queue[tail];
+ if (cq->ip)
+ wc->uqueue[n] = old_wc->uqueue[tail];
+ else
+ wc->kqueue[n] = old_wc->kqueue[tail];
if (tail == (u32) cq->ibcq.cqe)
tail = 0;
else
struct ipath_ibdev *dev = to_idev(ibcq->device);
struct ipath_mmap_info *ip = cq->ip;
- ip->obj = wc;
- ip->size = PAGE_ALIGN(sizeof(*wc) +
- sizeof(struct ib_wc) * cqe);
+ ipath_update_mmap_info(dev, ip, sz, wc);
+
+ /*
+ * Return the offset to mmap.
+ * See ipath_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
spin_lock_irq(&dev->pending_lock);
- ip->next = dev->pending_mmaps;
- dev->pending_mmaps = ip;
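+ /* Re-queue the mmap info only if it is not already pending. */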
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps, &dev->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);
}
ret = 0;
+ goto bail;
+bail_unlock:
+ spin_unlock_irq(&cq->lock);
+bail_free:
+ vfree(wc);
bail:
return ret;
}