#include <linux/vmalloc.h>
#include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_kernel.h"
#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
BITS_PER_PAGE, off)
-#define TRANS_INVALID 0
-#define TRANS_ANY2RST 1
-#define TRANS_RST2INIT 2
-#define TRANS_INIT2INIT 3
-#define TRANS_INIT2RTR 4
-#define TRANS_RTR2RTS 5
-#define TRANS_RTS2RTS 6
-#define TRANS_SQERR2RTS 7
-#define TRANS_ANY2ERR 8
-#define TRANS_RTS2SQD 9 /* XXX Wait for expected ACKs & signal event */
-#define TRANS_SQD2SQD 10 /* error if not drained & parameter change */
-#define TRANS_SQD2RTS 11 /* error if not drained */
-
/*
* Convert the AETH credit code into the number of credits.
*/
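
/*
 * A decode sketch for context (not part of the patch; the constants
 * are the IPATH_AETH_* values from ipath_verbs.h, and
 * sketch_credit_table is a hypothetical, abridged stand-in for the
 * driver's credit_table[]): the 5-bit credit code indexes a roughly
 * exponential table, so a single byte can advertise up to 32768 RWQEs.
 */
static const u32 sketch_credit_table[32] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	/* the real table keeps growing in the same pattern to 32768 */
};

static u32 sketch_aeth_to_credits(__be32 net_aeth)
{
	u32 aeth = be32_to_cpu(net_aeth);
	u32 code = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	return sketch_credit_table[code];	/* the mask keeps code < 32 */
}
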
free_qpn(qpt, qp->ibqp.qp_num);
if (!atomic_dec_and_test(&qp->refcount) ||
!ipath_destroy_qp(&qp->ibqp))
- _VERBS_INFO("QP memory leak!\n");
+ ipath_dbg(KERN_INFO "QP memory leak!\n");
qp = nqp;
}
}
qp->remote_qpn = 0;
qp->qkey = 0;
qp->qp_access_flags = 0;
+ clear_bit(IPATH_S_BUSY, &qp->s_flags);
qp->s_hdrwords = 0;
qp->s_psn = 0;
qp->r_psn = 0;
- atomic_set(&qp->msn, 0);
+ qp->r_msn = 0;
if (qp->ibqp.qp_type == IB_QPT_RC) {
qp->s_state = IB_OPCODE_RC_SEND_LAST;
qp->r_state = IB_OPCODE_RC_SEND_LAST;
} else {
qp->s_state = IB_OPCODE_UC_SEND_LAST;
qp->r_state = IB_OPCODE_UC_SEND_LAST;
}
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- qp->s_nak_state = 0;
+ qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
qp->s_rnr_timeout = 0;
qp->s_head = 0;
qp->s_tail = 0;
qp->s_last = 0;
qp->s_ssn = 1;
qp->s_lsn = 0;
- qp->r_rq.head = 0;
- qp->r_rq.tail = 0;
+ if (qp->r_rq.wq) {
+ qp->r_rq.wq->head = 0;
+ qp->r_rq.wq->tail = 0;
+ }
qp->r_reuse_sge = 0;
}
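
/*
 * For context, a sketch of the structure behind qp->r_rq.wq (close to
 * the ipath_rwq this patch introduces): the head/tail indices sit at
 * the front of a single vmalloc_user() allocation so the whole ring
 * can be mmap()ed into the owning process.  A QP attached to an SRQ
 * has no receive queue of its own, hence the NULL check above.
 */
struct sketch_rwq {
	u32 head;			/* userspace posts new RWQEs here */
	u32 tail;			/* the driver consumes RWQEs here */
	struct ipath_rwqe wq[0];	/* ring of qp->r_rq.size entries */
};
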
* @qp: the QP to put into an error state
*
* Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
+ * QP s_lock should be held and interrupts disabled.
*/
-static void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
- _VERBS_INFO("QP%d/%d in error state\n",
- qp->ibqp.qp_num, qp->remote_qpn);
+ ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
+ qp->ibqp.qp_num, qp->remote_qpn);
spin_lock(&dev->pending_lock);
/* XXX What if it's already removed by the timeout code? */
qp->s_hdrwords = 0;
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- wc.opcode = IB_WC_RECV;
- while (qp->r_rq.tail != qp->r_rq.head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
- if (++qp->r_rq.tail >= qp->r_rq.size)
- qp->r_rq.tail = 0;
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ if (qp->r_rq.wq) {
+ struct ipath_rwq *wq;
+ u32 head;
+ u32 tail;
+
+ spin_lock(&qp->r_rq.lock);
+
+ /* sanity check indices before trusting them */
+ wq = qp->r_rq.wq;
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ wc.opcode = IB_WC_RECV;
+ while (tail != head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wq->tail = tail;
+
+ spin_unlock(&qp->r_rq.lock);
}
}
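
/*
 * The clamping above is load-bearing: once the ring is mmap()ed,
 * wq->head and wq->tail live in memory a buggy or hostile process can
 * scribble on, so the kernel must bound them before indexing.  As a
 * hypothetical helper spelling out the idiom:
 */
static inline u32 sketch_sanitize_index(u32 idx, u32 size)
{
	/* wrap an out-of-range index to 0 rather than trusting it */
	return idx >= size ? 0 : idx;
}
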
* @ibqp: the queue pair whose attributes we're modifying
* @attr: the new attributes
* @attr_mask: the mask of attributes to modify
+ * @udata: user data for ipathverbs.so
*
* Returns 0 on success, otherwise returns an errno.
*/
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask)
+ int attr_mask, struct ib_udata *udata)
{
struct ipath_ibdev *dev = to_idev(ibqp->device);
struct ipath_qp *qp = to_iqp(ibqp);
unsigned long flags;
int ret;
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- spin_lock(&qp->s_lock);
+ spin_lock_irqsave(&qp->s_lock, flags);
cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : qp->state;
if (attr_mask & IB_QP_AV)
if (attr->ah_attr.dlid == 0 ||
- attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+ attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
goto inval;
if (attr_mask & IB_QP_PKEY_INDEX)
- if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+ if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
goto inval;
if (attr_mask & IB_QP_MIN_RNR_TIMER)
}
if (attr_mask & IB_QP_MIN_RNR_TIMER)
- qp->s_min_rnr_timer = attr->min_rnr_timer;
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
if (attr_mask & IB_QP_QKEY)
qp->qkey = attr->qkey;
qp->state = new_state;
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
- /*
- * If QP1 changed to the RTS state, try to move to the link to INIT
- * even if it was ACTIVE so the SM will reinitialize the SMA's
- * state.
- */
- if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS) {
- struct ipath_ibdev *dev = to_idev(ibqp->device);
-
- ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
- }
ret = 0;
goto bail;
inval:
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EINVAL;
bail:
attr->dest_qp_num = qp->remote_qpn;
attr->qp_access_flags = qp->qp_access_flags;
attr->cap.max_send_wr = qp->s_size - 1;
- attr->cap.max_recv_wr = qp->r_rq.size - 1;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
attr->cap.max_send_sge = qp->s_max_sge;
attr->cap.max_recv_sge = qp->r_rq.max_sge;
attr->cap.max_inline_data = 0;
attr->sq_draining = 0;
attr->max_rd_atomic = 1;
attr->max_dest_rd_atomic = 1;
- attr->min_rnr_timer = qp->s_min_rnr_timer;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
attr->port_num = 1;
attr->timeout = 0;
attr->retry_cnt = qp->s_retry_cnt;
* @qp: the queue pair to compute the AETH for
*
* Returns the AETH.
- *
- * The QP s_lock should be held.
*/
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
- u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;
+ u32 aeth = qp->r_msn & IPATH_MSN_MASK;
- if (qp->s_nak_state) {
- aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
- } else if (qp->ibqp.srq) {
+ if (qp->ibqp.srq) {
/*
* Shared receive queues don't generate credits.
* Set the credit field to the invalid value.
*/
- aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
+ aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
} else {
u32 min, max, x;
u32 credits;
-
+ struct ipath_rwq *wq = qp->r_rq.wq;
+ u32 head;
+ u32 tail;
+
+ /* sanity check indices before trusting them */
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
/*
* Compute the number of credits available (RWQEs).
* XXX Not holding the r_rq.lock here so there is a small
* chance that the pair of reads is not atomic.
*/
- credits = qp->r_rq.head - qp->r_rq.tail;
+ credits = head - tail;
if ((int)credits < 0)
credits += qp->r_rq.size;
/*
else
min = x;
}
- aeth |= x << IPS_AETH_CREDIT_SHIFT;
+ aeth |= x << IPATH_AETH_CREDIT_SHIFT;
}
return cpu_to_be32(aeth);
}
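
/*
 * Worked example of the arithmetic above (values invented): with
 * r_rq.size == 8, head == 2 and tail == 5, the u32 subtraction wraps,
 * the (int) cast sees -3, and adding the ring size yields 5 free
 * RWQEs.  As a standalone sketch:
 */
static u32 sketch_ring_credits(u32 head, u32 tail, u32 size)
{
	u32 credits = head - tail;	/* may wrap around zero */

	if ((int) credits < 0)
		credits += size;	/* e.g. 2 - 5 + 8 == 5 */
	return credits;
}
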
size_t sz;
struct ib_qp *ret;
- if (init_attr->cap.max_send_sge > 255 ||
- init_attr->cap.max_recv_sge > 255) {
+ if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
+ init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
+ init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
+ init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
ret = ERR_PTR(-ENOMEM);
goto bail;
}
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_wr == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
switch (init_attr->qp_type) {
case IB_QPT_UC:
case IB_QPT_RC:
case IB_QPT_UD:
case IB_QPT_SMI:
case IB_QPT_GSI:
- qp = kmalloc(sizeof(*qp), GFP_KERNEL);
+ sz = sizeof(*qp);
+ if (init_attr->srq) {
+ struct ipath_srq *srq = to_isrq(init_attr->srq);
+
+ sz += sizeof(*qp->r_sg_list) *
+ srq->rq.max_sge;
+ } else
+ sz += sizeof(*qp->r_sg_list) *
+ init_attr->cap.max_recv_sge;
+ qp = kmalloc(sz, GFP_KERNEL);
if (!qp) {
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto bail_swq;
}
- qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
- sz = sizeof(struct ipath_sge) *
- init_attr->cap.max_recv_sge +
- sizeof(struct ipath_rwqe);
- qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
- if (!qp->r_rq.wq) {
- kfree(qp);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
+ if (init_attr->srq) {
+ sz = 0;
+ qp->r_rq.size = 0;
+ qp->r_rq.max_sge = 0;
+ qp->r_rq.wq = NULL;
+ init_attr->cap.max_recv_wr = 0;
+ init_attr->cap.max_recv_sge = 0;
+ } else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct ipath_rwqe);
+ qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
+ qp->r_rq.size * sz);
+ if (!qp->r_rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+ }
}
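
/*
 * vmalloc_user(), unlike the plain vmalloc() it replaces here, returns
 * page-aligned, zeroed memory that is safe to remap into userspace.
 * A sketch of the mmap handler's core under that assumption
 * (sketch_mmap_rwq is hypothetical; the real work is done in
 * ipath_mmap() using the ipath_mmap_info set up below):
 */
static int sketch_mmap_rwq(struct vm_area_struct *vma,
			   struct ipath_mmap_info *ip)
{
	if (vma->vm_end - vma->vm_start != ip->size)
		return -EINVAL;		/* must map the entire ring */
	return remap_vmalloc_range(vma, ip->obj, 0);
}
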
/*
qp->s_wq = swq;
qp->s_size = init_attr->cap.max_send_wr + 1;
qp->s_max_sge = init_attr->cap.max_send_sge;
- qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
1 << IPATH_S_SIGNAL_REQ_WR : 0;
dev = to_idev(ibpd->device);
err = ipath_alloc_qpn(&dev->qp_table, qp,
init_attr->qp_type);
if (err) {
- vfree(swq);
- vfree(qp->r_rq.wq);
- kfree(qp);
ret = ERR_PTR(err);
- goto bail;
+ goto bail_rwq;
}
+ qp->ip = NULL;
ipath_reset_qp(qp);
-
- /* Tell the core driver that the kernel SMA is present. */
- if (init_attr->qp_type == IB_QPT_SMI)
- ipath_layer_set_verbs_flags(dev->dd,
- IPATH_VERBS_KERNEL_SMA);
break;
default:
init_attr->cap.max_inline_data = 0;
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See ipath_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ struct ipath_mmap_info *ip;
+ __u64 offset = (__u64) qp->r_rq.wq;
+ int err;
+
+ err = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_rwq;
+ }
+
+ if (qp->r_rq.wq) {
+ /* Allocate info for ipath_mmap(). */
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_rwq;
+ }
+ qp->ip = ip;
+ ip->context = ibpd->uobject->context;
+ ip->obj = qp->r_rq.wq;
+ kref_init(&ip->ref);
+ ip->mmap_cnt = 0;
+ ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
+ qp->r_rq.size * sz);
+ spin_lock_irq(&dev->pending_lock);
+ ip->next = dev->pending_mmaps;
+ dev->pending_mmaps = ip;
+ spin_unlock_irq(&dev->pending_lock);
+ }
+ }
+
ret = &qp->ibqp;
+ goto bail;
+bail_rwq:
+ vfree(qp->r_rq.wq);
+bail_qp:
+ kfree(qp);
+bail_swq:
+ vfree(swq);
bail:
return ret;
}
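
/*
 * The userspace counterpart, for context (a sketch, not actual
 * ipathverbs.so code; needs <sys/mman.h>): the __u64 copied out
 * through udata above becomes the offset argument to mmap() on the
 * device fd, which ipath_mmap() then matches against pending_mmaps.
 */
static struct ipath_rwq *sketch_map_rwq(int dev_fd, __u64 offset,
					size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       dev_fd, (off_t) offset);

	return p == MAP_FAILED ? NULL : (struct ipath_rwq *) p;
}
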
struct ipath_ibdev *dev = to_idev(ibqp->device);
unsigned long flags;
- /* Tell the core driver that the kernel SMA is gone. */
- if (qp->ibqp.qp_type == IB_QPT_SMI)
- ipath_layer_set_verbs_flags(dev->dd, 0);
-
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- spin_lock(&qp->s_lock);
+ spin_lock_irqsave(&qp->s_lock, flags);
qp->state = IB_QPS_ERR;
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
/* Stop the sending tasklet. */
tasklet_kill(&qp->s_task);
if (atomic_read(&qp->refcount) != 0)
ipath_free_qp(&dev->qp_table, qp);
+ if (qp->ip)
+ kref_put(&qp->ip->ref, ipath_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
vfree(qp->s_wq);
- vfree(qp->r_rq.wq);
kfree(qp);
return 0;
}
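
/*
 * Lifetime note: if userspace still has the ring mapped, the mapping
 * holds a kref reference, so the final vfree() must come from the kref
 * release callback rather than from destroy.  A sketch of that
 * callback, assuming the ipath_mmap_info fields used earlier:
 */
static void sketch_release_mmap_info(struct kref *ref)
{
	struct ipath_mmap_info *ip =
		container_of(ref, struct ipath_mmap_info, ref);

	vfree(ip->obj);		/* the RWQ from vmalloc_user() */
	kfree(ip);
}
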
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
- _VERBS_INFO("Send queue error on QP%d/%d: err: %d\n",
- qp->ibqp.qp_num, qp->remote_qpn, wc->status);
+ ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
+ qp->ibqp.qp_num, qp->remote_qpn, wc->status);
spin_lock(&dev->pending_lock);
/* XXX What if it's already removed by the timeout code? */
*/
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
- u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
+ u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
/*
* If the credit is invalid, we can send
* as many packets as we like. Otherwise, we have to
* honor the credit field.
*/
- if (credit == IPS_AETH_CREDIT_INVAL)
+ if (credit == IPATH_AETH_CREDIT_INVAL)
qp->s_lsn = (u32) -1;
else if (qp->s_lsn != (u32) -1) {
/* Compute new LSN (i.e., MSN + credit) */
- credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
+ credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
if (ipath_cmp24(credit, qp->s_lsn) > 0)
qp->s_lsn = credit;
}
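
/*
 * ipath_cmp24() compares 24-bit sequence numbers (PSNs/MSNs) modulo
 * 2^24.  A sketch of the idea: shift the difference into the top 24
 * bits so the sign of the result reflects wraparound distance, e.g.
 * sketch_cmp24(0x000001, 0xFFFFFF) > 0 because 1 follows 0xFFFFFF.
 */
static inline int sketch_cmp24(u32 a, u32 b)
{
	/* subtract and shift as u32, then reinterpret the sign */
	return (int) ((a - b) << 8);
}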