/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

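/*
 * Each SRQ owns a circular buffer of struct ipath_rwqe entries protected
 * by rq.lock.  rq.head indexes the next slot to be filled by
 * ipath_post_srq_receive() and rq.tail the next slot to be consumed;
 * one slot is always left unused so that head == tail means "empty"
 * while head + 1 == tail (modulo rq.size) means "full".
 */
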
/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                           struct ib_recv_wr **bad_wr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        struct ipath_ibdev *dev = to_idev(ibsrq->device);
        unsigned long flags;
        int ret;

        for (; wr; wr = wr->next) {
                struct ipath_rwqe *wqe;
                u32 next;
                int i, j;

                if (wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                next = srq->rq.head + 1;
                if (next >= srq->rq.size)
                        next = 0;
                if (next == srq->rq.tail) {
                        /* Advancing head would reach tail: the ring is full. */
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&srq->rq, srq->rq.head);
                wqe->wr_id = wr->wr_id;
                wqe->sg_list[0].mr = NULL;
                wqe->sg_list[0].vaddr = NULL;
                wqe->sg_list[0].length = 0;
                wqe->sg_list[0].sge_length = 0;
                wqe->length = 0;
                for (i = 0, j = 0; i < wr->num_sge; i++) {
                        /* A user SRQ may not post the reserved LKEY of 0. */
                        if (to_ipd(srq->ibsrq.pd)->user &&
                            wr->sg_list[i].lkey == 0) {
                                spin_unlock_irqrestore(&srq->rq.lock, flags);
                                *bad_wr = wr;
                                ret = -EINVAL;
                                goto bail;
                        }
                        if (wr->sg_list[i].length == 0)
                                continue;
                        if (!ipath_lkey_ok(&dev->lk_table, &wqe->sg_list[j],
                                           &wr->sg_list[i],
                                           IB_ACCESS_LOCAL_WRITE)) {
                                spin_unlock_irqrestore(&srq->rq.lock, flags);
                                *bad_wr = wr;
                                ret = -EINVAL;
                                goto bail;
                        }
                        wqe->length += wr->sg_list[i].length;
                        j++;
                }
                wqe->num_sge = j;
                srq->rq.head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}

/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: not used by the InfiniPath verbs driver
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
                                struct ib_srq_init_attr *srq_init_attr,
                                struct ib_udata *udata)
{
        struct ipath_srq *srq;
        u32 sz;
        struct ib_srq *ret;

        if (srq_init_attr->attr.max_sge < 1) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge +
                sizeof(struct ipath_rwqe);
        srq->rq.wq = vmalloc(srq->rq.size * sz);
        if (!srq->rq.wq) {
                kfree(srq);
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->rq.head = 0;
        srq->rq.tail = 0;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        srq->limit = srq_init_attr->attr.srq_limit;

        ret = &srq->ibsrq;

bail:
        return ret;
}

/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        unsigned long flags;
        int ret;

        if (attr_mask & IB_SRQ_LIMIT) {
                spin_lock_irqsave(&srq->rq.lock, flags);
                srq->limit = attr->srq_limit;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        if (attr_mask & IB_SRQ_MAX_WR) {
                u32 size = attr->max_wr + 1;
                struct ipath_rwqe *wq, *p;
                u32 n;
                u32 sz;

                /* We can't reduce the number of SGEs per WQE. */
                if (attr->max_sge < srq->rq.max_sge) {
                        ret = -EINVAL;
                        goto bail;
                }

                sz = sizeof(struct ipath_rwqe) +
                        attr->max_sge * sizeof(struct ipath_sge);
                wq = vmalloc(size * sz);
                if (!wq) {
                        ret = -ENOMEM;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                if (srq->rq.head < srq->rq.tail)
                        n = srq->rq.size + srq->rq.head - srq->rq.tail;
                else
                        n = srq->rq.head - srq->rq.tail;
                /* The new ring must hold the posted WRs and exceed the limit. */
                if (size <= n || size <= srq->limit) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        vfree(wq);
                        ret = -EINVAL;
                        goto bail;
                }
                /* Copy the outstanding WQEs into the new ring. */
                n = 0;
                p = wq;
                while (srq->rq.tail != srq->rq.head) {
                        struct ipath_rwqe *wqe;
                        int i;

                        wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail);
                        p->wr_id = wqe->wr_id;
                        p->length = wqe->length;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct ipath_rwqe *)((char *) p + sz);
                        if (++srq->rq.tail >= srq->rq.size)
                                srq->rq.tail = 0;
                }
                vfree(srq->rq.wq);
                srq->rq.wq = wq;
                srq->rq.size = size;
                srq->rq.head = n;
                srq->rq.tail = 0;
                srq->rq.max_sge = attr->max_sge;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}

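/**
 * ipath_query_srq - report the current attributes of a shared receive queue
 * @ibsrq: the SRQ to query
 * @attr: filled in with the current max_wr, max_sge, and srq_limit values
 */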
int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);

        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;

        return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
        struct ipath_srq *srq = to_isrq(ibsrq);

        vfree(srq->rq.wq);
        kfree(srq);

        return 0;
}