svcrdma: Add dma map count and WARN_ON
author: Tom Tucker <tom@opengridcomputing.com>
Wed, 28 May 2008 18:17:44 +0000 (13:17 -0500)
committer: Tom Tucker <tom@opengridcomputing.com>
Wed, 2 Jul 2008 20:01:56 +0000 (15:01 -0500)
Add a dma map count in order to verify that all DMA mapping resources
have been freed when the transport is closed.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c

index fd5e8a1..ab93afc 100644 (file)
@@ -130,6 +130,7 @@ struct svcxprt_rdma {
 
        struct ib_pd         *sc_pd;
 
+       atomic_t             sc_dma_used;
        atomic_t             sc_ctxt_used;
        struct list_head     sc_ctxt_free;
        int                  sc_ctxt_cnt;
index d25971b..b4b17f4 100644 (file)
@@ -222,6 +222,7 @@ static void rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
        ctxt->count = count;
        ctxt->direction = DMA_FROM_DEVICE;
        for (i = 0; i < count; i++) {
+               atomic_inc(&xprt->sc_dma_used);
                ctxt->sge[i].addr =
                        ib_dma_map_single(xprt->sc_cm_id->device,
                                          vec[i].iov_base, vec[i].iov_len,
index bdc11a3..a19b22b 100644 (file)
@@ -163,6 +163,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                sge_bytes = min((size_t)bc,
                                (size_t)(vec->sge[xdr_sge_no].iov_len-sge_off));
                sge[sge_no].length = sge_bytes;
+               atomic_inc(&xprt->sc_dma_used);
                sge[sge_no].addr =
                        ib_dma_map_single(xprt->sc_cm_id->device,
                                          (void *)
@@ -385,6 +386,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
        ctxt->count = 1;
 
        /* Prepare the SGE for the RPCRDMA Header */
+       atomic_inc(&rdma->sc_dma_used);
        ctxt->sge[0].addr =
                ib_dma_map_page(rdma->sc_cm_id->device,
                                page, 0, PAGE_SIZE, DMA_TO_DEVICE);
@@ -396,6 +398,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                byte_count -= sge_bytes;
+               atomic_inc(&rdma->sc_dma_used);
                ctxt->sge[sge_no].addr =
                        ib_dma_map_single(rdma->sc_cm_id->device,
                                          vec->sge[sge_no].iov_base,
index 7e8ee66..6fddd58 100644 (file)
@@ -155,6 +155,7 @@ static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
        struct svcxprt_rdma *xprt = ctxt->xprt;
        int i;
        for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
+               atomic_dec(&xprt->sc_dma_used);
                ib_dma_unmap_single(xprt->sc_cm_id->device,
                                    ctxt->sge[i].addr,
                                    ctxt->sge[i].length,
@@ -519,6 +520,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
        cma_xprt->sc_max_requests = svcrdma_max_requests;
        cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
        atomic_set(&cma_xprt->sc_sq_count, 0);
+       atomic_set(&cma_xprt->sc_ctxt_used, 0);
 
        if (!listener) {
                int reqs = cma_xprt->sc_max_requests;
@@ -569,6 +571,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
                BUG_ON(sge_no >= xprt->sc_max_sge);
                page = svc_rdma_get_page();
                ctxt->pages[sge_no] = page;
+               atomic_inc(&xprt->sc_dma_used);
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
@@ -1049,6 +1052,7 @@ static void __svc_rdma_free(struct work_struct *work)
 
        /* Warn if we leaked a resource or under-referenced */
        WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
+       WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
 
        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -1169,6 +1173,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
        /* Prepare SGE for local address */
+       atomic_inc(&xprt->sc_dma_used);
        sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
                                   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        sge.lkey = xprt->sc_phys_mr->lkey;