include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
[safe/jmp/linux-2.6] / drivers / infiniband / hw / ehca / ehca_uverbs.c
index 3340f49..45ee89b 100644 (file)
@@ -40,7 +40,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
+#include <linux/slab.h>
 
 #include "ehca_classes.h"
 #include "ehca_iverbs.h"
@@ -97,7 +97,7 @@ static void ehca_mm_close(struct vm_area_struct *vma)
                     vma->vm_start, vma->vm_end, *count);
 }
 
-static struct vm_operations_struct vm_ops = {
+static const struct vm_operations_struct vm_ops = {
        .open = ehca_mm_open,
        .close = ehca_mm_close,
 };
@@ -109,19 +109,19 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
        u64 vsize, physical;
 
        vsize = vma->vm_end - vma->vm_start;
-       if (vsize != EHCA_PAGESIZE) {
+       if (vsize < EHCA_PAGESIZE) {
                ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
                return -EINVAL;
        }
 
        physical = galpas->user.fw_handle;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
+       ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
        /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
-       ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
-                             vsize, vma->vm_page_prot);
+       ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
+                          vma->vm_page_prot);
        if (unlikely(ret)) {
-               ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+               ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
                return -ENOMEM;
        }
 
@@ -146,7 +146,7 @@ static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
                page = virt_to_page(virt_addr);
                ret = vm_insert_page(vma, start, page);
                if (unlikely(ret)) {
-                       ehca_gen_err("vm_insert_page() failed rc=%x", ret);
+                       ehca_gen_err("vm_insert_page() failed rc=%i", ret);
                        return ret;
                }
                start += PAGE_SIZE;
@@ -169,7 +169,7 @@ static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
                ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_fw() failed rc=%x cq_num=%x",
+                                "ehca_mmap_fw() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
@@ -180,7 +180,7 @@ static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
                ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_queue() failed rc=%x cq_num=%x",
+                                "ehca_mmap_queue() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
@@ -206,33 +206,31 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
                ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
-                                "remap_pfn_range() failed ret=%x qp_num=%x",
+                                "remap_pfn_range() failed ret=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return -ENOMEM;
                }
                break;
 
        case 1: /* qp rqueue_addr */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
-                        qp->ib_qp.qp_num);
+               ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
                                      &qp->mm_count_rqueue);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
+                                "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;
 
        case 2: /* qp squeue_addr */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
-                        qp->ib_qp.qp_num);
+               ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
                                      &qp->mm_count_squeue);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
+                                "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
@@ -253,11 +251,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
        u32 idr_handle = fileoffset & 0x1FFFFFF;
        u32 q_type = (fileoffset >> 27) & 0x1;    /* CQ, QP,...        */
        u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
-       u32 cur_pid = current->tgid;
        u32 ret;
        struct ehca_cq *cq;
        struct ehca_qp *qp;
-       struct ehca_pd *pd;
        struct ib_uobject *uobject;
 
        switch (q_type) {
@@ -270,20 +266,13 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
                if (!cq)
                        return -EINVAL;
 
-               if (cq->ownpid != cur_pid) {
-                       ehca_err(cq->ib_cq.device,
-                                "Invalid caller pid=%x ownpid=%x",
-                                cur_pid, cq->ownpid);
-                       return -ENOMEM;
-               }
-
                if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
                        return -EINVAL;
 
                ret = ehca_mmap_cq(vma, cq, rsrc_type);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_cq() failed rc=%x cq_num=%x",
+                                "ehca_mmap_cq() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
@@ -298,14 +287,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
                if (!qp)
                        return -EINVAL;
 
-               pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
-               if (pd->ownpid != cur_pid) {
-                       ehca_err(qp->ib_qp.device,
-                                "Invalid caller pid=%x ownpid=%x",
-                                cur_pid, pd->ownpid);
-                       return -ENOMEM;
-               }
-
                uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
                if (!uobject || uobject->context != context)
                        return -EINVAL;
@@ -313,7 +294,7 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
                ret = ehca_mmap_qp(vma, qp, rsrc_type);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_qp() failed rc=%x qp_num=%x",
+                                "ehca_mmap_qp() failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }