X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Finfiniband%2Fhw%2Fehca%2Fehca_cq.c;h=67f0670fe3b18624cd752a9ed01b65a660165d81;hb=f4fd0b224d60044d2da5ca02f8f2b5150c1d8731;hp=93995b658d94a7dedbe816f2927589a63b51dde4;hpb=e94b1766097d53e6f3ccfb36c8baa562ffeda3fc;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 93995b6..67f0670 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -113,7 +113,7 @@ struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
 	return ret;
 }
 
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 			     struct ib_ucontext *context,
 			     struct ib_udata *udata)
 {
@@ -134,19 +134,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
+	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
 	if (!my_cq) {
 		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
 			 device);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(my_cq, 0, sizeof(struct ehca_cq));
 	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
 
 	spin_lock_init(&my_cq->spinlock);
 	spin_lock_init(&my_cq->cb_lock);
 	spin_lock_init(&my_cq->task_lock);
+	init_waitqueue_head(&my_cq->wait_completion);
 	my_cq->ownpid = current->tgid;
 
 	cq = &my_cq->ib_cq;
@@ -267,7 +267,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	if (context) {
 		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
 		struct ehca_create_cq_resp resp;
-		struct vm_area_struct *vma;
 		memset(&resp, 0, sizeof(resp));
 		resp.cq_number = my_cq->cq_number;
 		resp.token = my_cq->token;
@@ -276,40 +275,14 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 		resp.ipz_queue.queue_length = ipz_queue->queue_length;
 		resp.ipz_queue.pagesize = ipz_queue->pagesize;
 		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
-		ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
-				       ipz_queue->queue_length,
-				       (void**)&resp.ipz_queue.queue,
-				       &vma);
-		if (ret) {
-			ehca_err(device, "Could not mmap queue pages");
-			cq = ERR_PTR(ret);
-			goto create_cq_exit4;
-		}
-		my_cq->uspace_queue = resp.ipz_queue.queue;
-		resp.galpas = my_cq->galpas;
-		ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
-					 (void**)&resp.galpas.kernel.fw_handle,
-					 &vma);
-		if (ret) {
-			ehca_err(device, "Could not mmap fw_handle");
-			cq = ERR_PTR(ret);
-			goto create_cq_exit5;
-		}
-		my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
 			ehca_err(device, "Copy to udata failed.");
-			goto create_cq_exit6;
+			goto create_cq_exit4;
 		}
 	}
 
 	return cq;
 
-create_cq_exit6:
-	ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
-
-create_cq_exit5:
-	ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
-
 create_cq_exit4:
 	ipz_queue_dtor(&my_cq->ipz_queue);
 
@@ -330,10 +303,19 @@ create_cq_exit1:
 	return cq;
 }
 
+static int get_cq_nr_events(struct ehca_cq *my_cq)
+{
+	int ret;
+	unsigned long flags;
+	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	ret = my_cq->nr_events;
+	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	return ret;
+}
+
 int ehca_destroy_cq(struct ib_cq *cq)
 {
 	u64 h_ret;
-	int ret;
 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 	int cq_num = my_cq->cq_number;
 	struct ib_device *device = cq->device;
@@ -343,32 +325,31 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	u32 cur_pid = current->tgid;
 	unsigned long flags;
 
+	if (cq->uobject) {
+		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
+			ehca_err(device, "Resources still referenced in "
+				 "user space cq_num=%x", my_cq->cq_number);
+			return -EINVAL;
+		}
+		if (my_cq->ownpid != cur_pid) {
+			ehca_err(device, "Invalid caller pid=%x ownpid=%x "
+				 "cq_num=%x",
+				 cur_pid, my_cq->ownpid, my_cq->cq_number);
+			return -EINVAL;
+		}
+	}
+
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-	while (my_cq->nr_callbacks)
-		yield();
+	while (my_cq->nr_events) {
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		/* recheck nr_events to assure no cqe has just arrived */
+	}
 
 	idr_remove(&ehca_cq_idr, my_cq->token);
 	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
-	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
-		ehca_err(device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_cq->ownpid);
-		return -EINVAL;
-	}
-
-	/* un-mmap if vma alloc */
-	if (my_cq->uspace_queue ) {
-		ret = ehca_munmap(my_cq->uspace_queue,
-				  my_cq->ipz_queue.queue_length);
-		if (ret)
-			ehca_err(device, "Could not munmap queue ehca_cq=%p "
-				 "cq_num=%x", my_cq, cq_num);
-		ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
-		if (ret)
-			ehca_err(device, "Could not munmap fwh ehca_cq=%p "
-				 "cq_num=%x", my_cq, cq_num);
-	}
-
 	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
 	if (h_ret == H_R_STATE) {
 		/* cq in err: read err data and destroy it forcibly */
@@ -397,7 +378,7 @@ int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 	u32 cur_pid = current->tgid;
 
-	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+	if (cq->uobject && my_cq->ownpid != cur_pid) {
 		ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_cq->ownpid);
 		return -EINVAL;
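
A note on the synchronization pattern this patch introduces: the old destroy path spun in a yield() loop until nr_callbacks reached zero, busy-waiting while event handlers drained outstanding completion events. The patch replaces that with a waitqueue: ehca_destroy_cq() sleeps in wait_event() until get_cq_nr_events() reads zero, then re-takes ehca_cq_idr_lock and rechecks nr_events before removing the CQ from the idr, so an event arriving between the wakeup and the removal is not missed. The matching decrement-and-wake on my_cq->wait_completion lives on the event-handling side, which is not shown in this hunk. The following is a minimal stand-alone sketch of that drain-then-teardown pattern; all demo_* names are hypothetical, and this is illustrative code, not part of the driver:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_obj {
	spinlock_t lock;
	int nr_events;			/* dispatched but not yet handled */
	wait_queue_head_t wait_completion;
};

static void demo_init(struct demo_obj *obj)
{
	spin_lock_init(&obj->lock);
	obj->nr_events = 0;
	init_waitqueue_head(&obj->wait_completion);
}

/* read the counter under the lock, as get_cq_nr_events() does */
static int demo_nr_events(struct demo_obj *obj)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&obj->lock, flags);
	ret = obj->nr_events;
	spin_unlock_irqrestore(&obj->lock, flags);
	return ret;
}

/* event side: account for one more in-flight event */
static void demo_event_dispatch(struct demo_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&obj->lock, flags);
	obj->nr_events++;
	spin_unlock_irqrestore(&obj->lock, flags);
}

/* event side: one event fully handled; wake a waiting destroyer */
static void demo_event_done(struct demo_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&obj->lock, flags);
	if (!--obj->nr_events)
		wake_up(&obj->wait_completion);
	spin_unlock_irqrestore(&obj->lock, flags);
}

/* teardown: sleep (instead of spinning in yield()) until drained */
static void demo_destroy(struct demo_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&obj->lock, flags);
	while (obj->nr_events) {
		spin_unlock_irqrestore(&obj->lock, flags);
		wait_event(obj->wait_completion, !demo_nr_events(obj));
		spin_lock_irqsave(&obj->lock, flags);
		/* recheck under the lock, as the patch does */
	}
	/* ... unpublish the object (e.g. idr_remove) while locked ... */
	spin_unlock_irqrestore(&obj->lock, flags);
}

The recheck loop in demo_destroy() matters: wait_event() evaluates its condition without holding the lock, so the counter must be read again under the lock before the object is unpublished, exactly as the patched ehca_destroy_cq() rechecks nr_events before idr_remove().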