diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 32d3028..85ccf13 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005 Intel Corporation. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
  *
@@ -31,7 +31,6 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
  */
 #include <linux/dma-mapping.h>
 #include <rdma/ib_cache.h>
@@ -46,7 +45,7 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-static kmem_cache_t *ib_mad_cache;
+static struct kmem_cache *ib_mad_cache;
 
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
@@ -65,8 +64,8 @@ static struct ib_mad_agent_private *find_mad_agent(
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                     struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                               struct ib_mad_agent_private *agent_priv,
                               u8 mgmt_class);
@@ -356,10 +355,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
         INIT_LIST_HEAD(&mad_agent_priv->wait_list);
         INIT_LIST_HEAD(&mad_agent_priv->done_list);
         INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-        INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+        INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
         INIT_LIST_HEAD(&mad_agent_priv->local_list);
-        INIT_WORK(&mad_agent_priv->local_work, local_completions,
-                  mad_agent_priv);
+        INIT_WORK(&mad_agent_priv->local_work, local_completions);
         atomic_set(&mad_agent_priv->refcount, 1);
         init_completion(&mad_agent_priv->comp);
 
@@ -643,7 +641,8 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 }
 
-static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
+static void build_smp_wc(struct ib_qp *qp,
+                         u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
                          struct ib_wc *wc)
 {
         memset(wc, 0, sizeof *wc);
@@ -653,7 +652,7 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
         wc->pkey_index = pkey_index;
         wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
         wc->src_qp = IB_QP0;
-        wc->qp_num = IB_QP0;
+        wc->qp = qp;
         wc->slid = slid;
         wc->sl = 0;
         wc->dlid_path_bits = 0;
@@ -668,7 +667,7 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                   struct ib_mad_send_wr_private *mad_send_wr)
 {
-        int ret;
+        int ret = 0;
         struct ib_smp *smp = mad_send_wr->send_buf.mad;
         unsigned long flags;
         struct ib_mad_local_private *local;
@@ -688,14 +687,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
          */
         if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
              IB_LID_PERMISSIVE &&
-            !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
+             smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+             IB_SMI_DISCARD) {
                 ret = -EINVAL;
                 printk(KERN_ERR PFX "Invalid directed route\n");
                 goto out;
         }
+
         /* Check to post send on QP or process locally */
-        ret = smi_check_local_smp(smp, device);
-        if (!ret)
+        if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD)
                 goto out;
 
         local = kmalloc(sizeof *local, GFP_ATOMIC);
@@ -714,7 +714,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                 goto out;
         }
 
-        build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+        build_smp_wc(mad_agent_priv->agent.qp,
+                     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
                      send_wr->wr.ud.pkey_index,
                      send_wr->wr.ud.port_num, &mad_wc);
 
@@ -999,17 +1000,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
         mad_agent = mad_send_wr->send_buf.mad_agent;
         sge = mad_send_wr->sg_list;
-        sge[0].addr = dma_map_single(mad_agent->device->dma_device,
-                                     mad_send_wr->send_buf.mad,
-                                     sge[0].length,
-                                     DMA_TO_DEVICE);
-        pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
-
-        sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-                                     ib_get_payload(mad_send_wr),
-                                     sge[1].length,
-                                     DMA_TO_DEVICE);
-        pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+        sge[0].addr = ib_dma_map_single(mad_agent->device,
+                                        mad_send_wr->send_buf.mad,
+                                        sge[0].length,
+                                        DMA_TO_DEVICE);
+        mad_send_wr->header_mapping = sge[0].addr;
+
+        sge[1].addr = ib_dma_map_single(mad_agent->device,
+                                        ib_get_payload(mad_send_wr),
+                                        sge[1].length,
+                                        DMA_TO_DEVICE);
+        mad_send_wr->payload_mapping = sge[1].addr;
 
         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
         if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1027,12 +1028,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
         }
         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
         if (ret) {
-                dma_unmap_single(mad_agent->device->dma_device,
-                                 pci_unmap_addr(mad_send_wr, header_mapping),
-                                 sge[0].length, DMA_TO_DEVICE);
-                dma_unmap_single(mad_agent->device->dma_device,
-                                 pci_unmap_addr(mad_send_wr, payload_mapping),
-                                 sge[1].length, DMA_TO_DEVICE);
+                ib_dma_unmap_single(mad_agent->device,
+                                    mad_send_wr->header_mapping,
+                                    sge[0].length, DMA_TO_DEVICE);
+                ib_dma_unmap_single(mad_agent->device,
+                                    mad_send_wr->payload_mapping,
+                                    sge[1].length, DMA_TO_DEVICE);
         }
         return ret;
 }
@@ -1750,7 +1751,7 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
                  */
                 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
                  rcv_has_same_gid(mad_agent_priv, wr, wc)))
-                        return wr;
+                        return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
         }
 
         /*
@@ -1851,11 +1852,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
                                     mad_list);
         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
-        dma_unmap_single(port_priv->device->dma_device,
-                         pci_unmap_addr(&recv->header, mapping),
-                         sizeof(struct ib_mad_private) -
-                         sizeof(struct ib_mad_private_header),
-                         DMA_FROM_DEVICE);
+        ib_dma_unmap_single(port_priv->device,
+                            recv->header.mapping,
+                            sizeof(struct ib_mad_private) -
+                            sizeof(struct ib_mad_private_header),
+                            DMA_FROM_DEVICE);
 
         /* Setup MAD receive work completion from "normal" work completion */
         recv->header.wc = *wc;
@@ -1873,18 +1874,22 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 
         if (recv->mad.mad.mad_hdr.mgmt_class ==
             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-                if (!smi_handle_dr_smp_recv(&recv->mad.smp,
-                                            port_priv->device->node_type,
-                                            port_priv->port_num,
-                                            port_priv->device->phys_port_cnt))
+                if (smi_handle_dr_smp_recv(&recv->mad.smp,
+                                           port_priv->device->node_type,
+                                           port_priv->port_num,
+                                           port_priv->device->phys_port_cnt) ==
+                    IB_SMI_DISCARD)
                         goto out;
-                if (!smi_check_forward_dr_smp(&recv->mad.smp))
+
+                if (smi_check_forward_dr_smp(&recv->mad.smp) == IB_SMI_LOCAL)
                         goto local;
-                if (!smi_handle_dr_smp_send(&recv->mad.smp,
-                                            port_priv->device->node_type,
-                                            port_priv->port_num))
+
+                if (smi_handle_dr_smp_send(&recv->mad.smp,
+                                           port_priv->device->node_type,
+                                           port_priv->port_num) == IB_SMI_DISCARD)
                         goto out;
-                if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
+
+                if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
                         goto out;
         }
 
@@ -2081,12 +2086,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
         qp_info = send_queue->qp_info;
 
 retry:
-        dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                         pci_unmap_addr(mad_send_wr, header_mapping),
-                         mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
-        dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                         pci_unmap_addr(mad_send_wr, payload_mapping),
-                         mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+        ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                            mad_send_wr->header_mapping,
+                            mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+        ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                            mad_send_wr->payload_mapping,
+                            mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
         queued_send_wr = NULL;
         spin_lock_irqsave(&send_queue->lock, flags);
         list_del(&mad_list->list);
@@ -2198,12 +2203,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
 /*
  * IB MAD completion callback
  */
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
 {
         struct ib_mad_port_private *port_priv;
         struct ib_wc wc;
 
-        port_priv = (struct ib_mad_port_private *)data;
+        port_priv = container_of(work, struct ib_mad_port_private, work);
         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 
         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2329,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
 {
         struct ib_mad_agent_private *mad_agent_priv;
         struct ib_mad_local_private *local;
@@ -2334,7 +2339,8 @@ static void local_completions(void *data)
         struct ib_wc wc;
         struct ib_mad_send_wc mad_send_wc;
 
-        mad_agent_priv = (struct ib_mad_agent_private *)data;
+        mad_agent_priv =
+                container_of(work, struct ib_mad_agent_private, local_work);
 
         spin_lock_irqsave(&mad_agent_priv->lock, flags);
         while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2355,7 +2361,8 @@ static void local_completions(void *data)
                          * Defined behavior is to complete response
                          * before request
                          */
-                        build_smp_wc((unsigned long) local->mad_send_wr,
+                        build_smp_wc(recv_mad_agent->agent.qp,
+                                     (unsigned long) local->mad_send_wr,
                                      be16_to_cpu(IB_LID_PERMISSIVE),
                                      0, recv_mad_agent->agent.port_num, &wc);
 
@@ -2434,14 +2441,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
         return ret;
 }
 
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
 {
         struct ib_mad_agent_private *mad_agent_priv;
         struct ib_mad_send_wr_private *mad_send_wr;
         struct ib_mad_send_wc mad_send_wc;
         unsigned long flags, delay;
 
-        mad_agent_priv = (struct ib_mad_agent_private *)data;
+        mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+                                      timed_work.work);
         mad_send_wc.vendor_err = 0;
 
         spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2527,13 +2535,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                 break;
                         }
                 }
-                sg_list.addr = dma_map_single(qp_info->port_priv->
-                                                device->dma_device,
-                                              &mad_priv->grh,
-                                              sizeof *mad_priv -
-                                                sizeof mad_priv->header,
-                                              DMA_FROM_DEVICE);
-                pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+                sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+                                                 &mad_priv->grh,
+                                                 sizeof *mad_priv -
+                                                 sizeof mad_priv->header,
+                                                 DMA_FROM_DEVICE);
+                mad_priv->header.mapping = sg_list.addr;
                 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                 mad_priv->header.mad_list.mad_queue = recv_queue;
 
@@ -2548,12 +2555,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                         list_del(&mad_priv->header.mad_list.list);
                         recv_queue->count--;
                         spin_unlock_irqrestore(&recv_queue->lock, flags);
-                        dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                         pci_unmap_addr(&mad_priv->header,
-                                                        mapping),
-                                         sizeof *mad_priv -
-                                           sizeof mad_priv->header,
-                                         DMA_FROM_DEVICE);
+                        ib_dma_unmap_single(qp_info->port_priv->device,
+                                            mad_priv->header.mapping,
+                                            sizeof *mad_priv -
+                                            sizeof mad_priv->header,
+                                            DMA_FROM_DEVICE);
                         kmem_cache_free(ib_mad_cache, mad_priv);
                         printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
                         break;
@@ -2585,11 +2591,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
                 /* Remove from posted receive MAD list */
                 list_del(&mad_list->list);
 
-                dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                 pci_unmap_addr(&recv->header, mapping),
-                                 sizeof(struct ib_mad_private) -
-                                 sizeof(struct ib_mad_private_header),
-                                 DMA_FROM_DEVICE);
+                ib_dma_unmap_single(qp_info->port_priv->device,
+                                    recv->header.mapping,
+                                    sizeof(struct ib_mad_private) -
+                                    sizeof(struct ib_mad_private_header),
+                                    DMA_FROM_DEVICE);
                 kmem_cache_free(ib_mad_cache, recv);
         }
 
@@ -2765,7 +2771,7 @@ static int ib_mad_port_open(struct ib_device *device,
         cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
         port_priv->cq = ib_create_cq(port_priv->device,
                                      ib_mad_thread_completion_handler,
-                                     NULL, port_priv, cq_size);
+                                     NULL, port_priv, cq_size, 0);
         if (IS_ERR(port_priv->cq)) {
                 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
                 ret = PTR_ERR(port_priv->cq);
@@ -2799,7 +2805,7 @@ static int ib_mad_port_open(struct ib_device *device,
                 ret = -ENOMEM;
                 goto error8;
         }
-        INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+        INIT_WORK(&port_priv->work, ib_mad_completion_handler);
 
         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
         list_add_tail(&port_priv->port_list, &ib_mad_port_list);
@@ -2876,7 +2882,10 @@ static void ib_mad_init_device(struct ib_device *device)
 {
         int start, end, i;
 
-        if (device->node_type == IB_NODE_SWITCH) {
+        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+                return;
+
+        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                 start = 0;
                 end   = 0;
         } else {
@@ -2923,7 +2932,7 @@ static void ib_mad_remove_device(struct ib_device *device)
 {
         int i, num_ports, cur_port;
 
-        if (device->node_type == IB_NODE_SWITCH) {
+        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                 num_ports = 1;
                 cur_port = 0;
         } else {
@@ -2984,10 +2993,7 @@ error1:
 static void __exit ib_mad_cleanup_module(void)
 {
         ib_unregister_client(&mad_client);
-
-        if (kmem_cache_destroy(ib_mad_cache)) {
-                printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
-        }
+        kmem_cache_destroy(ib_mad_cache);
 }
 
 module_init(ib_mad_init_module);
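---

Note on the recurring pattern above: most of the workqueue hunks in this diff are the mechanical adaptation to the 2.6.20 work_struct API, in which a work handler receives a struct work_struct pointer and recovers its private context with container_of() instead of a void *data argument stashed at INIT_WORK() time, and timer-backed work is split out into struct delayed_work (hence the two-level "timed_work.work" indirection in timeout_sends()). The following self-contained sketch illustrates that pattern outside mad.c, assuming the post-2.6.20 workqueue API; all names here (demo_ctx, demo_work_fn, demo_timeout_fn) are hypothetical and not part of this patch:

/*
 * Minimal sketch of the container_of()-based work handler idiom.
 * Hypothetical example module; not from drivers/infiniband/core/mad.c.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_ctx {
        int value;
        struct work_struct work;        /* immediate work, as local_work */
        struct delayed_work timed_work; /* timer-backed, as timed_work */
};

static struct demo_ctx ctx;

static void demo_work_fn(struct work_struct *work)
{
        /* container_of() replaces the old void *data argument. */
        struct demo_ctx *c = container_of(work, struct demo_ctx, work);

        pr_info("work ran, value=%d\n", c->value);
}

static void demo_timeout_fn(struct work_struct *work)
{
        /*
         * For delayed work, the work_struct is embedded inside
         * struct delayed_work, hence the "timed_work.work" step,
         * exactly as timeout_sends() does above.
         */
        struct demo_ctx *c = container_of(work, struct demo_ctx,
                                          timed_work.work);

        pr_info("delayed work ran, value=%d\n", c->value);
}

static int __init demo_init(void)
{
        ctx.value = 42;
        INIT_WORK(&ctx.work, demo_work_fn);
        INIT_DELAYED_WORK(&ctx.timed_work, demo_timeout_fn);

        schedule_work(&ctx.work);
        schedule_delayed_work(&ctx.timed_work, msecs_to_jiffies(100));
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_work_sync(&ctx.work);
        cancel_delayed_work_sync(&ctx.timed_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("Dual BSD/GPL");

The remaining conversions follow the same mechanical shape: ib_dma_map_single()/ib_dma_unmap_single() take the struct ib_device directly in place of dma_map_single() on device->dma_device, the pci_unmap_addr()/pci_unmap_addr_set() macros give way to plain mapping fields (header_mapping, payload_mapping, header.mapping), and ib_create_cq() gains a trailing comp_vector argument (0 here).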