X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Finfiniband%2Fulp%2Fsrp%2Fib_srp.c;h=214f66195af23e6699584c49273f44dfec32d026;hb=c4028958b6ecad064b1a6303a6a5906d4fe48d73;hp=28d741e7805cb6d154046c28e9cb257c33d42ad5;hpb=1962a4a1e4b3716aa836ebeb5b80c804a7f7c5ba;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 28d741e..214f661 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -62,6 +62,13 @@ MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " "v" DRV_VERSION " (" DRV_RELDATE ")"); MODULE_LICENSE("Dual BSD/GPL"); +static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE; +static int srp_max_iu_len; + +module_param(srp_sg_tablesize, int, 0444); +MODULE_PARM_DESC(srp_sg_tablesize, + "Max number of gather/scatter entries per I/O (default is 12)"); + static int topspin_workarounds = 1; module_param(topspin_workarounds, int, 0444); @@ -70,6 +77,14 @@ MODULE_PARM_DESC(topspin_workarounds, static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; +static int mellanox_workarounds = 1; + +module_param(mellanox_workarounds, int, 0444); +MODULE_PARM_DESC(mellanox_workarounds, + "Enable workarounds for Mellanox SRP target bugs if != 0"); + +static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 }; + static void srp_add_one(struct ib_device *device); static void srp_remove_one(struct ib_device *device); static void srp_completion(struct ib_cq *cq, void *target_ptr); @@ -81,6 +96,8 @@ static struct ib_client srp_client = { .remove = srp_remove_one }; +static struct ib_sa_client srp_sa_client; + static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) { return (struct srp_target_port *) host->hostdata; @@ -252,7 +269,8 @@ static int srp_lookup_path(struct srp_target_port *target) init_completion(&target->done); - target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev, + target->path_query_id = ib_sa_path_rec_get(&srp_sa_client, + target->srp_host->dev->dev, target->srp_host->port, &target->path, IB_SA_PATH_REC_DGID | @@ -311,24 +329,47 @@ static int srp_send_req(struct srp_target_port *target) req->priv.opcode = SRP_LOGIN_REQ; req->priv.tag = 0; - req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); + req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len); req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT); - memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16); + /* + * In the published SRP specification (draft rev. 16a), the + * port identifier format is 8 bytes of ID extension followed + * by 8 bytes of GUID. Older drafts put the two halves in the + * opposite order, so that the GUID comes first. + * + * Targets conforming to these obsolete drafts can be + * recognized by the I/O Class they report. 
+ */ + if (target->io_class == SRP_REV10_IB_IO_CLASS) { + memcpy(req->priv.initiator_port_id, + &target->path.sgid.global.interface_id, 8); + memcpy(req->priv.initiator_port_id + 8, + &target->initiator_ext, 8); + memcpy(req->priv.target_port_id, &target->ioc_guid, 8); + memcpy(req->priv.target_port_id + 8, &target->id_ext, 8); + } else { + memcpy(req->priv.initiator_port_id, + &target->initiator_ext, 8); + memcpy(req->priv.initiator_port_id + 8, + &target->path.sgid.global.interface_id, 8); + memcpy(req->priv.target_port_id, &target->id_ext, 8); + memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); + } + /* * Topspin/Cisco SRP targets will reject our login unless we - * zero out the first 8 bytes of our initiator port ID. The - * second 8 bytes must be our local node GUID, but we always - * use that anyway. + * zero out the first 8 bytes of our initiator port ID and set + * the second 8 bytes to the local node GUID. */ if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) { printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround " "activated for target GUID %016llx\n", (unsigned long long) be64_to_cpu(target->ioc_guid)); memset(req->priv.initiator_port_id, 0, 8); + memcpy(req->priv.initiator_port_id + 8, + &target->srp_host->dev->dev->node_guid, 8); } - memcpy(req->priv.target_port_id, &target->id_ext, 8); - memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); status = ib_send_cm_req(target->cm_id, &req->param); @@ -349,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target) wait_for_completion(&target->done); } -static void srp_remove_work(void *target_ptr) +static void srp_remove_work(struct work_struct *work) { - struct srp_target_port *target = target_ptr; + struct srp_target_port *target = + container_of(work, struct srp_target_port, work); spin_lock_irq(target->scsi_host->host_lock); if (target->state != SRP_TARGET_DEAD) { @@ -361,9 +403,9 @@ static void srp_remove_work(void *target_ptr) target->state = SRP_TARGET_REMOVED; spin_unlock_irq(target->scsi_host->host_lock); - mutex_lock(&target->srp_host->target_mutex); + spin_lock(&target->srp_host->target_lock); list_del(&target->list); - mutex_unlock(&target->srp_host->target_mutex); + spin_unlock(&target->srp_host->target_lock); scsi_remove_host(target->scsi_host); ib_destroy_cm_id(target->cm_id); @@ -444,14 +486,26 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, scmnd->sc_data_direction); } +static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) +{ + srp_unmap_data(req->scmnd, target, req); + list_move_tail(&req->list, &target->free_reqs); +} + +static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) +{ + req->scmnd->result = DID_RESET << 16; + req->scmnd->scsi_done(req->scmnd); + srp_remove_req(target, req); +} + static int srp_reconnect_target(struct srp_target_port *target) { struct ib_cm_id *new_cm_id; struct ib_qp_attr qp_attr; - struct srp_request *req; + struct srp_request *req, *tmp; struct ib_wc wc; int ret; - int i; spin_lock_irq(target->scsi_host->host_lock); if (target->state != SRP_TARGET_LIVE) { @@ -487,19 +541,14 @@ static int srp_reconnect_target(struct srp_target_port *target) while (ib_poll_cq(target->cq, 1, &wc) > 0) ; /* nothing */ - list_for_each_entry(req, &target->req_queue, list) { - req->scmnd->result = DID_RESET << 16; - req->scmnd->scsi_done(req->scmnd); - srp_unmap_data(req->scmnd, target, req); - } + spin_lock_irq(target->scsi_host->host_lock); + list_for_each_entry_safe(req, 
tmp, &target->req_queue, list) + srp_reset_req(target, req); + spin_unlock_irq(target->scsi_host->host_lock); target->rx_head = 0; target->tx_head = 0; target->tx_tail = 0; - INIT_LIST_HEAD(&target->free_reqs); - INIT_LIST_HEAD(&target->req_queue); - for (i = 0; i < SRP_SQ_SIZE; ++i) - list_add_tail(&target->req_ring[i].list, &target->free_reqs); ret = srp_connect_target(target); if (ret) @@ -527,7 +576,7 @@ err: spin_lock_irq(target->scsi_host->host_lock); if (target->state == SRP_TARGET_CONNECTING) { target->state = SRP_TARGET_DEAD; - INIT_WORK(&target->work, srp_remove_work, target); + INIT_WORK(&target->work, srp_remove_work); schedule_work(&target->work); } spin_unlock_irq(target->scsi_host->host_lock); @@ -535,7 +584,7 @@ err: return ret; } -static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, +static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, int sg_cnt, struct srp_request *req, struct srp_direct_buf *buf) { @@ -545,10 +594,15 @@ static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, int page_cnt; int i, j; int ret; + struct srp_device *dev = target->srp_host->dev; if (!dev->fmr_pool) return -ENODEV; + if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && + mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) + return -EINVAL; + len = page_cnt = 0; for (i = 0; i < sg_cnt; ++i) { if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { @@ -583,9 +637,10 @@ static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, - dma_pages, page_cnt, &io_addr); + dma_pages, page_cnt, io_addr); if (IS_ERR(req->fmr)) { ret = PTR_ERR(req->fmr); + req->fmr = NULL; goto out; } @@ -650,7 +705,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, buf->va = cpu_to_be64(sg_dma_address(scat)); buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); buf->len = cpu_to_be32(sg_dma_len(scat)); - } else if (srp_map_fmr(target->srp_host->dev, scat, count, req, + } else if (srp_map_fmr(target, scat, count, req, (void *) cmd->add_data)) { /* * FMR mapping failed, and the scatterlist has more @@ -699,12 +754,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, return len; } -static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) -{ - srp_unmap_data(req->scmnd, target, req); - list_move_tail(&req->list, &target->free_reqs); -} - static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) { struct srp_request *req; @@ -757,13 +806,6 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) spin_unlock_irqrestore(target->scsi_host->host_lock, flags); } -static void srp_reconnect_work(void *target_ptr) -{ - struct srp_target_port *target = target_ptr; - - srp_reconnect_target(target); -} - static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) { struct srp_iu *iu; @@ -816,7 +858,6 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr) { struct srp_target_port *target = target_ptr; struct ib_wc wc; - unsigned long flags; ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); while (ib_poll_cq(cq, 1, &wc) > 0) { @@ -824,10 +865,6 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr) printk(KERN_ERR PFX "failed %s status %d\n", wc.wr_id & SRP_OP_RECV ? 
"receive" : "send", wc.status); - spin_lock_irqsave(target->scsi_host->host_lock, flags); - if (target->state == SRP_TARGET_LIVE) - schedule_work(&target->work); - spin_unlock_irqrestore(target->scsi_host->host_lock, flags); break; } @@ -887,12 +924,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target) if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) return NULL; - if (unlikely(target->req_lim < 1)) { - if (printk_ratelimit()) - printk(KERN_DEBUG PFX "Target has req_lim %d\n", - target->req_lim); - return NULL; - } + if (unlikely(target->req_lim < 1)) + ++target->zero_req_lim; return target->tx_ring[target->tx_head & SRP_SQ_SIZE]; } @@ -953,7 +986,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, goto err; dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, - SRP_MAX_IU_LEN, DMA_TO_DEVICE); + srp_max_iu_len, DMA_TO_DEVICE); req = list_entry(target->free_reqs.next, struct srp_request, list); @@ -986,7 +1019,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, } dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, - SRP_MAX_IU_LEN, DMA_TO_DEVICE); + srp_max_iu_len, DMA_TO_DEVICE); if (__srp_post_send(target, iu, len)) { printk(KERN_ERR PFX "Send failed\n"); @@ -1018,7 +1051,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { target->tx_ring[i] = srp_alloc_iu(target->srp_host, - SRP_MAX_IU_LEN, + srp_max_iu_len, GFP_KERNEL, DMA_TO_DEVICE); if (!target->tx_ring[i]) goto err; @@ -1189,11 +1222,10 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) srp_cm_rej_handler(cm_id, event, target); break; - case IB_CM_MRA_RECEIVED: - printk(KERN_ERR PFX "MRA received\n"); - break; - - case IB_CM_DREP_RECEIVED: + case IB_CM_DREQ_RECEIVED: + printk(KERN_WARNING PFX "DREQ received - connection closed\n"); + if (ib_send_cm_drep(cm_id, NULL, 0)) + printk(KERN_ERR PFX "Sending CM DREP failed\n"); break; case IB_CM_TIMEWAIT_EXIT: @@ -1203,6 +1235,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) target->status = 0; break; + case IB_CM_MRA_RECEIVED: + case IB_CM_DREQ_ERROR: + case IB_CM_DREP_RECEIVED: + break; + default: printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event); break; @@ -1321,11 +1358,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) spin_lock_irq(target->scsi_host->host_lock); list_for_each_entry_safe(req, tmp, &target->req_queue, list) - if (req->scmnd->device == scmnd->device) { - req->scmnd->result = DID_RESET << 16; - req->scmnd->scsi_done(req->scmnd); - srp_remove_req(target, req); - } + if (req->scmnd->device == scmnd->device) + srp_reset_req(target, req); spin_unlock_irq(target->scsi_host->host_lock); @@ -1411,11 +1445,39 @@ static ssize_t show_dgid(struct class_device *cdev, char *buf) be16_to_cpu(((__be16 *) target->path.dgid.raw)[7])); } -static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); -static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); -static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); -static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); -static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); +static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(cdev)); + + if (target->state == SRP_TARGET_DEAD || + target->state == SRP_TARGET_REMOVED) + return -ENODEV; + + return sprintf(buf, "%d\n", target->zero_req_lim); +} + +static ssize_t 
show_local_ib_port(struct class_device *cdev, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(cdev)); + + return sprintf(buf, "%d\n", target->srp_host->port); +} + +static ssize_t show_local_ib_device(struct class_device *cdev, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(cdev)); + + return sprintf(buf, "%s\n", target->srp_host->dev->dev->name); +} + +static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); +static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); +static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); +static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); +static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); +static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); +static CLASS_DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); +static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); static struct class_device_attribute *srp_host_attrs[] = { &class_device_attr_id_ext, @@ -1423,6 +1485,9 @@ static struct class_device_attribute *srp_host_attrs[] = { &class_device_attr_service_id, &class_device_attr_pkey, &class_device_attr_dgid, + &class_device_attr_zero_req_lim, + &class_device_attr_local_ib_port, + &class_device_attr_local_ib_device, NULL }; @@ -1436,7 +1501,6 @@ static struct scsi_host_template srp_template = { .eh_host_reset_handler = srp_reset_host, .can_queue = SRP_SQ_SIZE, .this_id = -1, - .sg_tablesize = SRP_MAX_INDIRECT, .cmd_per_lun = SRP_SQ_SIZE, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = srp_host_attrs @@ -1450,9 +1514,9 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device)) return -ENODEV; - mutex_lock(&host->target_mutex); + spin_lock(&host->target_lock); list_add_tail(&target->list, &host->target_list); - mutex_unlock(&host->target_mutex); + spin_unlock(&host->target_lock); target->state = SRP_TARGET_LIVE; @@ -1491,6 +1555,9 @@ enum { SRP_OPT_PKEY = 1 << 3, SRP_OPT_SERVICE_ID = 1 << 4, SRP_OPT_MAX_SECT = 1 << 5, + SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, + SRP_OPT_IO_CLASS = 1 << 7, + SRP_OPT_INITIATOR_EXT = 1 << 8, SRP_OPT_ALL = (SRP_OPT_ID_EXT | SRP_OPT_IOC_GUID | SRP_OPT_DGID | @@ -1499,13 +1566,16 @@ enum { }; static match_table_t srp_opt_tokens = { - { SRP_OPT_ID_EXT, "id_ext=%s" }, - { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, - { SRP_OPT_DGID, "dgid=%s" }, - { SRP_OPT_PKEY, "pkey=%x" }, - { SRP_OPT_SERVICE_ID, "service_id=%s" }, - { SRP_OPT_MAX_SECT, "max_sect=%d" }, - { SRP_OPT_ERR, NULL } + { SRP_OPT_ID_EXT, "id_ext=%s" }, + { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, + { SRP_OPT_DGID, "dgid=%s" }, + { SRP_OPT_PKEY, "pkey=%x" }, + { SRP_OPT_SERVICE_ID, "service_id=%s" }, + { SRP_OPT_MAX_SECT, "max_sect=%d" }, + { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, + { SRP_OPT_IO_CLASS, "io_class=%x" }, + { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, + { SRP_OPT_ERR, NULL } }; static int srp_parse_options(const char *buf, struct srp_target_port *target) @@ -1581,6 +1651,35 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) target->scsi_host->max_sectors = token; break; + case SRP_OPT_MAX_CMD_PER_LUN: + if (match_int(args, &token)) { + printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p); + goto out; + } + target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE); + break; + + case SRP_OPT_IO_CLASS: + if (match_hex(args, &token)) { + printk(KERN_WARNING PFX "bad IO class 
parameter '%s' \n", p); + goto out; + } + if (token != SRP_REV10_IB_IO_CLASS && + token != SRP_REV16A_IB_IO_CLASS) { + printk(KERN_WARNING PFX "unknown IO class parameter value" + " %x specified (use %x or %x).\n", + token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS); + goto out; + } + target->io_class = token; + break; + + case SRP_OPT_INITIATOR_EXT: + p = match_strdup(args); + target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); + kfree(p); + break; + default: printk(KERN_WARNING PFX "unknown parameter or missing value " "'%s' in target creation request\n", p); @@ -1621,13 +1720,11 @@ static ssize_t srp_create_target(struct class_device *class_dev, target_host->max_lun = SRP_MAX_LUN; target = host_to_target(target_host); - memset(target, 0, sizeof *target); + target->io_class = SRP_REV16A_IB_IO_CLASS; target->scsi_host = target_host; target->srp_host = host; - INIT_WORK(&target->work, srp_reconnect_work, target); - INIT_LIST_HEAD(&target->free_reqs); INIT_LIST_HEAD(&target->req_queue); for (i = 0; i < SRP_SQ_SIZE; ++i) { @@ -1724,14 +1821,11 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port) return NULL; INIT_LIST_HEAD(&host->target_list); - mutex_init(&host->target_mutex); + spin_lock_init(&host->target_lock); init_completion(&host->released); host->dev = device; host->port = port; - host->initiator_port_id[7] = port; - memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8); - host->class_dev.class = &srp_class; host->class_dev.dev = device->dev->dma_device; snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d", @@ -1816,7 +1910,7 @@ static void srp_add_one(struct ib_device *device) if (IS_ERR(srp_dev->fmr_pool)) srp_dev->fmr_pool = NULL; - if (device->node_type == IB_NODE_SWITCH) { + if (device->node_type == RDMA_NODE_IB_SWITCH) { s = 0; e = 0; } else { @@ -1850,7 +1944,6 @@ static void srp_remove_one(struct ib_device *device) struct srp_host *host, *tmp_host; LIST_HEAD(target_list); struct srp_target_port *target, *tmp_target; - unsigned long flags; srp_dev = ib_get_client_data(device, &srp_client); @@ -1866,15 +1959,13 @@ static void srp_remove_one(struct ib_device *device) * Mark all target ports as removed, so we stop queueing * commands and don't try to reconnect. 
*/ - mutex_lock(&host->target_mutex); - list_for_each_entry_safe(target, tmp_target, - &host->target_list, list) { - spin_lock_irqsave(target->scsi_host->host_lock, flags); - if (target->state != SRP_TARGET_REMOVED) - target->state = SRP_TARGET_REMOVED; - spin_unlock_irqrestore(target->scsi_host->host_lock, flags); + spin_lock(&host->target_lock); + list_for_each_entry(target, &host->target_list, list) { + spin_lock_irq(target->scsi_host->host_lock); + target->state = SRP_TARGET_REMOVED; + spin_unlock_irq(target->scsi_host->host_lock); } - mutex_unlock(&host->target_mutex); + spin_unlock(&host->target_lock); /* * Wait for any reconnection tasks that may have @@ -1907,15 +1998,23 @@ static int __init srp_init_module(void) { int ret; + srp_template.sg_tablesize = srp_sg_tablesize; + srp_max_iu_len = (sizeof (struct srp_cmd) + + sizeof (struct srp_indirect_buf) + + srp_sg_tablesize * 16); + ret = class_register(&srp_class); if (ret) { printk(KERN_ERR PFX "couldn't register class infiniband_srp\n"); return ret; } + ib_sa_register_client(&srp_sa_client); + ret = ib_register_client(&srp_client); if (ret) { printk(KERN_ERR PFX "couldn't register IB client\n"); + ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); return ret; } @@ -1926,6 +2025,7 @@ static int __init srp_init_module(void) static void __exit srp_cleanup_module(void) { ib_unregister_client(&srp_client); + ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); }
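
A brief usage sketch, not part of the patch above: the new srp_sg_tablesize module parameter is set at load time (for example "modprobe ib_srp srp_sg_tablesize=32"), and the new login options handled by srp_parse_options() above (io_class, initiator_ext, max_cmd_per_lun) are passed in the target string written to the per-port add_target attribute. The add_target file itself is not shown in these hunks, and the "mthca0" device name, the GUID/ID values and the 0xff00 value assumed for SRP_REV10_IB_IO_CLASS below are placeholders.

/*
 * Hypothetical userspace sketch: log in to a target that still uses the
 * obsolete (rev. 10) port ID layout, with an explicit initiator extension
 * and a per-LUN queue depth that the driver caps at SRP_SQ_SIZE.
 */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/class/infiniband_srp/srp-mthca0-1/add_target";
        const char *opts =
                "id_ext=200400a0b8114527,ioc_guid=0002c90200402bd4,"
                "dgid=fe800000000000000002c90200402bd5,pkey=ffff,"
                "service_id=200400a0b8114527,"
                "io_class=ff00,"  /* assumed value of SRP_REV10_IB_IO_CLASS */
                "initiator_ext=1,max_cmd_per_lun=16";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fprintf(f, "%s\n", opts);
        return fclose(f) ? 1 : 0;
}

Once the target is up, the read-only attributes added above (zero_req_lim, local_ib_port, local_ib_device) appear on the SCSI host's class device next to id_ext, ioc_guid, service_id, pkey and dgid.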