[SCSI] Merge scsi-misc-2.6 into scsi-rc-fixes-2.6
author James Bottomley <James.Bottomley@suse.de>
Tue, 18 May 2010 14:33:43 +0000 (10:33 -0400)
committer James Bottomley <James.Bottomley@suse.de>
Tue, 18 May 2010 14:37:41 +0000 (10:37 -0400)
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
MAINTAINERS
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/bnx2i/bnx2i_init.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/sd.c

diff --combined MAINTAINERS
@@@ -131,19 -131,12 +131,12 @@@ L:      netdev@vger.kernel.or
  S:    Maintained
  F:    drivers/net/typhoon*
  
- 3W-9XXX SATA-RAID CONTROLLER DRIVER
- M:    Adam Radford <linuxraid@amcc.com>
+ 3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
+ M:    Adam Radford <linuxraid@lsi.com>
  L:    linux-scsi@vger.kernel.org
- W:    http://www.amcc.com
+ W:    http://www.lsi.com
  S:    Supported
- F:    drivers/scsi/3w-9xxx*
- 
- 3W-XXXX ATA-RAID CONTROLLER DRIVER
- M:    Adam Radford <linuxraid@amcc.com>
- L:    linux-scsi@vger.kernel.org
- W:    http://www.amcc.com
- S:    Supported
- F:    drivers/scsi/3w-xxxx*
+ F:    drivers/scsi/3w-*
  
  53C700 AND 53C700-66 SCSI DRIVER
  M:    "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
@@@ -485,8 -478,8 +478,8 @@@ S: Maintaine
  F:    drivers/input/mouse/bcm5974.c
  
  APPLE SMC DRIVER
 -M:    Nicolas Boichat <nicolas@boichat.ch>
 -L:    mactel-linux-devel@lists.sourceforge.net
 +M:    Henrik Rydberg <rydberg@euromail.se>
 +L:    lm-sensors@lm-sensors.org
  S:    Maintained
  F:    drivers/hwmon/applesmc.c
  
@@@ -971,16 -964,6 +964,16 @@@ L:       linux-arm-kernel@lists.infradead.or
  W:    http://www.mcuos.com
  S:    Maintained
  
 +ARM/U300 MACHINE SUPPORT
 +M:    Linus Walleij <linus.walleij@stericsson.com>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Supported
 +F:    arch/arm/mach-u300/
 +F:    drivers/i2c/busses/i2c-stu300.c
 +F:    drivers/rtc/rtc-coh901331.c
 +F:    drivers/watchdog/coh901327_wdt.c
 +F:    drivers/dma/coh901318*
 +
  ARM/U8500 ARM ARCHITECTURE
  M:    Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1960,7 -1943,7 +1953,7 @@@ F:      lib/kobj
  
  DRM DRIVERS
  M:    David Airlie <airlied@linux.ie>
 -L:    dri-devel@lists.sourceforge.net
 +L:    dri-devel@lists.freedesktop.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
  S:    Maintained
  F:    drivers/gpu/drm/
@@@ -4482,17 -4465,17 +4475,17 @@@ S:   Maintaine
  F:    drivers/ata/sata_promise.*
  
  PS3 NETWORK SUPPORT
 -M:    Geoff Levand <geoffrey.levand@am.sony.com>
 +M:    Geoff Levand <geoff@infradead.org>
  L:    netdev@vger.kernel.org
  L:    cbe-oss-dev@ozlabs.org
 -S:    Supported
 +S:    Maintained
  F:    drivers/net/ps3_gelic_net.*
  
  PS3 PLATFORM SUPPORT
 -M:    Geoff Levand <geoffrey.levand@am.sony.com>
 +M:    Geoff Levand <geoff@infradead.org>
  L:    linuxppc-dev@ozlabs.org
  L:    cbe-oss-dev@ozlabs.org
 -S:    Supported
 +S:    Maintained
  F:    arch/powerpc/boot/ps3*
  F:    arch/powerpc/include/asm/lv1call.h
  F:    arch/powerpc/include/asm/ps3*.h
@@@ -4577,6 -4560,14 +4570,14 @@@ S:    Supporte
  F:    Documentation/scsi/LICENSE.qla2xxx
  F:    drivers/scsi/qla2xxx/
  
+ QLOGIC QLA4XXX iSCSI DRIVER
+ M:    Ravi Anand <ravi.anand@qlogic.com>
+ M:    Vikas Chaudhary <vikas.chaudhary@qlogic.com>
+ M:    iscsi-driver@qlogic.com
+ L:    linux-scsi@vger.kernel.org
+ S:    Supported
+ F:    drivers/scsi/qla4xxx/
+ 
  QLOGIC QLA3XXX NETWORK DRIVER
  M:    Ron Mercer <ron.mercer@qlogic.com>
  M:    linux-driver@qlogic.com
@@@ -4791,11 -4782,12 +4792,11 @@@ F:   drivers/s390/crypto
  
  S390 ZFCP DRIVER
  M:    Christof Schmitt <christof.schmitt@de.ibm.com>
 -M:    Martin Peschke <mp3@de.ibm.com>
 +M:    Swen Schillig <swen@vnet.ibm.com>
  M:    linux390@de.ibm.com
  L:    linux-s390@vger.kernel.org
  W:    http://www.ibm.com/developerworks/linux/linux390/
  S:    Supported
 -F:    Documentation/s390/zfcpdump.txt
  F:    drivers/s390/scsi/zfcp_*
  
  S390 IUCV NETWORK LAYER
@@@ -5492,7 -5484,7 +5493,7 @@@ S:      Maintaine
  F:    drivers/mmc/host/tmio_mmc.*
  
  TMPFS (SHMEM FILESYSTEM)
 -M:    Hugh Dickins <hugh.dickins@tiscali.co.uk>
 +M:    Hugh Dickins <hughd@google.com>
  L:    linux-mm@kvack.org
  S:    Maintained
  F:    include/linux/shmem_fs.h

diff --combined drivers/s390/scsi/zfcp_fsf.c
@@@ -496,6 -496,7 +496,7 @@@ static int zfcp_fsf_exchange_config_eva
  
        adapter->hydra_version = bottom->adapter_type;
        adapter->timer_ticks = bottom->timer_interval;
+       adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16);
  
        if (fc_host_permanent_port_name(shost) == -1)
                fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
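
The hunk above clamps the number of status-read buffers to a floor of 16, even
if the firmware reports a smaller value in the exchange-config response.  A
minimal user-space sketch of that clamping; the function and values here are
illustrative, not the zfcp API:

#include <stdint.h>
#include <stdio.h>

/* Never configure fewer than 16 status-read buffers, whatever the
 * firmware reported in the exchange-config response. */
static uint16_t clamp_status_read_bufs(uint16_t reported)
{
	return reported > 16 ? reported : 16;
}

int main(void)
{
	printf("reported 4  -> use %u\n", clamp_status_read_bufs(4));
	printf("reported 64 -> use %u\n", clamp_status_read_bufs(64));
	return 0;
}
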
@@@ -640,37 -641,6 +641,6 @@@ static void zfcp_fsf_exchange_port_data
        }
  }
  
- static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
- {
-       struct zfcp_qdio_queue *req_q = &qdio->req_q;
-       spin_lock_bh(&qdio->req_q_lock);
-       if (atomic_read(&req_q->count))
-               return 1;
-       spin_unlock_bh(&qdio->req_q_lock);
-       return 0;
- }
- 
- static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
- {
-       struct zfcp_adapter *adapter = qdio->adapter;
-       long ret;
-       spin_unlock_bh(&qdio->req_q_lock);
-       ret = wait_event_interruptible_timeout(qdio->req_q_wq,
-                              zfcp_fsf_sbal_check(qdio), 5 * HZ);
-       if (ret > 0)
-               return 0;
-       if (!ret) {
-               atomic_inc(&qdio->req_q_full);
-               /* assume hanging outbound queue, try queue recovery */
-               zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
-       }
-       spin_lock_bh(&qdio->req_q_lock);
-       return -EIO;
- }
  static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
  {
        struct zfcp_fsf_req *req;
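
The two helpers removed above implemented "wait up to five seconds for a free
entry on the outbound request queue; on timeout, count the stall and kick
adapter recovery".  Judging by the call sites later in this diff, that logic
now lives behind zfcp_qdio_sbal_get().  The following stand-alone sketch only
illustrates the timed-wait-then-recover pattern with a POSIX condition
variable; all names are illustrative and none of this is kernel code:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int free_entries;		/* analogue of req_q->count */
static unsigned long queue_full;	/* analogue of qdio->req_q_full */

static void recover_adapter(void)
{
	/* stand-in for zfcp_erp_adapter_reopen() */
	fprintf(stderr, "outbound queue appears hung, starting recovery\n");
}

/* Wait up to 5s for a free queue entry; 0 on success, -EIO on timeout. */
static int wait_for_free_entry(void)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;

	pthread_mutex_lock(&lock);
	while (free_entries == 0 && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&cond, &lock, &deadline);
	if (free_entries == 0) {
		queue_full++;
		recover_adapter();
		pthread_mutex_unlock(&lock);
		return -EIO;
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	return wait_for_free_entry() ? 1 : 0;
}
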
@@@ -705,10 -675,9 +675,9 @@@ static struct fsf_qtcb *zfcp_qtcb_alloc
  }
  
  static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
-                                               u32 fsf_cmd, mempool_t *pool)
+                                               u32 fsf_cmd, u32 sbtype,
+                                               mempool_t *pool)
  {
-       struct qdio_buffer_element *sbale;
-       struct zfcp_qdio_queue *req_q = &qdio->req_q;
        struct zfcp_adapter *adapter = qdio->adapter;
        struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
  
        req->adapter = adapter;
        req->fsf_command = fsf_cmd;
        req->req_id = adapter->req_no;
-       req->qdio_req.sbal_number = 1;
-       req->qdio_req.sbal_first = req_q->first;
-       req->qdio_req.sbal_last = req_q->first;
-       req->qdio_req.sbale_curr = 1;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].addr = (void *) req->req_id;
-       sbale[0].flags |= SBAL_FLAGS0_COMMAND;
  
        if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
                if (likely(pool))
                req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
                req->qtcb->header.req_handle = req->req_id;
                req->qtcb->header.fsf_command = req->fsf_command;
-               sbale[1].addr = (void *) req->qtcb;
-               sbale[1].length = sizeof(struct fsf_qtcb);
        }
  
+       zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+                          req->qtcb, sizeof(struct fsf_qtcb));
        if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
                zfcp_fsf_req_free(req);
                return ERR_PTR(-EIO);
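
zfcp_fsf_req_create() reports failure by encoding an errno value in the
returned pointer (ERR_PTR(-EIO) above), and the callers throughout this diff
check it with IS_ERR()/PTR_ERR().  A self-contained user-space rendition of
that kernel error-pointer convention; create_request() is a made-up stand-in
for the real allocator:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* User-space re-creations of the kernel's <linux/err.h> helpers. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_request(int fail)
{
	static int dummy;

	if (fail)
		return ERR_PTR(-EIO);	/* like zfcp_fsf_req_create() when QDIO is down */
	return &dummy;
}

int main(void)
{
	void *req = create_request(1);

	if (IS_ERR(req)) {
		printf("request creation failed: %ld\n", PTR_ERR(req));
		return 1;
	}
	return 0;
}
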
@@@ -803,24 -765,19 +765,19 @@@ int zfcp_fsf_status_read(struct zfcp_qd
        struct zfcp_adapter *adapter = qdio->adapter;
        struct zfcp_fsf_req *req;
        struct fsf_status_read_buffer *sr_buf;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
-       req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+       req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
                                  adapter->pool.status_read_req);
        if (IS_ERR(req)) {
                retval = PTR_ERR(req);
                goto out;
        }
  
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
-       req->qdio_req.sbale_curr = 2;
        sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
        if (!sr_buf) {
                retval = -ENOMEM;
        }
        memset(sr_buf, 0, sizeof(*sr_buf));
        req->data = sr_buf;
-       sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
-       sbale->addr = (void *) sr_buf;
-       sbale->length = sizeof(*sr_buf);
+       zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        retval = zfcp_fsf_req_send(req);
        if (retval)
@@@ -907,14 -864,14 +864,14 @@@ static void zfcp_fsf_abort_fcp_command_
  struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
                                                struct zfcp_unit *unit)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_fsf_req *req = NULL;
        struct zfcp_qdio *qdio = unit->port->adapter->qdio;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.scsi_abort);
        if (IS_ERR(req)) {
                req = NULL;
                       ZFCP_STATUS_COMMON_UNBLOCKED)))
                goto out_error_free;
  
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->data = unit;
        req->handler = zfcp_fsf_abort_fcp_command_handler;
@@@ -996,21 -951,14 +951,14 @@@ skip_fsfstatus
                ct->handler(ct->handler_data);
  }
  
- static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
+ static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+                                           struct zfcp_qdio_req *q_req,
                                            struct scatterlist *sg_req,
                                            struct scatterlist *sg_resp)
  {
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
-       sbale[2].addr   = sg_virt(sg_req);
-       sbale[2].length = sg_req->length;
-       sbale[3].addr   = sg_virt(sg_resp);
-       sbale[3].length = sg_resp->length;
-       sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
- }
- static int zfcp_fsf_one_sbal(struct scatterlist *sg)
- {
-       return sg_is_last(sg) && sg->length <= PAGE_SIZE;
+       zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+       zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+       zfcp_qdio_set_sbale_last(qdio, q_req);
  }
  
  static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
                                       int max_sbals)
  {
        struct zfcp_adapter *adapter = req->adapter;
-       struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
-                                                              &req->qdio_req);
        u32 feat = adapter->adapter_features;
        int bytes;
  
        if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
-               if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
+               if (!zfcp_qdio_sg_one_sbale(sg_req) ||
+                   !zfcp_qdio_sg_one_sbale(sg_resp))
                        return -EOPNOTSUPP;
  
-               zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+               zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+                                               sg_req, sg_resp);
                return 0;
        }
  
        /* use single, unchained SBAL if it can hold the request */
-       if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
-               zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+       if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) {
+               zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+                                               sg_req, sg_resp);
                return 0;
        }
  
        bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
-                                       SBAL_FLAGS0_TYPE_WRITE_READ,
                                        sg_req, max_sbals);
        if (bytes <= 0)
                return -EIO;
        req->qtcb->bottom.support.req_buf_length = bytes;
-       req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+       zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
  
        bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
-                                       SBAL_FLAGS0_TYPE_WRITE_READ,
                                        sg_resp, max_sbals);
        req->qtcb->bottom.support.resp_buf_length = bytes;
        if (bytes <= 0)
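
The removed zfcp_fsf_one_sbal() helper decided whether a CT or ELS payload can
be carried in a single SBAL entry: the scatterlist must consist of one element
no larger than a page.  Only then can the unchained layout be used; otherwise
the request has to be translated into a chain of SBALs.  A small stand-alone
illustration of that decision, using a mock scatterlist type (the real check
is now zfcp_qdio_sg_one_sbale(), whose body is not shown in this diff):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Minimal mock of a scatterlist: a "last entry" flag plus a length. */
struct mock_sg {
	bool is_last;
	size_t length;
};

/* Mirrors the removed zfcp_fsf_one_sbal(): single element, at most a page. */
static bool fits_one_sbale(const struct mock_sg *sg)
{
	return sg->is_last && sg->length <= PAGE_SIZE;
}

int main(void)
{
	struct mock_sg req  = { .is_last = true, .length = 1024 };
	struct mock_sg resp = { .is_last = true, .length = 8192 };

	if (fits_one_sbale(&req) && fits_one_sbale(&resp))
		puts("unchained: request and response share one SBAL");
	else
		puts("chained: build an SBAL chain from the scatterlists");
	return 0;
}
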
@@@ -1091,10 -1038,11 +1038,11 @@@ int zfcp_fsf_send_ct(struct zfcp_fc_wka
        int ret = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
-       req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
+       req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+                                 SBAL_FLAGS0_TYPE_WRITE_READ, pool);
  
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
        ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
-                                   FSF_MAX_SBALS_PER_REQ, timeout);
+                                   ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
        if (ret)
                goto failed_send;
  
@@@ -1187,10 -1135,11 +1135,11 @@@ int zfcp_fsf_send_els(struct zfcp_adapt
        int ret = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
-       req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
+       req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+                                 SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
  
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
  
  int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_fsf_req *req;
        struct zfcp_qdio *qdio = erp_action->adapter->qdio;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
  
        if (IS_ERR(req)) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->qtcb->bottom.config.feature_selection =
                        FSF_FEATURE_CFDC |
  int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
                                       struct fsf_qtcb_bottom_config *data)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_fsf_req *req = NULL;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out_unlock;
  
-       req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+       req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+                                 SBAL_FLAGS0_TYPE_READ, NULL);
  
        if (IS_ERR(req)) {
                retval = PTR_ERR(req);
                goto out_unlock;
        }
  
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
        req->handler = zfcp_fsf_exchange_config_data_handler;
  
        req->qtcb->bottom.config.feature_selection =
@@@ -1320,7 -1265,6 +1265,6 @@@ out_unlock
  int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
  {
        struct zfcp_qdio *qdio = erp_action->adapter->qdio;
-       struct qdio_buffer_element *sbale;
        struct zfcp_fsf_req *req;
        int retval = -EIO;
  
                return -EOPNOTSUPP;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
  
        if (IS_ERR(req)) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->handler = zfcp_fsf_exchange_port_data_handler;
        req->erp_action = erp_action;
@@@ -1368,7 -1311,6 +1311,6 @@@ out
  int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
                                     struct fsf_qtcb_bottom_port *data)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_fsf_req *req = NULL;
        int retval = -EIO;
  
                return -EOPNOTSUPP;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out_unlock;
  
-       req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+       req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+                                 SBAL_FLAGS0_TYPE_READ, NULL);
  
        if (IS_ERR(req)) {
                retval = PTR_ERR(req);
        if (data)
                req->data = data;
  
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->handler = zfcp_fsf_exchange_port_data_handler;
        zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
   */
  int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_qdio *qdio = erp_action->adapter->qdio;
        struct zfcp_port *port = erp_action->port;
        struct zfcp_fsf_req *req;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
  
        if (IS_ERR(req)) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->handler = zfcp_fsf_open_port_handler;
        hton24(req->qtcb->bottom.support.d_id, port->d_id);
@@@ -1556,16 -1495,16 +1495,16 @@@ static void zfcp_fsf_close_port_handler
   */
  int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_qdio *qdio = erp_action->adapter->qdio;
        struct zfcp_fsf_req *req;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
  
        if (IS_ERR(req)) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->handler = zfcp_fsf_close_port_handler;
        req->data = erp_action->port;
   */
  int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
        struct zfcp_fsf_req *req;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
  
        if (unlikely(IS_ERR(req))) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->handler = zfcp_fsf_open_wka_port_handler;
        hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
@@@ -1688,16 -1623,16 +1623,16 @@@ static void zfcp_fsf_close_wka_port_han
   */
  int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
        struct zfcp_fsf_req *req;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
  
        if (unlikely(IS_ERR(req))) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->handler = zfcp_fsf_close_wka_port_handler;
        req->data = wka_port;
@@@ -1782,16 -1715,16 +1715,16 @@@ static void zfcp_fsf_close_physical_por
   */
  int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_qdio *qdio = erp_action->adapter->qdio;
        struct zfcp_fsf_req *req;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
  
        if (IS_ERR(req)) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->data = erp_action->port;
        req->qtcb->header.port_handle = erp_action->port->handle;
@@@ -1954,17 -1885,17 +1885,17 @@@ static void zfcp_fsf_open_unit_handler(
   */
  int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_adapter *adapter = erp_action->adapter;
        struct zfcp_qdio *qdio = adapter->qdio;
        struct zfcp_fsf_req *req;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  adapter->pool.erp_req);
  
        if (IS_ERR(req)) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->qtcb->header.port_handle = erp_action->port->handle;
        req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
@@@ -2041,16 -1970,16 +1970,16 @@@ static void zfcp_fsf_close_unit_handler
   */
  int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_qdio *qdio = erp_action->adapter->qdio;
        struct zfcp_fsf_req *req;
        int retval = -EIO;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+                                 SBAL_FLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
  
        if (IS_ERR(req)) {
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        req->qtcb->header.port_handle = erp_action->port->handle;
        req->qtcb->header.lun_handle = erp_action->unit->handle;
@@@ -2105,8 -2032,7 +2032,8 @@@ static void zfcp_fsf_req_trace(struct z
        blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
        blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
  
 -      if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
 +      if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
 +          !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
                blktrc.flags |= ZFCP_BLK_LAT_VALID;
                blktrc.channel_lat = lat_in->channel_lat * ticks;
                blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@@ -2158,8 -2084,9 +2085,8 @@@ static void zfcp_fsf_send_fcp_command_t
        fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
        zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
  
 -      zfcp_fsf_req_trace(req, scpnt);
 -
  skip_fsfstatus:
 +      zfcp_fsf_req_trace(req, scpnt);
        zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
  
        scpnt->host_scribble = NULL;
@@@ -2289,8 -2216,11 +2216,11 @@@ int zfcp_fsf_send_fcp_command_task(stru
                goto out;
        }
  
+       if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+               sbtype = SBAL_FLAGS0_TYPE_WRITE;
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
-                                 adapter->pool.scsi_req);
+                                 sbtype, adapter->pool.scsi_req);
  
        if (IS_ERR(req)) {
                retval = PTR_ERR(req);
        }
  
        req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-       get_device(&unit->dev);
        req->unit = unit;
        req->data = scsi_cmnd;
        req->handler = zfcp_fsf_send_fcp_command_handler;
                break;
        case DMA_TO_DEVICE:
                req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
-               sbtype = SBAL_FLAGS0_TYPE_WRITE;
                break;
        case DMA_BIDIRECTIONAL:
                goto failed_scsi_cmnd;
        }
  
+       get_device(&unit->dev);
        fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
        zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
  
-       real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
+       real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
                                             scsi_sglist(scsi_cmnd),
-                                            FSF_MAX_SBALS_PER_REQ);
+                                            ZFCP_FSF_MAX_SBALS_PER_REQ);
        if (unlikely(real_bytes < 0)) {
-               if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+               if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
                        dev_err(&adapter->ccw_device->dev,
                                "Oversize data package, unit 0x%016Lx "
                                "on port 0x%016Lx closed\n",
@@@ -2371,7 -2301,6 +2301,6 @@@ out
   */
  struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_fsf_req *req = NULL;
        struct fcp_cmnd *fcp_cmnd;
        struct zfcp_qdio *qdio = unit->port->adapter->qdio;
                return NULL;
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+                                 SBAL_FLAGS0_TYPE_WRITE,
                                  qdio->adapter->pool.scsi_req);
  
        if (IS_ERR(req)) {
        req->qtcb->bottom.io.service_class = FSF_CLASS_3;
        req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
  
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
-       sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+       zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
  
        fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
        zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
@@@ -2432,7 -2360,6 +2360,6 @@@ static void zfcp_fsf_control_file_handl
  struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
                                           struct zfcp_fsf_cfdc *fsf_cfdc)
  {
-       struct qdio_buffer_element *sbale;
        struct zfcp_qdio *qdio = adapter->qdio;
        struct zfcp_fsf_req *req = NULL;
        struct fsf_qtcb_bottom_support *bottom;
        }
  
        spin_lock_bh(&qdio->req_q_lock);
-       if (zfcp_fsf_req_sbal_get(qdio))
+       if (zfcp_qdio_sbal_get(qdio))
                goto out;
  
-       req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
+       req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
        if (IS_ERR(req)) {
                retval = -EPERM;
                goto out;
  
        req->handler = zfcp_fsf_control_file_handler;
  
-       sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-       sbale[0].flags |= direction;
        bottom = &req->qtcb->bottom.support;
        bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
        bottom->option = fsf_cfdc->option;
  
        bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
-                                       direction, fsf_cfdc->sg,
-                                       FSF_MAX_SBALS_PER_REQ);
+                                       fsf_cfdc->sg,
+                                       ZFCP_FSF_MAX_SBALS_PER_REQ);
        if (bytes != ZFCP_CFDC_MAX_SIZE) {
                zfcp_fsf_req_free(req);
                goto out;

diff --combined drivers/scsi/bnx2i/bnx2i_init.c
@@@ -17,8 -17,8 +17,8 @@@ static struct list_head adapter_list = 
  static u32 adapter_count;
  
  #define DRV_MODULE_NAME               "bnx2i"
- #define DRV_MODULE_VERSION    "2.1.0"
- #define DRV_MODULE_RELDATE    "Dec 06, 2009"
+ #define DRV_MODULE_VERSION    "2.1.1"
+ #define DRV_MODULE_RELDATE    "Mar 24, 2010"
  
  static char version[] __devinitdata =
                "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@@ -26,7 -26,8 +26,8 @@@
  
  
  MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
- MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+                  " iSCSI Driver");
  MODULE_LICENSE("GPL");
  MODULE_VERSION(DRV_MODULE_VERSION);
  
@@@ -177,22 -178,11 +178,22 @@@ void bnx2i_stop(void *handle
        struct bnx2i_hba *hba = handle;
  
        /* check if cleanup happened in GOING_DOWN context */
 -      clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
        if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
                                &hba->adapter_state))
                iscsi_host_for_each_session(hba->shost,
                                            bnx2i_drop_session);
 +
 +      /* Wait for all endpoints to be torn down, Chip will be reset once
 +       *  control returns to network driver. So it is required to cleanup and
 +       * release all connection resources before returning from this routine.
 +       */
 +      wait_event_interruptible_timeout(hba->eh_wait,
 +                                       (hba->ofld_conns_active == 0),
 +                                       hba->hba_shutdown_tmo);
 +      /* This flag should be cleared last so that ep_disconnect() gracefully
 +       * cleans up connection context
 +       */
 +      clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
  }
  
  /**
@@@ -289,6 -279,7 +290,7 @@@ static int bnx2i_init_one(struct bnx2i_
        int rc;
  
        mutex_lock(&bnx2i_dev_lock);
+       hba->cnic = cnic;
        rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
        if (!rc) {
                hba->age++;
@@@ -335,8 -326,7 +337,7 @@@ void bnx2i_ulp_init(struct cnic_dev *de
        if (bnx2i_init_one(hba, dev)) {
                printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
                bnx2i_free_hba(hba);
-       } else
-               hba->cnic = dev;
+       }
  }
  
  
diff --combined drivers/scsi/iscsi_tcp.c
@@@ -206,8 -206,10 +206,10 @@@ static void iscsi_sw_tcp_conn_set_callb
  }
  
  static void
- iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn)
+ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
  {
+       struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+       struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
        struct sock *sk = tcp_sw_conn->sock->sk;
  
        /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@@ -555,7 -557,7 +557,7 @@@ static void iscsi_sw_tcp_release_conn(s
                return;
  
        sock_hold(sock->sk);
-       iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
+       iscsi_sw_tcp_conn_restore_callbacks(conn);
        sock_put(sock->sk);
  
        spin_lock_bh(&session->lock);
@@@ -599,7 -601,7 +601,7 @@@ static void iscsi_sw_tcp_conn_stop(stru
        set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
        write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
  
 -      if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
 +      if (sock->sk->sk_sleep) {
                sock->sk->sk_err = EIO;
                wake_up_interruptible(sock->sk->sk_sleep);
        }
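
iscsi_sw_tcp hooks the socket's state/data callbacks while a connection is
bound and must put the saved originals back when it is torn down; the hunks
above only change which object the restore helper receives (the iscsi_conn,
from which the private tcp_sw_conn data is re-derived).  A generic user-space
sketch of that save/hook/restore pattern on a function pointer; the types here
are mocks, not the iSCSI structures:

#include <stdio.h>

struct mock_sock {
	void (*data_ready)(struct mock_sock *sk);
};

struct mock_conn {
	struct mock_sock *sk;
	void (*old_data_ready)(struct mock_sock *sk);	/* saved original */
};

static void default_data_ready(struct mock_sock *sk)
{
	(void)sk;
	puts("default data_ready");
}

static void conn_data_ready(struct mock_sock *sk)
{
	(void)sk;
	puts("connection-specific data_ready");
}

static void set_callbacks(struct mock_conn *conn)
{
	conn->old_data_ready = conn->sk->data_ready;	/* remember the original */
	conn->sk->data_ready = conn_data_ready;
}

static void restore_callbacks(struct mock_conn *conn)
{
	conn->sk->data_ready = conn->old_data_ready;	/* put the original back */
}

int main(void)
{
	struct mock_sock sk = { .data_ready = default_data_ready };
	struct mock_conn conn = { .sk = &sk };

	set_callbacks(&conn);
	sk.data_ready(&sk);
	restore_callbacks(&conn);
	sk.data_ready(&sk);
	return 0;
}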

diff --combined drivers/scsi/qla4xxx/ql4_mbx.c
@@@ -172,108 -172,207 +172,207 @@@ mbox_exit
        return status;
  }
  
+ uint8_t
+ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+                uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+ {
+       memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+       memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+       mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
+       mbox_cmd[1] = 0;
+       mbox_cmd[2] = LSDW(init_fw_cb_dma);
+       mbox_cmd[3] = MSDW(init_fw_cb_dma);
+       mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+       mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
+       if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
+           QLA_SUCCESS) {
+               DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+                             "MBOX_CMD_INITIALIZE_FIRMWARE"
+                             " failed w/ status %04X\n",
+                             ha->host_no, __func__, mbox_sts[0]));
+               return QLA_ERROR;
+       }
+       return QLA_SUCCESS;
+ }
+ 
+ uint8_t
+ qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+                uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+ {
+       memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+       memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+       mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
+       mbox_cmd[2] = LSDW(init_fw_cb_dma);
+       mbox_cmd[3] = MSDW(init_fw_cb_dma);
+       mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+       if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
+           QLA_SUCCESS) {
+               DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+                             "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
+                             " failed w/ status %04X\n",
+                             ha->host_no, __func__, mbox_sts[0]));
+               return QLA_ERROR;
+       }
+       return QLA_SUCCESS;
+ }
+ 
+ void
+ qla4xxx_update_local_ip(struct scsi_qla_host *ha,
+                        struct addr_ctrl_blk  *init_fw_cb)
+ {
+       /* Save IPv4 Address Info */
+       memcpy(ha->ip_address, init_fw_cb->ipv4_addr,
+               min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr)));
+       memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet,
+               min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet)));
+       memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr,
+               min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr)));
+       if (is_ipv6_enabled(ha)) {
+               /* Save IPv6 Address */
+               ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state;
+               ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state;
+               ha->ipv6_addr1_state = init_fw_cb->ipv6_addr1_state;
+               ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state;
+               ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
+               ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
+               memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8],
+                       init_fw_cb->ipv6_if_id,
+                       min(sizeof(ha->ipv6_link_local_addr)/2,
+                       sizeof(init_fw_cb->ipv6_if_id)));
+               memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0,
+                       min(sizeof(ha->ipv6_addr0),
+                       sizeof(init_fw_cb->ipv6_addr0)));
+               memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1,
+                       min(sizeof(ha->ipv6_addr1),
+                       sizeof(init_fw_cb->ipv6_addr1)));
+               memcpy(&ha->ipv6_default_router_addr,
+                       init_fw_cb->ipv6_dflt_rtr_addr,
+                       min(sizeof(ha->ipv6_default_router_addr),
+                       sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+       }
+ }
+ 
+ uint8_t
+ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+                         uint32_t *mbox_cmd,
+                         uint32_t *mbox_sts,
+                         struct addr_ctrl_blk  *init_fw_cb,
+                         dma_addr_t init_fw_cb_dma)
+ {
+       if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
+           != QLA_SUCCESS) {
+               DEBUG2(printk(KERN_WARNING
+                             "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+                             ha->host_no, __func__));
+               return QLA_ERROR;
+       }
+       DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
+       /* Save some info in adapter structure. */
+       ha->acb_version = init_fw_cb->acb_version;
+       ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
+       ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
+       ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
+       ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state);
+       ha->heartbeat_interval = init_fw_cb->hb_interval;
+       memcpy(ha->name_string, init_fw_cb->iscsi_name,
+               min(sizeof(ha->name_string),
+               sizeof(init_fw_cb->iscsi_name)));
+       /*memcpy(ha->alias, init_fw_cb->Alias,
+              min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
+       /* Save Command Line Parameter info */
+       ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout);
+       ha->discovery_wait = ql4xdiscoverywait;
+       if (ha->acb_version == ACB_SUPPORTED) {
+               ha->ipv6_options = init_fw_cb->ipv6_opts;
+               ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
+       }
+       qla4xxx_update_local_ip(ha, init_fw_cb);
+       return QLA_SUCCESS;
+ }
+ 
  /**
   * qla4xxx_initialize_fw_cb - initializes firmware control block.
   * @ha: Pointer to host adapter structure.
   **/
  int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
  {
-       struct init_fw_ctrl_blk *init_fw_cb;
+       struct addr_ctrl_blk *init_fw_cb;
        dma_addr_t init_fw_cb_dma;
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
        int status = QLA_ERROR;
  
        init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
-                                       sizeof(struct init_fw_ctrl_blk),
+                                       sizeof(struct addr_ctrl_blk),
                                        &init_fw_cb_dma, GFP_KERNEL);
        if (init_fw_cb == NULL) {
                DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
                              ha->host_no, __func__));
                return 10;
        }
-       memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
+       memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
  
        /* Get Initialize Firmware Control Block. */
        memset(&mbox_cmd, 0, sizeof(mbox_cmd));
        memset(&mbox_sts, 0, sizeof(mbox_sts));
  
-       mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
-       mbox_cmd[2] = LSDW(init_fw_cb_dma);
-       mbox_cmd[3] = MSDW(init_fw_cb_dma);
-       mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
-       if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+       if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
            QLA_SUCCESS) {
                dma_free_coherent(&ha->pdev->dev,
-                                 sizeof(struct init_fw_ctrl_blk),
+                                 sizeof(struct addr_ctrl_blk),
                                  init_fw_cb, init_fw_cb_dma);
-               return status;
+               goto exit_init_fw_cb;
        }
  
        /* Initialize request and response queues. */
        qla4xxx_init_rings(ha);
  
        /* Fill in the request and response queue information. */
-       init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out);
-       init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in);
-       init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
-       init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
-       init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
-       init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
-       init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
-       init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
-       init_fw_cb->pri.shdwreg_addr_lo =
-               cpu_to_le32(LSDW(ha->shadow_regs_dma));
-       init_fw_cb->pri.shdwreg_addr_hi =
-               cpu_to_le32(MSDW(ha->shadow_regs_dma));
+       init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
+       init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
+       init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+       init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+       init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
+       init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
+       init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
+       init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
+       init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
+       init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
  
        /* Set up required options. */
-       init_fw_cb->pri.fw_options |=
+       init_fw_cb->fw_options |=
                __constant_cpu_to_le16(FWOPT_SESSION_MODE |
                                       FWOPT_INITIATOR_MODE);
-       init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
-       /* Save some info in adapter structure. */
-       ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
-       ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
-       ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
-       memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
-              min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
-       memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
-              min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
-       memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
-              min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
-       memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
-              min(sizeof(ha->name_string),
-                  sizeof(init_fw_cb->pri.iscsi_name)));
-       /*memcpy(ha->alias, init_fw_cb->Alias,
-              min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
+       init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
  
-       /* Save Command Line Paramater info */
-       ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
-       ha->discovery_wait = ql4xdiscoverywait;
-       /* Send Initialize Firmware Control Block. */
-       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
-       memset(&mbox_sts, 0, sizeof(mbox_sts));
-       mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
-       mbox_cmd[1] = 0;
-       mbox_cmd[2] = LSDW(init_fw_cb_dma);
-       mbox_cmd[3] = MSDW(init_fw_cb_dma);
-       mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
+       if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
+               != QLA_SUCCESS) {
+               DEBUG2(printk(KERN_WARNING
+                             "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
+                             ha->host_no, __func__));
+               goto exit_init_fw_cb;
+       }
  
-       if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) ==
-           QLA_SUCCESS)
-               status = QLA_SUCCESS;
-        else {
-               DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE "
-                             "failed w/ status %04X\n", ha->host_no, __func__,
-                             mbox_sts[0]));
+       if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
+               init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
+               DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
+                               ha->host_no, __func__));
+               goto exit_init_fw_cb;
        }
-       dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
-                         init_fw_cb, init_fw_cb_dma);
+       status = QLA_SUCCESS;
+ exit_init_fw_cb:
+       dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+                               init_fw_cb, init_fw_cb_dma);
  
        return status;
  }
   **/
  int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
  {
-       struct init_fw_ctrl_blk *init_fw_cb;
+       struct addr_ctrl_blk *init_fw_cb;
        dma_addr_t init_fw_cb_dma;
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
  
        init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
-                                       sizeof(struct init_fw_ctrl_blk),
+                                       sizeof(struct addr_ctrl_blk),
                                        &init_fw_cb_dma, GFP_KERNEL);
        if (init_fw_cb == NULL) {
                printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
        }
  
        /* Get Initialize Firmware Control Block. */
-       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
-       memset(&mbox_sts, 0, sizeof(mbox_sts));
-       memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
-       mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
-       mbox_cmd[2] = LSDW(init_fw_cb_dma);
-       mbox_cmd[3] = MSDW(init_fw_cb_dma);
-       mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
-       if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+       memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+       if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
            QLA_SUCCESS) {
                DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
                              ha->host_no, __func__));
                dma_free_coherent(&ha->pdev->dev,
-                                 sizeof(struct init_fw_ctrl_blk),
+                                 sizeof(struct addr_ctrl_blk),
                                  init_fw_cb, init_fw_cb_dma);
                return QLA_ERROR;
        }
  
        /* Save IP Address. */
-       memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
-              min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
-       memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
-              min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
-       memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
-              min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
-       dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
-                         init_fw_cb, init_fw_cb_dma);
+       qla4xxx_update_local_ip(ha, init_fw_cb);
+       dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+                               init_fw_cb, init_fw_cb_dma);
  
        return QLA_SUCCESS;
  }
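
Both new helpers qla4xxx_set_ifcb() and qla4xxx_get_ifcb() load the 64-bit DMA
address of the address control block into two 32-bit mailbox registers via
LSDW()/MSDW().  The sketch below shows that split in isolation; the macro
definitions are assumptions about the driver's semantics (low and high 32-bit
word), and the opcode and size values are placeholders:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of LSDW()/MSDW(): least/most significant 32-bit
 * word of a 64-bit bus address. */
#define LSDW(x) ((uint32_t)((uint64_t)(x) & 0xffffffffu))
#define MSDW(x) ((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
	uint64_t init_fw_cb_dma = 0x0000000123456000ull;	/* example bus address */
	uint32_t mbox_cmd[8] = { 0 };

	mbox_cmd[0] = 0;			/* the MBOX_CMD_* opcode would go here */
	mbox_cmd[2] = LSDW(init_fw_cb_dma);	/* low 32 bits of the ACB address */
	mbox_cmd[3] = MSDW(init_fw_cb_dma);	/* high 32 bits of the ACB address */
	mbox_cmd[4] = 512;			/* control block size, placeholder */

	printf("mbox[2]=0x%08" PRIx32 " mbox[3]=0x%08" PRIx32 "\n",
	       mbox_cmd[2], mbox_cmd[3]);
	return 0;
}
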
@@@ -409,6 -494,7 +494,7 @@@ int qla4xxx_get_fwddb_entry(struct scsi
                            uint16_t *connection_id)
  {
        int status = QLA_ERROR;
+       uint16_t options;
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
  
                goto exit_get_fwddb;
        }
        if (fw_ddb_entry) {
-               dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
-                          "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
-                          fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
-                          mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0],
-                          fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2],
-                          fw_ddb_entry->ip_addr[3],
-                          le16_to_cpu(fw_ddb_entry->port),
-                          fw_ddb_entry->iscsi_name);
+               options = le16_to_cpu(fw_ddb_entry->options);
+               if (options & DDB_OPT_IPV6_DEVICE) {
+                       dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
+                               "Next %d State %04x ConnErr %08x %pI6 "
+                               ":%04d \"%s\"\n", __func__, fw_ddb_index,
+                               mbox_sts[0], mbox_sts[2], mbox_sts[3],
+                               mbox_sts[4], mbox_sts[5],
+                               fw_ddb_entry->ip_addr,
+                               le16_to_cpu(fw_ddb_entry->port),
+                               fw_ddb_entry->iscsi_name);
+               } else {
+                       dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
+                               "Next %d State %04x ConnErr %08x %pI4 "
+                               ":%04d \"%s\"\n", __func__, fw_ddb_index,
+                               mbox_sts[0], mbox_sts[2], mbox_sts[3],
+                               mbox_sts[4], mbox_sts[5],
+                               fw_ddb_entry->ip_addr,
+                               le16_to_cpu(fw_ddb_entry->port),
+                               fw_ddb_entry->iscsi_name);
+               }
        }
        if (num_valid_ddb_entries)
                *num_valid_ddb_entries = mbox_sts[2];
        if (conn_err_detail)
                *conn_err_detail = mbox_sts[5];
        if (tcp_source_port_num)
 -              *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16;
 +              *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
        if (connection_id)
                *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
        status = QLA_SUCCESS;
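
One small fix in the hunk above is the added parentheses in the TCP source
port extraction: in "(uint16_t) mbox_sts[6] >> 16" the cast binds tighter than
the shift, so the value is truncated to 16 bits first and the shift always
yields 0.  A two-variable demonstration of the difference (the bit layout,
port in the upper half word, is taken from the hunk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mbox6 = 0xBEEF1234;	/* upper half word: TCP source port */

	/* cast first, then shift: 0x1234 >> 16 == 0 */
	uint16_t wrong = (uint16_t) mbox6 >> 16;
	/* shift first, then truncate: 0xBEEF */
	uint16_t right = (uint16_t) (mbox6 >> 16);

	printf("without parentheses: 0x%04x\n", wrong);
	printf("with parentheses:    0x%04x\n", right);
	return 0;
}
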
@@@ -664,6 -762,59 +762,59 @@@ exit_get_event_log
  }
  
  /**
+  * qla4xxx_abort_task - issues Abort Task
+  * @ha: Pointer to host adapter structure.
+  * @srb: Pointer to srb entry
+  *
+  * This routine issues an Abort Task request for the command associated
+  * with the given srb.  The caller must ensure that the srb pointer is
+  * valid before calling this routine.
+  **/
+ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
+ {
+       uint32_t mbox_cmd[MBOX_REG_COUNT];
+       uint32_t mbox_sts[MBOX_REG_COUNT];
+       struct scsi_cmnd *cmd = srb->cmd;
+       int status = QLA_SUCCESS;
+       unsigned long flags = 0;
+       uint32_t index;
+       /*
+        * Send abort task command to ISP, so that the ISP will return
+        * request with ABORT status
+        */
+       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+       memset(&mbox_sts, 0, sizeof(mbox_sts));
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       index = (unsigned long)(unsigned char *)cmd->host_scribble;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       /* Firmware already posted completion on response queue */
+       if (index == MAX_SRBS)
+               return status;
+       mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
+       mbox_cmd[1] = srb->fw_ddb_index;
+       mbox_cmd[2] = index;
+       /* Immediate Command Enable */
+       mbox_cmd[5] = 0x01;
+       qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
+           &mbox_sts[0]);
+       if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
+               status = QLA_ERROR;
+               DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%d: abort task FAILED: "
+                   "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
+                   ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
+                   mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
+       }
+       return status;
+ }
+ 
+ /**
   * qla4xxx_reset_lun - issues LUN Reset
   * @ha: Pointer to host adapter structure.
   * @db_entry: Pointer to device database entry

diff --combined drivers/scsi/scsi_debug.c
@@@ -12,7 -12,7 +12,7 @@@
   *  SAS disks.
   *
   *
-  *  For documentation see http://www.torque.net/sg/sdebug26.html
+  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
   *
   *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
   *   dpg: work for devfs large number of disks [20010809]
@@@ -58,8 -58,8 +58,8 @@@
  #include "sd.h"
  #include "scsi_logging.h"
  
- #define SCSI_DEBUG_VERSION "1.81"
- static const char * scsi_debug_version_date = "20070104";
+ #define SCSI_DEBUG_VERSION "1.82"
+ static const char * scsi_debug_version_date = "20100324";
  
  /* Additional Sense Code (ASC) */
  #define NO_ADDITIONAL_SENSE 0x0
  #define DEF_ATO 1
  #define DEF_PHYSBLK_EXP 0
  #define DEF_LOWEST_ALIGNED 0
+ #define DEF_OPT_BLKS 64
  #define DEF_UNMAP_MAX_BLOCKS 0
  #define DEF_UNMAP_MAX_DESC 0
  #define DEF_UNMAP_GRANULARITY 0
  #define SAM2_LUN_ADDRESS_METHOD 0
  #define SAM2_WLUN_REPORT_LUNS 0xc101
  
+ /* Can queue up to this number of commands. Typically commands that
+  * have a non-zero delay are queued. */
+ #define SCSI_DEBUG_CANQUEUE  255
+ 
  static int scsi_debug_add_host = DEF_NUM_HOST;
  static int scsi_debug_delay = DEF_DELAY;
  static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
  static int scsi_debug_every_nth = DEF_EVERY_NTH;
  static int scsi_debug_max_luns = DEF_MAX_LUNS;
+ static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
  static int scsi_debug_num_parts = DEF_NUM_PARTS;
+ static int scsi_debug_no_uld = 0;
  static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
  static int scsi_debug_opts = DEF_OPTS;
  static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
@@@ -169,6 -176,7 +176,7 @@@ static int scsi_debug_guard = DEF_GUARD
  static int scsi_debug_ato = DEF_ATO;
  static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
  static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
+ static int scsi_debug_opt_blks = DEF_OPT_BLKS;
  static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
  static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
  static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
@@@ -192,7 -200,6 +200,6 @@@ static int sdebug_sectors_per;             /* sect
  
  #define SDEBUG_SENSE_LEN 32
  
- #define SCSI_DEBUG_CANQUEUE  255
  #define SCSI_DEBUG_MAX_CMD_LEN 32
  
  struct sdebug_dev_info {
@@@ -699,9 -706,13 +706,13 @@@ static int inquiry_evpd_b0(unsigned cha
        unsigned int gran;
  
        memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
+       /* Optimal transfer length granularity */
        gran = 1 << scsi_debug_physblk_exp;
        arr[2] = (gran >> 8) & 0xff;
        arr[3] = gran & 0xff;
+       /* Maximum Transfer Length */
        if (sdebug_store_sectors > 0x400) {
                arr[4] = (sdebug_store_sectors >> 24) & 0xff;
                arr[5] = (sdebug_store_sectors >> 16) & 0xff;
                arr[7] = sdebug_store_sectors & 0xff;
        }
  
+       /* Optimal Transfer Length */
+       put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
        if (scsi_debug_unmap_max_desc) {
                unsigned int blocks;
  
                else
                        blocks = 0xffffffff;
  
+               /* Maximum Unmap LBA Count */
                put_unaligned_be32(blocks, &arr[16]);
+               /* Maximum Unmap Block Descriptor Count */
                put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
        }
  
+       /* Unmap Granularity Alignment */
        if (scsi_debug_unmap_alignment) {
                put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
                arr[28] |= 0x80; /* UGAVALID */
        }
  
+       /* Optimal Unmap Granularity */
        if (scsi_debug_unmap_granularity) {
                put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
                return 0x3c; /* Mandatory page length for thin provisioning */
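
The bytes filled in above follow the SBC Block Limits VPD page (0xB0) layout, with arr[] indexing the payload that follows the 4-byte page header. As a rough, hypothetical consumer-side sketch (the struct and helper names are illustrative, not part of scsi_debug), the same offsets could be read back like this:

    /* Illustrative parser for a Block Limits (0xB0) payload laid out as
     * above; arr points just past the 4-byte VPD page header. */
    #include <linux/types.h>
    #include <asm/unaligned.h>

    struct b0_limits {
    	u16 opt_xfer_gran;	/* optimal transfer length granularity */
    	u32 max_xfer_len;	/* maximum transfer length (blocks)    */
    	u32 opt_xfer_len;	/* optimal transfer length (blocks)    */
    	u32 max_unmap_lba;	/* maximum unmap LBA count             */
    	u32 opt_unmap_gran;	/* optimal unmap granularity           */
    };

    static void parse_b0_payload(const unsigned char *arr, struct b0_limits *bl)
    {
    	bl->opt_xfer_gran  = get_unaligned_be16(&arr[2]);
    	bl->max_xfer_len   = get_unaligned_be32(&arr[4]);
    	bl->opt_xfer_len   = get_unaligned_be32(&arr[8]);
    	bl->max_unmap_lba  = get_unaligned_be32(&arr[16]);
    	bl->opt_unmap_gran = get_unaligned_be32(&arr[24]);
    }
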
@@@ -957,8 -976,7 +976,8 @@@ static int resp_start_stop(struct scsi_
  static sector_t get_sdebug_capacity(void)
  {
        if (scsi_debug_virtual_gb > 0)
 -              return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
 +              return (sector_t)scsi_debug_virtual_gb *
 +                      (1073741824 / scsi_debug_sector_size);
        else
                return sdebug_store_sectors;
  }
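
The rewritten conversion scales virtual_gb by the number of logical blocks per GiB instead of hard-coding 512-byte sectors. A worked example with illustrative values (sector_t comes from <linux/types.h>; the helper name is hypothetical):

    /* Worked example: 1 GiB = 1073741824 bytes. */
    static sector_t blocks_for(unsigned int virtual_gb, unsigned int sector_size)
    {
    	return (sector_t)virtual_gb * (1073741824 / sector_size);
    }
    /* blocks_for(4, 512)  == 8388608  - matches the old 512-byte assumption
     * blocks_for(4, 4096) == 1048576  - the same 4 GiB with 4 KiB blocks   */
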
@@@ -2266,7 -2284,7 +2285,7 @@@ static void timer_intr_handler(unsigne
        struct sdebug_queued_cmd * sqcp;
        unsigned long iflags;
  
-       if (indx >= SCSI_DEBUG_CANQUEUE) {
+       if (indx >= scsi_debug_max_queue) {
                printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
                       "large\n");
                return;
@@@ -2380,6 -2398,8 +2399,8 @@@ static int scsi_debug_slave_configure(s
                scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
                                        sdp->host->cmd_per_lun);
        blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
+       if (scsi_debug_no_uld)
+               sdp->no_uld_attach = 1;
        return 0;
  }
  
@@@ -2406,7 -2426,7 +2427,7 @@@ static int stop_queued_cmnd(struct scsi
        struct sdebug_queued_cmd *sqcp;
  
        spin_lock_irqsave(&queued_arr_lock, iflags);
-       for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+       for (k = 0; k < scsi_debug_max_queue; ++k) {
                sqcp = &queued_arr[k];
                if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
                        del_timer_sync(&sqcp->cmnd_timer);
                }
        }
        spin_unlock_irqrestore(&queued_arr_lock, iflags);
-       return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
+       return (k < scsi_debug_max_queue) ? 1 : 0;
  }
  
  /* Deletes (stops) timers of all queued commands */
@@@ -2427,7 -2447,7 +2448,7 @@@ static void stop_all_queued(void
        struct sdebug_queued_cmd *sqcp;
  
        spin_lock_irqsave(&queued_arr_lock, iflags);
-       for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+       for (k = 0; k < scsi_debug_max_queue; ++k) {
                sqcp = &queued_arr[k];
                if (sqcp->in_use && sqcp->a_cmnd) {
                        del_timer_sync(&sqcp->cmnd_timer);
@@@ -2533,7 -2553,7 +2554,7 @@@ static void __init init_all_queued(void
        struct sdebug_queued_cmd * sqcp;
  
        spin_lock_irqsave(&queued_arr_lock, iflags);
-       for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+       for (k = 0; k < scsi_debug_max_queue; ++k) {
                sqcp = &queued_arr[k];
                init_timer(&sqcp->cmnd_timer);
                sqcp->in_use = 0;
@@@ -2625,12 -2645,12 +2646,12 @@@ static int schedule_resp(struct scsi_cm
                struct sdebug_queued_cmd * sqcp = NULL;
  
                spin_lock_irqsave(&queued_arr_lock, iflags);
-               for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+               for (k = 0; k < scsi_debug_max_queue; ++k) {
                        sqcp = &queued_arr[k];
                        if (! sqcp->in_use)
                                break;
                }
-               if (k >= SCSI_DEBUG_CANQUEUE) {
+               if (k >= scsi_debug_max_queue) {
                        spin_unlock_irqrestore(&queued_arr_lock, iflags);
                        printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
                        return 1;       /* report busy to mid level */
@@@ -2662,7 -2682,9 +2683,9 @@@ module_param_named(dsense, scsi_debug_d
  module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
  module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
  module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
+ module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
  module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
+ module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
  module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
  module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
  module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
@@@ -2677,6 -2699,7 +2700,7 @@@ module_param_named(dif, scsi_debug_dif
  module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
  module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
  module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
+ module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
  module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
  module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
  module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
@@@ -2695,7 -2718,9 +2719,9 @@@ MODULE_PARM_DESC(dsense, "use descripto
  MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
  MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
  MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
+ MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
  MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
+ MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
  MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
  MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
  MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
@@@ -2705,6 -2730,7 +2731,7 @@@ MODULE_PARM_DESC(virtual_gb, "virtual g
  MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
  MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
  MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+ MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
  MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
  MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
  MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@@ -2970,6 -2996,31 +2997,31 @@@ static ssize_t sdebug_max_luns_store(st
  DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
            sdebug_max_luns_store);
  
+ static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
+ {
+         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
+ }
+ static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
+                                     const char * buf, size_t count)
+ {
+         int n;
+       if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
+           (n <= SCSI_DEBUG_CANQUEUE)) {
+               scsi_debug_max_queue = n;
+               return count;
+       }
+       return -EINVAL;
+ }
+ DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
+           sdebug_max_queue_store);
+ static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
+ {
+         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
+ }
+ DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
  static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
  {
          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
@@@ -3107,7 -3158,9 +3159,9 @@@ static int do_create_driverfs_files(voi
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
+       ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
+       ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
@@@ -3139,7 -3192,9 +3193,9 @@@ static void do_remove_driverfs_files(vo
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
+       driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
+       driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
@@@ -3830,12 -3885,13 +3886,13 @@@ static int sdebug_driver_probe(struct d
  
        sdbg_host = to_sdebug_host(dev);
  
-         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
-         if (NULL == hpnt) {
-                 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
-                 error = -ENODEV;
+       sdebug_driver_template.can_queue = scsi_debug_max_queue;
+       hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
+       if (NULL == hpnt) {
+               printk(KERN_ERR "%s: scsi_register failed\n", __func__);
+               error = -ENODEV;
                return error;
-         }
+       }
  
          sdbg_host->shost = hpnt;
        *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
@@@ -39,6 -39,8 +39,8 @@@
  #include "scsi_logging.h"
  #include "scsi_transport_api.h"
  
+ #include <trace/events/scsi.h>
  #define SENSE_TIMEOUT         (10*HZ)
  
  /*
@@@ -52,6 -54,7 +54,7 @@@
  void scsi_eh_wakeup(struct Scsi_Host *shost)
  {
        if (shost->host_busy == shost->host_failed) {
+               trace_scsi_eh_wakeup(shost);
                wake_up_process(shost->ehandler);
                SCSI_LOG_ERROR_RECOVERY(5,
                                printk("Waking error handler thread\n"));
@@@ -127,6 -130,7 +130,7 @@@ enum blk_eh_timer_return scsi_times_out
        struct scsi_cmnd *scmd = req->special;
        enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
  
+       trace_scsi_dispatch_cmd_timeout(scmd);
        scsi_log_completion(scmd, TIMEOUT_ERROR);
  
        if (scmd->device->host->transportt->eh_timed_out)
@@@ -302,20 -306,7 +306,20 @@@ static int scsi_check_sense(struct scsi
                if (scmd->device->allow_restart &&
                    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
                        return FAILED;
 -              return SUCCESS;
 +
 +              if (blk_barrier_rq(scmd->request))
 +                      /*
 +                       * barrier requests should always retry on UA
 +                       * otherwise block will get a spurious error
 +                       */
 +                      return NEEDS_RETRY;
 +              else
 +                      /*
 +                       * for normal (non barrier) commands, pass the
 +                       * UA upwards for a determination in the
 +                       * completion functions
 +                       */
 +                      return SUCCESS;
  
                /* these three are not supported */
        case COPY_ABORTED:
@@@ -970,9 -961,10 +974,10 @@@ static int scsi_eh_abort_cmds(struct li
                                                  "0x%p\n", current->comm,
                                                  scmd));
                rtn = scsi_try_to_abort_cmd(scmd);
-               if (rtn == SUCCESS) {
+               if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
                        scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
                        if (!scsi_device_online(scmd->device) ||
+                           rtn == FAST_IO_FAIL ||
                            !scsi_eh_tur(scmd)) {
                                scsi_eh_finish_cmd(scmd, done_q);
                        }
@@@ -1099,8 -1091,9 +1104,9 @@@ static int scsi_eh_bus_device_reset(str
                                                  " 0x%p\n", current->comm,
                                                  sdev));
                rtn = scsi_try_bus_device_reset(bdr_scmd);
-               if (rtn == SUCCESS) {
+               if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
                        if (!scsi_device_online(sdev) ||
+                           rtn == FAST_IO_FAIL ||
                            !scsi_eh_tur(bdr_scmd)) {
                                list_for_each_entry_safe(scmd, next,
                                                         work_q, eh_entry) {
@@@ -1163,10 -1156,11 +1169,11 @@@ static int scsi_eh_target_reset(struct 
                                                  "to target %d\n",
                                                  current->comm, id));
                rtn = scsi_try_target_reset(tgtr_scmd);
-               if (rtn == SUCCESS) {
+               if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
                        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
                                if (id == scmd_id(scmd))
                                        if (!scsi_device_online(scmd->device) ||
+                                           rtn == FAST_IO_FAIL ||
                                            !scsi_eh_tur(tgtr_scmd))
                                                scsi_eh_finish_cmd(scmd,
                                                                   done_q);
@@@ -1222,10 -1216,11 +1229,11 @@@ static int scsi_eh_bus_reset(struct Scs
                                                  " %d\n", current->comm,
                                                  channel));
                rtn = scsi_try_bus_reset(chan_scmd);
-               if (rtn == SUCCESS) {
+               if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
                        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
                                if (channel == scmd_channel(scmd))
                                        if (!scsi_device_online(scmd->device) ||
+                                           rtn == FAST_IO_FAIL ||
                                            !scsi_eh_tur(scmd))
                                                scsi_eh_finish_cmd(scmd,
                                                                   done_q);
@@@ -1259,9 -1254,10 +1267,10 @@@ static int scsi_eh_host_reset(struct li
                                                  , current->comm));
  
                rtn = scsi_try_host_reset(scmd);
-               if (rtn == SUCCESS) {
+               if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
                        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
                                if (!scsi_device_online(scmd->device) ||
+                                   rtn == FAST_IO_FAIL ||
                                    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
                                    !scsi_eh_tur(scmd))
                                        scsi_eh_finish_cmd(scmd, done_q);
diff --combined drivers/scsi/sd.c
@@@ -1040,7 -1040,6 +1040,7 @@@ static void sd_prepare_flush(struct req
  {
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->timeout = SD_TIMEOUT;
 +      rq->retries = SD_MAX_RETRIES;
        rq->cmd[0] = SYNCHRONIZE_CACHE;
        rq->cmd_len = 10;
  }
@@@ -1434,6 -1433,8 +1434,8 @@@ static void read_capacity_error(struct 
  #error RC16_LEN must not be more than SD_BUF_SIZE
  #endif
  
+ #define READ_CAPACITY_RETRIES_ON_RESET        10
  static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                                                unsigned char *buffer)
  {
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int the_result;
-       int retries = 3;
+       int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
        unsigned int alignment;
        unsigned long long lba;
        unsigned sector_size;
                                 * Invalid Field in CDB, just retry
                                 * silently with RC10 */
                                return -EINVAL;
+                       if (sense_valid &&
+                           sshdr.sense_key == UNIT_ATTENTION &&
+                           sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+                               /* Device reset might occur several times,
+                                * give it one more chance */
+                               if (--reset_retries > 0)
+                                       continue;
                }
                retries--;
  
@@@ -1528,7 -1536,7 +1537,7 @@@ static int read_capacity_10(struct scsi
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int the_result;
-       int retries = 3;
+       int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
        sector_t lba;
        unsigned sector_size;
  
                if (media_not_present(sdkp, &sshdr))
                        return -ENODEV;
  
-               if (the_result)
+               if (the_result) {
                        sense_valid = scsi_sense_valid(&sshdr);
+                       if (sense_valid &&
+                           sshdr.sense_key == UNIT_ATTENTION &&
+                           sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+                               /* Device reset might occur several times,
+                                * give it one more chance */
+                               if (--reset_retries > 0)
+                                       continue;
+               }
                retries--;
  
        } while (the_result && retries);
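
Both READ CAPACITY(16) and READ CAPACITY(10) now retry when the device reports a UNIT ATTENTION with ASC/ASCQ 0x29/0x00 ("power on, reset, or bus device reset occurred"), up to READ_CAPACITY_RETRIES_ON_RESET extra attempts. A small illustrative helper expressing that check (hypothetical name, not part of sd.c):

    #include <scsi/scsi.h>	/* UNIT_ATTENTION */
    #include <scsi/scsi_eh.h>	/* struct scsi_sense_hdr */

    /* Nonzero when the sense data says "power on, reset, or bus device
     * reset occurred" (ASC 0x29, ASCQ 0x00) - the condition the retry
     * loops above treat as transient. */
    static int sense_is_reset_ua(const struct scsi_sense_hdr *sshdr)
    {
    	return sshdr->sense_key == UNIT_ATTENTION &&
    	       sshdr->asc == 0x29 && sshdr->ascq == 0x00;
    }
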
  
  static int sd_try_rc16_first(struct scsi_device *sdp)
  {
+       if (sdp->host->max_cmd_len < 16)
+               return 0;
        if (sdp->scsi_level > SCSI_SPC_2)
                return 1;
        if (scsi_device_protection(sdp))
@@@ -2187,7 -2205,7 +2206,7 @@@ static void sd_probe_async(void *data, 
        blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
  
        gd->driverfs_dev = &sdp->sdev_gendev;
 -      gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
 +      gd->flags = GENHD_FL_EXT_DEVT;
        if (sdp->removable)
                gd->flags |= GENHD_FL_REMOVABLE;