IB/iser: New receive buffer posting logic
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9b3d79c..6d9bbe6 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <asm/io.h>
-#include <asm/scatterlist.h>
 #include <linux/scatterlist.h>
 #include <linux/kfifo.h>
 #include <scsi/scsi_cmnd.h>
@@ -43,9 +39,6 @@
 
 #include "iscsi_iser.h"
 
-/* Constant PDU lengths calculations */
-#define ISER_TOTAL_HEADERS_LEN  (sizeof (struct iser_hdr) + \
-                                sizeof (struct iscsi_hdr))
 
 /* iser_dto_add_regd_buff - increments the reference count for *
  * the registered buffer & adds it to the DTO object           */
@@ -68,46 +61,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  iser_ctask->data[ISER_DIR_IN].data_len
+ *  iser_task->data[ISER_DIR_IN].data_len
  */
-static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+static int iser_prepare_read_cmd(struct iscsi_task *task,
                                 unsigned int edtl)
 
 {
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_regd_buf *regd_buf;
        int err;
-       struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-       struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+       struct iser_hdr *hdr = &iser_task->desc.iser_header;
+       struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
 
-       err = iser_dma_map_task_data(iser_ctask,
+       err = iser_dma_map_task_data(iser_task,
                                     buf_in,
                                     ISER_DIR_IN,
                                     DMA_FROM_DEVICE);
        if (err)
                return err;
 
-       if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+       if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
                iser_err("Total data length: %ld, less than EDTL: "
                         "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
-                        iser_ctask->data[ISER_DIR_IN].data_len, edtl,
-                        ctask->itt, iser_ctask->iser_conn);
+                        iser_task->data[ISER_DIR_IN].data_len, edtl,
+                        task->itt, iser_task->iser_conn);
                return -EINVAL;
        }
 
-       err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+       err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                return err;
        }
-       regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+       regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
 
        hdr->flags    |= ISER_RSV;
        hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
        hdr->read_va   = cpu_to_be64(regd_buf->reg.va);
 
        iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
-                ctask->itt, regd_buf->reg.rkey,
+                task->itt, regd_buf->reg.rkey,
                 (unsigned long long)regd_buf->reg.va);
 
        return 0;
@@ -115,43 +108,43 @@ static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  ctask->data[ISER_DIR_OUT].data_len
+ *  task->data[ISER_DIR_OUT].data_len
  */
 static int
-iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+iser_prepare_write_cmd(struct iscsi_task *task,
                       unsigned int imm_sz,
                       unsigned int unsol_sz,
                       unsigned int edtl)
 {
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_regd_buf *regd_buf;
        int err;
-       struct iser_dto *send_dto = &iser_ctask->desc.dto;
-       struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-       struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+       struct iser_dto *send_dto = &iser_task->desc.dto;
+       struct iser_hdr *hdr = &iser_task->desc.iser_header;
+       struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
 
-       err = iser_dma_map_task_data(iser_ctask,
+       err = iser_dma_map_task_data(iser_task,
                                     buf_out,
                                     ISER_DIR_OUT,
                                     DMA_TO_DEVICE);
        if (err)
                return err;
 
-       if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+       if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Total data length: %ld, less than EDTL: %d, "
                         "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
-                        iser_ctask->data[ISER_DIR_OUT].data_len,
-                        edtl, ctask->itt, ctask->conn);
+                        iser_task->data[ISER_DIR_OUT].data_len,
+                        edtl, task->itt, task->conn);
                return -EINVAL;
        }
 
-       err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+       err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
        if (err != 0) {
                iser_err("Failed to register write cmd RDMA mem\n");
                return err;
        }
 
-       regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+       regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
 
        if (unsol_sz < edtl) {
                hdr->flags     |= ISER_WSV;
@@ -160,13 +153,13 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
 
                iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
                         "VA:%#llX + unsol:%d\n",
-                        ctask->itt, regd_buf->reg.rkey,
+                        task->itt, regd_buf->reg.rkey,
                         (unsigned long long)regd_buf->reg.va, unsol_sz);
        }
 
        if (imm_sz > 0) {
                iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
-                        ctask->itt, imm_sz);
+                        task->itt, imm_sz);
                iser_dto_add_regd_buff(send_dto,
                                       regd_buf,
                                       0,
@@ -176,78 +169,6 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
        return 0;
 }
 
-/**
- * iser_post_receive_control - allocates, initializes and posts receive DTO.
- */
-static int iser_post_receive_control(struct iscsi_conn *conn)
-{
-       struct iscsi_iser_conn *iser_conn = conn->dd_data;
-       struct iser_desc     *rx_desc;
-       struct iser_regd_buf *regd_hdr;
-       struct iser_regd_buf *regd_data;
-       struct iser_dto      *recv_dto = NULL;
-       struct iser_device  *device = iser_conn->ib_conn->device;
-       int rx_data_size, err = 0;
-
-       rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
-       if (rx_desc == NULL) {
-               iser_err("Failed to alloc desc for post recv\n");
-               return -ENOMEM;
-       }
-       rx_desc->type = ISCSI_RX;
-
-       /* for the login sequence we must support rx of upto 8K; login is done
-        * after conn create/bind (connect) and conn stop/bind (reconnect),
-        * what's common for both schemes is that the connection is not started
-        */
-       if (conn->c_stage != ISCSI_CONN_STARTED)
-               rx_data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
-       else /* FIXME till user space sets conn->max_recv_dlength correctly */
-               rx_data_size = 128;
-
-       rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
-       if (rx_desc->data == NULL) {
-               iser_err("Failed to alloc data buf for post recv\n");
-               err = -ENOMEM;
-               goto post_rx_kmalloc_failure;
-       }
-
-       recv_dto = &rx_desc->dto;
-       recv_dto->ib_conn = iser_conn->ib_conn;
-       recv_dto->regd_vector_len = 0;
-
-       regd_hdr = &rx_desc->hdr_regd_buf;
-       memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
-       regd_hdr->device  = device;
-       regd_hdr->virt_addr  = rx_desc; /* == &rx_desc->iser_header */
-       regd_hdr->data_size  = ISER_TOTAL_HEADERS_LEN;
-
-       iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);
-
-       iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
-
-       regd_data = &rx_desc->data_regd_buf;
-       memset(regd_data, 0, sizeof(struct iser_regd_buf));
-       regd_data->device  = device;
-       regd_data->virt_addr  = rx_desc->data;
-       regd_data->data_size  = rx_data_size;
-
-       iser_reg_single(device, regd_data, DMA_FROM_DEVICE);
-
-       iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
-
-       err = iser_post_recv(rx_desc);
-       if (!err)
-               return 0;
-
-       /* iser_post_recv failed */
-       iser_dto_buffs_release(recv_dto);
-       kfree(rx_desc->data);
-post_rx_kmalloc_failure:
-       kmem_cache_free(ig.desc_cache, rx_desc);
-       return err;
-}
-
 /* creates a new tx descriptor and adds header regd buffer */
 static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
                                  struct iser_desc       *tx_desc)
@@ -258,7 +179,7 @@ static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
        memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
        regd_hdr->device  = iser_conn->ib_conn->device;
        regd_hdr->virt_addr  = tx_desc; /* == &tx_desc->iser_header */
-       regd_hdr->data_size  = ISER_TOTAL_HEADERS_LEN;
+       regd_hdr->data_size  = ISER_HEADERS_LEN;
 
        send_dto->ib_conn         = iser_conn->ib_conn;
        send_dto->notify_enable   = 1;
@@ -270,6 +191,72 @@ static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
        iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0);
 }
 
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+{
+       int i, j;
+       u64 dma_addr;
+       struct iser_rx_desc *rx_desc;
+       struct ib_sge       *rx_sg;
+       struct iser_device  *device = ib_conn->device;
+
+       ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
+                               sizeof(struct iser_rx_desc), GFP_KERNEL);
+       if (!ib_conn->rx_descs)
+               goto rx_desc_alloc_fail;
+
+       rx_desc = ib_conn->rx_descs;
+
+       for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
+               dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
+                                       ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+               if (ib_dma_mapping_error(device->ib_device, dma_addr))
+                       goto rx_desc_dma_map_failed;
+
+               rx_desc->dma_addr = dma_addr;
+
+               rx_sg = &rx_desc->rx_sg;
+               rx_sg->addr   = rx_desc->dma_addr;
+               rx_sg->length = ISER_RX_PAYLOAD_SIZE;
+               rx_sg->lkey   = device->mr->lkey;
+       }
+
+       ib_conn->rx_desc_head = 0;
+       return 0;
+
+rx_desc_dma_map_failed:
+       rx_desc = ib_conn->rx_descs;
+       for (j = 0; j < i; j++, rx_desc++)
+               ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+                       ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+       kfree(ib_conn->rx_descs);
+       ib_conn->rx_descs = NULL;
+rx_desc_alloc_fail:
+       iser_err("failed allocating rx descriptors / data buffers\n");
+       return -ENOMEM;
+}
+
+void iser_free_rx_descriptors(struct iser_conn *ib_conn)
+{
+       int i;
+       struct iser_rx_desc *rx_desc;
+       struct iser_device *device = ib_conn->device;
+
+       if (ib_conn->login_buf) {
+               ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
+                       ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+               kfree(ib_conn->login_buf);
+       }
+
+       if (!ib_conn->rx_descs)
+               return;
+
+       rx_desc = ib_conn->rx_descs;
+       for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
+               ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+                       ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+       kfree(ib_conn->rx_descs);
+}
+
 /**
  *  iser_conn_set_full_featured_mode - (iSER API)
  */
@@ -277,107 +264,89 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
 {
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
-       int i;
-       /* no need to keep it in a var, we are after login so if this should
-        * be negotiated, by now the result should be available here */
-       int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;
-
-       iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
+       iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
 
        /* Check that there is no posted recv or send buffers left - */
        /* they must be consumed during the login phase */
        BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0);
        BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
 
+       if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
+               return -ENOMEM;
+
        /* Initial post receive buffers */
-       for (i = 0; i < initial_post_recv_bufs_num; i++) {
-               if (iser_post_receive_control(conn) != 0) {
-                       iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
-                                i, conn);
-                       return -ENOMEM;
-               }
-       }
-       iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
+       if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+               return -ENOMEM;
+
        return 0;
 }
 
 static int
 iser_check_xmit(struct iscsi_conn *conn, void *task)
 {
-       int rc = 0;
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
-       write_lock_bh(conn->recv_lock);
        if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
            ISER_QP_MAX_REQ_DTOS) {
-               iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task);
-               set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-               rc = -EAGAIN;
+               iser_dbg("%ld can't xmit task %p\n",jiffies,task);
+               return -ENOBUFS;
        }
-       write_unlock_bh(conn->recv_lock);
-       return rc;
+       return 0;
 }
 
 
 /**
  * iser_send_command - send command PDU
  */
-int iser_send_command(struct iscsi_conn     *conn,
-                     struct iscsi_cmd_task *ctask)
+int iser_send_command(struct iscsi_conn *conn,
+                     struct iscsi_task *task)
 {
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_dto *send_dto = NULL;
        unsigned long edtl;
-       int err = 0;
+       int err;
        struct iser_data_buf *data_buf;
-
-       struct iscsi_cmd *hdr =  ctask->hdr;
-       struct scsi_cmnd *sc  =  ctask->sc;
+       struct iscsi_cmd *hdr =  (struct iscsi_cmd *)task->hdr;
+       struct scsi_cmnd *sc  =  task->sc;
 
        if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
                iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
                return -EPERM;
        }
-       if (iser_check_xmit(conn, ctask))
-               return -EAGAIN;
+       if (iser_check_xmit(conn, task))
+               return -ENOBUFS;
 
        edtl = ntohl(hdr->data_length);
 
        /* build the tx desc regd header and add it to the tx desc dto */
-       iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
-       send_dto = &iser_ctask->desc.dto;
-       send_dto->ctask = iser_ctask;
-       iser_create_send_desc(iser_conn, &iser_ctask->desc);
+       iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+       send_dto = &iser_task->desc.dto;
+       send_dto->task = iser_task;
+       iser_create_send_desc(iser_conn, &iser_task->desc);
 
        if (hdr->flags & ISCSI_FLAG_CMD_READ)
-               data_buf = &iser_ctask->data[ISER_DIR_IN];
+               data_buf = &iser_task->data[ISER_DIR_IN];
        else
-               data_buf = &iser_ctask->data[ISER_DIR_OUT];
-
-       if (sc->use_sg) { /* using a scatter list */
-               data_buf->buf  = sc->request_buffer;
-               data_buf->size = sc->use_sg;
-       } else if (sc->request_bufflen) {
-               /* using a single buffer - convert it into one entry SG */
-               sg_init_one(&data_buf->sg_single,
-                           sc->request_buffer, sc->request_bufflen);
-               data_buf->buf   = &data_buf->sg_single;
-               data_buf->size  = 1;
+               data_buf = &iser_task->data[ISER_DIR_OUT];
+
+       if (scsi_sg_count(sc)) { /* using a scatter list */
+               data_buf->buf  = scsi_sglist(sc);
+               data_buf->size = scsi_sg_count(sc);
        }
 
-       data_buf->data_len = sc->request_bufflen;
+       data_buf->data_len = scsi_bufflen(sc);
 
        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-               err = iser_prepare_read_cmd(ctask, edtl);
+               err = iser_prepare_read_cmd(task, edtl);
                if (err)
                        goto send_command_error;
        }
        if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
-               err = iser_prepare_write_cmd(ctask,
-                                            ctask->imm_count,
-                                            ctask->imm_count +
-                                            ctask->unsol_count,
+               err = iser_prepare_write_cmd(task,
+                                            task->imm_count,
+                                            task->imm_count +
+                                            task->unsol_r2t.data_length,
                                             edtl);
                if (err)
                        goto send_command_error;
@@ -386,38 +355,32 @@ int iser_send_command(struct iscsi_conn     *conn,
        iser_reg_single(iser_conn->ib_conn->device,
                        send_dto->regd[0], DMA_TO_DEVICE);
 
-       if (iser_post_receive_control(conn) != 0) {
-               iser_err("post_recv failed!\n");
-               err = -ENOMEM;
-               goto send_command_error;
-       }
-
-       iser_ctask->status = ISER_TASK_STATUS_STARTED;
+       iser_task->status = ISER_TASK_STATUS_STARTED;
 
-       err = iser_post_send(&iser_ctask->desc);
+       err = iser_post_send(&iser_task->desc);
        if (!err)
                return 0;
 
 send_command_error:
        iser_dto_buffs_release(send_dto);
-       iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+       iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
        return err;
 }
 
 /**
  * iser_send_data_out - send data out PDU
  */
-int iser_send_data_out(struct iscsi_conn     *conn,
-                      struct iscsi_cmd_task *ctask,
+int iser_send_data_out(struct iscsi_conn *conn,
+                      struct iscsi_task *task,
                       struct iscsi_data *hdr)
 {
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
-       struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_desc *tx_desc = NULL;
        struct iser_dto *send_dto = NULL;
        unsigned long buf_offset;
        unsigned long data_seg_len;
-       unsigned int itt;
+       uint32_t itt;
        int err = 0;
 
        if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
@@ -425,10 +388,10 @@ int iser_send_data_out(struct iscsi_conn     *conn,
                return -EPERM;
        }
 
-       if (iser_check_xmit(conn, ctask))
-               return -EAGAIN;
+       if (iser_check_xmit(conn, task))
+               return -ENOBUFS;
 
-       itt = ntohl(hdr->itt);
+       itt = (__force uint32_t)hdr->itt;
        data_seg_len = ntoh24(hdr->dlength);
        buf_offset   = ntohl(hdr->offset);
 
@@ -446,7 +409,7 @@ int iser_send_data_out(struct iscsi_conn     *conn,
 
        /* build the tx desc regd header and add it to the tx desc dto */
        send_dto = &tx_desc->dto;
-       send_dto->ctask = iser_ctask;
+       send_dto->task = iser_task;
        iser_create_send_desc(iser_conn, tx_desc);
 
        iser_reg_single(iser_conn->ib_conn->device,
@@ -454,15 +417,15 @@ int iser_send_data_out(struct iscsi_conn     *conn,
 
        /* all data was registered for RDMA, we can use the lkey */
        iser_dto_add_regd_buff(send_dto,
-                              &iser_ctask->rdma_regd[ISER_DIR_OUT],
+                              &iser_task->rdma_regd[ISER_DIR_OUT],
                               buf_offset,
                               data_seg_len);
 
-       if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+       if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Offset:%ld & DSL:%ld in Data-Out "
                         "inconsistent with total len:%ld, itt:%d\n",
                         buf_offset, data_seg_len,
-                        iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+                        iser_task->data[ISER_DIR_OUT].data_len, itt);
                err = -EINVAL;
                goto send_data_out_error;
        }
@@ -482,15 +445,14 @@ send_data_out_error:
 }
 
 int iser_send_control(struct iscsi_conn *conn,
-                     struct iscsi_mgmt_task *mtask)
+                     struct iscsi_task *task)
 {
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
-       struct iser_desc *mdesc = mtask->dd_data;
+       struct iscsi_iser_task *iser_task = task->dd_data;
+       struct iser_desc *mdesc = &iser_task->desc;
        struct iser_dto *send_dto = NULL;
-       unsigned int itt;
        unsigned long data_seg_len;
-       int err = 0;
-       unsigned char opcode;
+       int err;
        struct iser_regd_buf *regd_buf;
        struct iser_device *device;
 
@@ -499,29 +461,27 @@ int iser_send_control(struct iscsi_conn *conn,
                return -EPERM;
        }
 
-       if (iser_check_xmit(conn,mtask))
-               return -EAGAIN;
+       if (iser_check_xmit(conn, task))
+               return -ENOBUFS;
 
        /* build the tx desc regd header and add it to the tx desc dto */
        mdesc->type = ISCSI_TX_CONTROL;
        send_dto = &mdesc->dto;
-       send_dto->ctask = NULL;
+       send_dto->task = NULL;
        iser_create_send_desc(iser_conn, mdesc);
 
        device = iser_conn->ib_conn->device;
 
        iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
 
-       itt = ntohl(mtask->hdr->itt);
-       opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
-       data_seg_len = ntoh24(mtask->hdr->dlength);
+       data_seg_len = ntoh24(task->hdr->dlength);
 
        if (data_seg_len > 0) {
                regd_buf = &mdesc->data_regd_buf;
                memset(regd_buf, 0, sizeof(struct iser_regd_buf));
                regd_buf->device = device;
-               regd_buf->virt_addr = mtask->data;
-               regd_buf->data_size = mtask->data_count;
+               regd_buf->virt_addr = task->data;
+               regd_buf->data_size = task->data_count;
                iser_reg_single(device, regd_buf,
                                DMA_TO_DEVICE);
                iser_dto_add_regd_buff(send_dto, regd_buf,
@@ -529,10 +489,10 @@ int iser_send_control(struct iscsi_conn *conn,
                                       data_seg_len);
        }
 
-       if (iser_post_receive_control(conn) != 0) {
-               iser_err("post_rcv_buff failed!\n");
-               err = -ENOMEM;
-               goto send_control_error;
+       if (task == conn->login_task) {
+               err = iser_post_recvl(iser_conn->ib_conn);
+               if (err)
+                       goto send_control_error;
        }
 
        err = iser_post_send(mdesc);
@@ -548,58 +508,80 @@ send_control_error:
 /**
  * iser_rcv_dto_completion - recv DTO completion
  */
-void iser_rcv_completion(struct iser_desc *rx_desc,
-                        unsigned long dto_xfer_len)
+void iser_rcv_completion(struct iser_rx_desc *rx_desc,
+                        unsigned long rx_xfer_len,
+                        struct iser_conn *ib_conn)
 {
-       struct iser_dto        *dto = &rx_desc->dto;
-       struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
-       struct iscsi_session *session = conn->iscsi_conn->session;
-       struct iscsi_cmd_task *ctask;
-       struct iscsi_iser_cmd_task *iser_ctask;
+       struct iscsi_iser_conn *conn = ib_conn->iser_conn;
+       struct iscsi_task *task;
+       struct iscsi_iser_task *iser_task;
        struct iscsi_hdr *hdr;
-       char   *rx_data = NULL;
-       int     rx_data_len = 0;
-       unsigned int itt;
        unsigned char opcode;
+       u64 rx_dma;
+       int rx_buflen, outstanding, count, err;
+
+       /* differentiate between login to all other PDUs */
+       if ((char *)rx_desc == ib_conn->login_buf) {
+               rx_dma = ib_conn->login_dma;
+               rx_buflen = ISER_RX_LOGIN_SIZE;
+       } else {
+               rx_dma = rx_desc->dma_addr;
+               rx_buflen = ISER_RX_PAYLOAD_SIZE;
+       }
 
-       hdr = &rx_desc->iscsi_header;
+       ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
+                       rx_buflen, DMA_FROM_DEVICE);
 
-       iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt);
+       hdr = &rx_desc->iscsi_header;
 
-       if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
-               rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
-               rx_data     = dto->regd[1]->virt_addr;
-               rx_data    += dto->offset[1];
-       }
+       iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
+                       hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
 
        opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 
        if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
-               itt = hdr->itt & ISCSI_ITT_MASK; /* mask out cid and age bits */
-               if (!(itt < session->cmds_max))
-                       iser_err("itt can't be matched to task!!!"
-                                "conn %p opcode %d cmds_max %d itt %d\n",
-                                conn->iscsi_conn,opcode,session->cmds_max,itt);
-               /* use the mapping given with the cmds array indexed by itt */
-               ctask = (struct iscsi_cmd_task *)session->cmds[itt];
-               iser_ctask = ctask->dd_data;
-               iser_dbg("itt %d ctask %p\n",itt,ctask);
-               iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-               iser_ctask_rdma_finalize(iser_ctask);
+               spin_lock(&conn->iscsi_conn->session->lock);
+               task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+               if (task)
+                       __iscsi_get_task(task);
+               spin_unlock(&conn->iscsi_conn->session->lock);
+
+               if (!task)
+                       iser_err("itt can't be matched to task!!! "
+                                "conn %p opcode %d itt %d\n",
+                                conn->iscsi_conn, opcode, hdr->itt);
+               else {
+                       iser_task = task->dd_data;
+                       iser_dbg("itt %d task %p\n",hdr->itt, task);
+                       iser_task->status = ISER_TASK_STATUS_COMPLETED;
+                       iser_task_rdma_finalize(iser_task);
+                       iscsi_put_task(task);
+               }
        }
 
-       iser_dto_buffs_release(dto);
-
-       iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+       iscsi_iser_recv(conn->iscsi_conn, hdr,
+               rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
 
-       kfree(rx_desc->data);
-       kmem_cache_free(ig.desc_cache, rx_desc);
+       ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
+                       rx_buflen, DMA_FROM_DEVICE);
 
        /* decrementing conn->post_recv_buf_count only --after-- freeing the   *
         * task eliminates the need to worry on tasks which are completed in   *
         * parallel to the execution of iser_conn_term. So the code that waits *
         * for the posted rx bufs refcount to become zero handles everything   */
        atomic_dec(&conn->ib_conn->post_recv_buf_count);
+
+       if (rx_dma == ib_conn->login_dma)
+               return;
+
+       outstanding = atomic_read(&ib_conn->post_recv_buf_count);
+       if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
+               count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
+                                               ISER_MIN_POSTED_RX);
+               err = iser_post_recvm(ib_conn, count);
+               if (err)
+                       iser_err("posting %d rx bufs err %d\n", count, err);
+       }
 }
 
 void iser_snd_completion(struct iser_desc *tx_desc)
@@ -608,7 +590,8 @@ void iser_snd_completion(struct iser_desc *tx_desc)
        struct iser_conn       *ib_conn = dto->ib_conn;
        struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
        struct iscsi_conn      *conn = iser_conn->iscsi_conn;
-       struct iscsi_mgmt_task *mtask;
+       struct iscsi_task *task;
+       int resume_tx = 0;
 
        iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
 
@@ -617,87 +600,82 @@ void iser_snd_completion(struct iser_desc *tx_desc)
        if (tx_desc->type == ISCSI_TX_DATAOUT)
                kmem_cache_free(ig.desc_cache, tx_desc);
 
+       if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
+           ISER_QP_MAX_REQ_DTOS)
+               resume_tx = 1;
+
        atomic_dec(&ib_conn->post_send_buf_count);
 
-       write_lock(conn->recv_lock);
-       if (conn->suspend_tx) {
+       if (resume_tx) {
                iser_dbg("%ld resuming tx\n",jiffies);
-               clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-               scsi_queue_work(conn->session->host, &conn->xmitwork);
+               iscsi_conn_queue_work(conn);
        }
-       write_unlock(conn->recv_lock);
 
        if (tx_desc->type == ISCSI_TX_CONTROL) {
                /* this arithmetic is legal by libiscsi dd_data allocation */
-               mtask = (void *) ((long)(void *)tx_desc -
-                                 sizeof(struct iscsi_mgmt_task));
-               if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
-                       struct iscsi_session *session = conn->session;
-
-                       spin_lock(&conn->session->lock);
-                       list_del(&mtask->running);
-                       __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
-                                   sizeof(void*));
-                       spin_unlock(&session->lock);
-               }
+               task = (void *) ((long)(void *)tx_desc -
+                                 sizeof(struct iscsi_task));
+               if (task->hdr->itt == RESERVED_ITT)
+                       iscsi_put_task(task);
        }
 }
 
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 {
-       iser_ctask->status = ISER_TASK_STATUS_INIT;
+       iser_task->status = ISER_TASK_STATUS_INIT;
 
-       iser_ctask->dir[ISER_DIR_IN] = 0;
-       iser_ctask->dir[ISER_DIR_OUT] = 0;
+       iser_task->dir[ISER_DIR_IN] = 0;
+       iser_task->dir[ISER_DIR_OUT] = 0;
 
-       iser_ctask->data[ISER_DIR_IN].data_len  = 0;
-       iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+       iser_task->data[ISER_DIR_IN].data_len  = 0;
+       iser_task->data[ISER_DIR_OUT].data_len = 0;
 
-       memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+       memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
               sizeof(struct iser_regd_buf));
-       memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+       memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
               sizeof(struct iser_regd_buf));
 }
 
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
        int deferred;
        int is_rdma_aligned = 1;
+       struct iser_regd_buf *regd;
 
        /* if we were reading, copy back to unaligned sglist,
         * anyway dma_unmap and free the copy
         */
-       if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+       if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
                is_rdma_aligned = 0;
-               iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+               iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
        }
-       if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+       if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
                is_rdma_aligned = 0;
-               iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+               iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
        }
 
-       if (iser_ctask->dir[ISER_DIR_IN]) {
-               deferred = iser_regd_buff_release
-                       (&iser_ctask->rdma_regd[ISER_DIR_IN]);
+       if (iser_task->dir[ISER_DIR_IN]) {
+               regd = &iser_task->rdma_regd[ISER_DIR_IN];
+               deferred = iser_regd_buff_release(regd);
                if (deferred) {
-                       iser_err("References remain for BUF-IN rdma reg\n");
-                       BUG();
+                       iser_err("%d references remain for BUF-IN rdma reg\n",
+                                atomic_read(&regd->ref_count));
                }
        }
 
-       if (iser_ctask->dir[ISER_DIR_OUT]) {
-               deferred = iser_regd_buff_release
-                       (&iser_ctask->rdma_regd[ISER_DIR_OUT]);
+       if (iser_task->dir[ISER_DIR_OUT]) {
+               regd = &iser_task->rdma_regd[ISER_DIR_OUT];
+               deferred = iser_regd_buff_release(regd);
                if (deferred) {
-                       iser_err("References remain for BUF-OUT rdma reg\n");
-                       BUG();
+                       iser_err("%d references remain for BUF-OUT rdma reg\n",
+                                atomic_read(&regd->ref_count));
                }
        }
 
        /* if the data was unaligned, it was already unmapped and then copied */
        if (is_rdma_aligned)
-               iser_dma_unmap_task_data(iser_ctask);
+               iser_dma_unmap_task_data(iser_task);
 }
 
 void iser_dto_buffs_release(struct iser_dto *dto)