Merge branch 'for-next' into for-linus
[safe/jmp/linux-2.6] / drivers / scsi / libfc / fc_fcp.c
index ac5c148..774e7ac 100644 (file)
@@ -48,11 +48,10 @@ struct kmem_cache *scsi_pkt_cachep;
 #define FC_SRB_CMD_SENT                (1 << 0)        /* cmd has been sent */
 #define FC_SRB_RCV_STATUS      (1 << 1)        /* response has arrived */
 #define FC_SRB_ABORT_PENDING   (1 << 2)        /* cmd abort sent to device */
-#define FC_SRB_ABORTED         (1 << 3)        /* abort acknowleged */
+#define FC_SRB_ABORTED         (1 << 3)        /* abort acknowledged */
 #define FC_SRB_DISCONTIG       (1 << 4)        /* non-sequential data recvd */
 #define FC_SRB_COMPL           (1 << 5)        /* fc_io_compl has been run */
 #define FC_SRB_FCP_PROCESSING_TMO (1 << 6)     /* timer function processing */
-#define FC_SRB_NOMEM           (1 << 7)        /* dropped to out of mem */
 
 #define FC_SRB_READ            (1 << 1)
 #define FC_SRB_WRITE           (1 << 0)
@@ -69,14 +68,20 @@ struct kmem_cache *scsi_pkt_cachep;
 
 /**
  * struct fc_fcp_internal - FCP layer internal data
- * @scsi_pkt_pool:  Memory pool to draw FCP packets from
+ * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_queue_lock: Protects the scsi_pkt_queue
  * @scsi_pkt_queue: Current FCP packets
- * @throttled:     The FCP packet queue is throttled
+ * @last_can_queue_ramp_down_time: jiffies timestamp of the last can_queue ramp down
+ * @last_can_queue_ramp_up_time: jiffies timestamp of the last can_queue ramp up
+ * @max_can_queue: upper bound for can_queue when ramping up
  */
 struct fc_fcp_internal {
-       mempool_t        *scsi_pkt_pool;
-       struct list_head scsi_pkt_queue;
-       u8               throttled;
+       mempool_t               *scsi_pkt_pool;
+       spinlock_t              scsi_queue_lock;
+       struct list_head        scsi_pkt_queue;
+       unsigned long           last_can_queue_ramp_down_time;
+       unsigned long           last_can_queue_ramp_up_time;
+       int                     max_can_queue;
 };
 
 #define fc_get_scsi_internal(x)        ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -124,6 +129,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
 #define FC_SCSI_TM_TOV         (10 * HZ)
 #define FC_SCSI_REC_TOV                (2 * HZ)
 #define FC_HOST_RESET_TIMEOUT  (30 * HZ)
+#define FC_CAN_QUEUE_PERIOD    (60 * HZ)
 
 #define FC_MAX_ERROR_CNT       5
 #define FC_MAX_RECOV_RETRY     3
@@ -292,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
 {
        struct fc_lport *lport;
 
-       if (!fsp)
-               return;
-
        lport = fsp->lp;
        if ((fsp->req_flags & FC_SRB_READ) &&
            (lport->lro_enabled) && (lport->tt.ddp_setup)) {
@@ -327,6 +330,38 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
 }
 
 /**
+ * fc_fcp_can_queue_ramp_up() - increases can_queue
+ * @lport: lport to ramp up can_queue
+ *
+ * Locking notes: Called with Scsi_Host lock held
+ */
+static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
+{
+       struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+       int can_queue;
+
+       if (si->last_can_queue_ramp_up_time &&
+           (time_before(jiffies, si->last_can_queue_ramp_up_time +
+                        FC_CAN_QUEUE_PERIOD)))
+               return;
+
+       if (time_before(jiffies, si->last_can_queue_ramp_down_time +
+                       FC_CAN_QUEUE_PERIOD))
+               return;
+
+       si->last_can_queue_ramp_up_time = jiffies;
+
+       can_queue = lport->host->can_queue << 1;
+       if (can_queue >= si->max_can_queue) {
+               can_queue = si->max_can_queue;
+               si->last_can_queue_ramp_down_time = 0;
+       }
+       lport->host->can_queue = can_queue;
+       shost_printk(KERN_ERR, lport->host, "libfc: increased "
+                    "can_queue to %d.\n", can_queue);
+}
+
+/**
  * fc_fcp_can_queue_ramp_down() - reduces can_queue
  * @lport: lport to reduce can_queue
  *
@@ -335,17 +370,20 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
  * commands complete or timeout, then try again with a reduced
  * can_queue. Eventually we will hit the point where we run
  * on all reserved structs.
+ *
+ * Locking notes: Called with Scsi_Host lock held
  */
 static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
 {
        struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
-       unsigned long flags;
        int can_queue;
 
-       spin_lock_irqsave(lport->host->host_lock, flags);
-       if (si->throttled)
-               goto done;
-       si->throttled = 1;
+       if (si->last_can_queue_ramp_down_time &&
+           (time_before(jiffies, si->last_can_queue_ramp_down_time +
+                        FC_CAN_QUEUE_PERIOD)))
+               return;
+
+       si->last_can_queue_ramp_down_time = jiffies;
 
        can_queue = lport->host->can_queue;
        can_queue >>= 1;
@@ -354,8 +392,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
        lport->host->can_queue = can_queue;
        shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
                     "Reducing can_queue to %d.\n", can_queue);
-done:
-       spin_unlock_irqrestore(lport->host->host_lock, flags);
 }
 
 /*
@@ -370,11 +406,17 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
                                                  size_t len)
 {
        struct fc_frame *fp;
+       unsigned long flags;
 
        fp = fc_frame_alloc(lport, len);
-       if (!fp)
-               fc_fcp_can_queue_ramp_down(lport);
-       return fp;
+       if (likely(fp))
+               return fp;
+
+       /* error case */
+       spin_lock_irqsave(lport->host->host_lock, flags);
+       fc_fcp_can_queue_ramp_down(lport);
+       spin_unlock_irqrestore(lport->host->host_lock, flags);
+       return NULL;
 }
 
 /**
@@ -477,7 +519,7 @@ crc_err:
  *
  * Called after receiving a Transfer Ready data descriptor.
  * If the LLD is capable of sequence offload then send down the
- * seq_blen ammount of data in single frame, otherwise send
+ * seq_blen amount of data in single frame, otherwise send
  * multiple frames of the maximum frame payload supported by
  * the target port.
  */
@@ -489,11 +531,13 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
        struct scatterlist *sg;
        struct fc_frame *fp = NULL;
        struct fc_lport *lport = fsp->lp;
+       struct page *page;
        size_t remaining;
        size_t t_blen;
        size_t tlen;
        size_t sg_bytes;
        size_t frame_offset, fh_parm_offset;
+       size_t off;
        int error;
        void *data = NULL;
        void *page_addr;
@@ -564,28 +608,26 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
                        fh_parm_offset = frame_offset;
                        fr_max_payload(fp) = fsp->max_payload;
                }
+
+               off = offset + sg->offset;
                sg_bytes = min(tlen, sg->length - offset);
+               sg_bytes = min(sg_bytes,
+                              (size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
+               page = sg_page(sg) + (off >> PAGE_SHIFT);
                if (using_sg) {
-                       get_page(sg_page(sg));
+                       get_page(page);
                        skb_fill_page_desc(fp_skb(fp),
                                           skb_shinfo(fp_skb(fp))->nr_frags,
-                                          sg_page(sg), sg->offset + offset,
-                                          sg_bytes);
+                                          page, off & ~PAGE_MASK, sg_bytes);
                        fp_skb(fp)->data_len += sg_bytes;
                        fr_len(fp) += sg_bytes;
                        fp_skb(fp)->truesize += PAGE_SIZE;
                } else {
-                       size_t off = offset + sg->offset;
-
                        /*
                         * The scatterlist item may be bigger than PAGE_SIZE,
                         * but we must not cross pages inside the kmap.
                         */
-                       sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
-                                                          (off & ~PAGE_MASK)));
-                       page_addr = kmap_atomic(sg_page(sg) +
-                                               (off >> PAGE_SHIFT),
-                                               KM_SOFTIRQ0);
+                       page_addr = kmap_atomic(page, KM_SOFTIRQ0);
                        memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
                               sg_bytes);
                        kunmap_atomic(page_addr, KM_SOFTIRQ0);
@@ -720,8 +762,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
                                      (size_t) ntohl(dd->ft_burst_len));
                if (!rc)
                        seq->rec_data = fsp->xfer_len;
-               else if (rc == -ENOMEM)
-                       fsp->state |= FC_SRB_NOMEM;
        } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
                /*
                 * received a DATA frame
@@ -951,7 +991,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
        struct scsi_cmnd *sc_cmd;
        unsigned long flags;
 
-       spin_lock_irqsave(lport->host->host_lock, flags);
+       spin_lock_irqsave(&si->scsi_queue_lock, flags);
 restart:
        list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
                sc_cmd = fsp->cmd;
@@ -962,7 +1002,7 @@ restart:
                        continue;
 
                fc_fcp_pkt_hold(fsp);
-               spin_unlock_irqrestore(lport->host->host_lock, flags);
+               spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
                if (!fc_fcp_lock_pkt(fsp)) {
                        fc_fcp_cleanup_cmd(fsp, error);
@@ -971,14 +1011,14 @@ restart:
                }
 
                fc_fcp_pkt_release(fsp);
-               spin_lock_irqsave(lport->host->host_lock, flags);
+               spin_lock_irqsave(&si->scsi_queue_lock, flags);
                /*
                 * while we dropped the lock multiple pkts could
                 * have been released, so we have to start over.
                 */
                goto restart;
        }
-       spin_unlock_irqrestore(lport->host->host_lock, flags);
+       spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 }
 
 /**
@@ -996,11 +1036,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport)
  * @fsp:   The FCP packet to send
  *
  * Return:  Zero for success and -1 for failure
- * Locks:   Called with the host lock and irqs disabled.
+ * Locks:   Called without locks held
  */
 static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 {
        struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+       unsigned long flags;
        int rc;
 
        fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1010,13 +1051,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
        int_to_scsilun(fsp->cmd->device->lun,
                       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
        memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
-       list_add_tail(&fsp->list, &si->scsi_pkt_queue);
 
-       spin_unlock_irq(lport->host->host_lock);
+       spin_lock_irqsave(&si->scsi_queue_lock, flags);
+       list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+       spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
        rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
-       spin_lock_irq(lport->host->host_lock);
-       if (rc)
+       if (unlikely(rc)) {
+               spin_lock_irqsave(&si->scsi_queue_lock, flags);
                list_del(&fsp->list);
+               spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+       }
 
        return rc;
 }
@@ -1713,6 +1757,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
        struct fcoe_dev_stats *stats;
 
        lport = shost_priv(sc_cmd->device->host);
+       spin_unlock_irq(lport->host->host_lock);
 
        rval = fc_remote_port_chkready(rport);
        if (rval) {
@@ -1734,6 +1779,8 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
        rpriv = rport->dd_data;
 
        if (!fc_fcp_lport_queue_ready(lport)) {
+               if (lport->qfull)
+                       fc_fcp_can_queue_ramp_down(lport);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
@@ -1793,6 +1840,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
                rc = SCSI_MLQUEUE_HOST_BUSY;
        }
 out:
+       spin_lock_irq(lport->host->host_lock);
        return rc;
 }
 EXPORT_SYMBOL(fc_queuecommand);
@@ -1823,28 +1871,21 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 
        lport = fsp->lp;
        si = fc_get_scsi_internal(lport);
-       spin_lock_irqsave(lport->host->host_lock, flags);
-       if (!fsp->cmd) {
-               spin_unlock_irqrestore(lport->host->host_lock, flags);
+       if (!fsp->cmd)
                return;
-       }
 
        /*
-        * if a command timed out while we had to try and throttle IO
-        * and it is now getting cleaned up, then we are about to
-        * try again so clear the throttled flag incase we get more
-        * time outs.
+        * if can_queue ramp down is done then try can_queue ramp up
+        * since commands are completing now.
         */
-       if (si->throttled && fsp->state & FC_SRB_NOMEM)
-               si->throttled = 0;
+       if (si->last_can_queue_ramp_down_time)
+               fc_fcp_can_queue_ramp_up(lport);
 
        sc_cmd = fsp->cmd;
        fsp->cmd = NULL;
 
-       if (!sc_cmd->SCp.ptr) {
-               spin_unlock_irqrestore(lport->host->host_lock, flags);
+       if (!sc_cmd->SCp.ptr)
                return;
-       }
 
        CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
        switch (fsp->status_code) {
@@ -1906,10 +1947,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
                break;
        }
 
+       spin_lock_irqsave(&si->scsi_queue_lock, flags);
        list_del(&fsp->list);
+       spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
        sc_cmd->SCp.ptr = NULL;
        sc_cmd->scsi_done(sc_cmd);
-       spin_unlock_irqrestore(lport->host->host_lock, flags);
 
        /* release ref from initial allocation in queue command */
        fc_fcp_pkt_release(fsp);
@@ -1990,7 +2032,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
        fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
        if (fsp == NULL) {
                printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
-               sc_cmd->result = DID_NO_CONNECT << 16;
                goto out;
        }
 
@@ -2176,7 +2217,9 @@ int fc_fcp_init(struct fc_lport *lport)
        if (!si)
                return -ENOMEM;
        lport->scsi_priv = si;
+       si->max_can_queue = lport->host->can_queue;
        INIT_LIST_HEAD(&si->scsi_pkt_queue);
+       spin_lock_init(&si->scsi_queue_lock);
 
        si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
        if (!si->scsi_pkt_pool) {