[SCSI] qla2xxx: convert to use the data buffer accessors
drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

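/*
 * Editor's note on the data buffer accessor conversion (not part of the
 * original file text): the queueing paths below rely on the SCSI midlayer
 * accessors -- scsi_dma_map()/scsi_dma_unmap(), scsi_bufflen() and
 * scsi_for_each_sg() -- rather than touching the scsi_cmnd buffer fields
 * directly, so every command is handled through its mapped scatterlist.
 */
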
static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
static void qla2x00_isp_cmd(scsi_qla_host_t *ha);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                cflags = CF_WRITE;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                cflags = CF_READ;
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
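
/*
 * Example (illustrative): with dsds == 10, the Command Type 2 IOCB holds
 * 3 DSDs and one Continuation Type 0 IOCB holds the remaining 7, so
 * qla2x00_calc_iocbs_32(10) returns 2.
 */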

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
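
/*
 * Example (illustrative): with dsds == 10, the Command Type 3 IOCB holds
 * 2 DSDs and two Continuation Type 1 IOCBs hold 5 + 3 more, so
 * qla2x00_calc_iocbs_64(10) returns 3.
 */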

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
        cont_entry_t *cont_pkt;

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else {
                ha->request_ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else {
                ha->request_ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
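
/*
 * Both helpers above consume one request ring entry and wrap
 * req_ring_index at the end of the ring; the callers account for these
 * continuation entries up front via qla2x00_calc_iocbs_32/64().
 */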

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
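
/*
 * Each 32-bit DSD written above is a (DMA address, length) pair taken
 * straight from the mapped scatterlist via sg_dma_address()/sg_dma_len();
 * the 64-bit variant below additionally splits the address into low and
 * high dwords with LSD()/MSD().
 */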

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
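
/*
 * Summary of the DSD capacities used above and below: Command Type 2
 * holds 3 DSDs with 7 more per Continuation Type 0; Command Type 3 holds
 * 2 DSDs with 5 more per Continuation Type 1; the ISP24xx Command Type 7
 * holds 1 DSD with 5 more per Continuation Type 1.
 */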

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;

        /* Setup device pointers. */
        ret = 0;
        ha = sp->ha;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        /* So we know we haven't DMA-mapped anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (ha->marker_needed != 0) {
                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                ha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = ha->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (ha->outstanding_cmds[handle] == 0)
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        nseg = scsi_dma_map(cmd);
        if (nseg < 0)
                goto queuing_error;
        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
        if (ha->req_q_cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (ha->req_ring_index < cnt)
                        ha->req_q_cnt = cnt - ha->req_ring_index;
                else
                        ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
        }
        if (ha->req_q_cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        ha->current_outstanding_cmd = handle;
        ha->outstanding_cmds[handle] = sp;
        sp->ha = ha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        ha->req_q_cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(ha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
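
/*
 * Mapping in the queueing path above follows the accessor pattern:
 * scsi_dma_map() maps the command's scatterlist and returns the number
 * of mapped segments (0 when the command carries no data, negative on
 * failure), and on any queueing error scsi_dma_unmap() releases the
 * mapping before the error is returned; tot_dsds doubles as the
 * "did we map anything" flag.
 */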

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, ha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(ha);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        ret = __qla2x00_marker(ha, loop_id, lun, type);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (ret);
}
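
/*
 * __qla2x00_marker() expects ha->hardware_lock to already be held (it
 * ends up in qla2x00_req_pkt(), which requires the lock); qla2x00_marker()
 * is the wrapper that acquires the lock for callers that do not hold it.
 */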

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
        device_reg_t __iomem *reg = ha->iobase;
        request_t       *pkt = NULL;
        uint16_t        cnt;
        uint32_t        *dword_ptr;
        uint32_t        timer;
        uint16_t        req_cnt = 1;

        /* Wait 1 second for slot. */
        for (timer = HZ; timer; timer--) {
                if ((req_cnt + 2) >= ha->req_q_cnt) {
                        /* Calculate number of free request entries. */
                        if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
                                cnt = (uint16_t)RD_REG_DWORD(
                                    &reg->isp24.req_q_out);
                        else
                                cnt = qla2x00_debounce_register(
                                    ISP_REQ_Q_OUT(ha, &reg->isp));
                        if (ha->req_ring_index < cnt)
                                ha->req_q_cnt = cnt - ha->req_ring_index;
                        else
                                ha->req_q_cnt = ha->request_q_length -
                                    (ha->req_ring_index - cnt);
                }
                /* If room for request in request ring. */
                if ((req_cnt + 2) < ha->req_q_cnt) {
                        ha->req_q_cnt--;
                        pkt = ha->request_ring_ptr;

                        /* Zero out packet. */
                        dword_ptr = (uint32_t *)pkt;
                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                                *dword_ptr++ = 0;

                        /* Set system defined field. */
                        pkt->sys_define = (uint8_t)ha->req_ring_index;

                        /* Set entry count. */
                        pkt->entry_count = 1;

                        break;
                }

                /* Release ring specific lock */
                spin_unlock(&ha->hardware_lock);

                udelay(2);   /* 2 us */

                /* Check for pending interrupts. */
                /* During init we issue marker directly */
                if (!ha->marker_needed)
                        qla2x00_poll(ha);

                spin_lock_irq(&ha->hardware_lock);
        }
        if (!pkt) {
                DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
        }

        return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
        device_reg_t __iomem *reg = ha->iobase;

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        /* Set chip new ring index. */
        if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
                WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
                WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
                RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
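
/*
 * Example (illustrative): with dsds == 10, the Command Type 7 IOCB holds
 * 1 DSD and two Continuation Type 1 IOCBs hold 5 + 4 more, so
 * qla24xx_calc_iocbs(10) returns 3.
 */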

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_24xx __iomem *reg;

        /* Setup device pointers. */
        ret = 0;
        ha = sp->ha;
        reg = &ha->iobase->isp24;
        cmd = sp->cmd;
        /* So we know we haven't DMA-mapped anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (ha->marker_needed != 0) {
                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
                        return QLA_FUNCTION_FAILED;
                }
                ha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = ha->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (ha->outstanding_cmds[handle] == 0)
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        nseg = scsi_dma_map(cmd);
        if (nseg < 0)
                goto queuing_error;
        tot_dsds = nseg;

        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (ha->req_q_cnt < (req_cnt + 2)) {
                cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
                if (ha->req_ring_index < cnt)
                        ha->req_q_cnt = cnt - ha->req_ring_index;
                else
                        ha->req_q_cnt = ha->request_q_length -
                                (ha->req_ring_index - cnt);
        }
        if (ha->req_q_cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        ha->current_outstanding_cmd = handle;
        ha->outstanding_cmds[handle] = sp;
        sp->ha = ha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        ha->req_q_cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
        cmd_pkt->handle = handle;

        /* Zero out remaining portion of packet. */
        /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
        RD_REG_DWORD_RELAXED(&reg->req_q_in);           /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(ha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}