1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/slab.h>
12 #include <linux/delay.h>
13
14 static int qla24xx_vport_disable(struct fc_vport *, bool);
15 static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
16 int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
17 static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
18 /* SYSFS attributes --------------------------------------------------------- */
19
20 static ssize_t
21 qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
22                            struct bin_attribute *bin_attr,
23                            char *buf, loff_t off, size_t count)
24 {
25         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
26             struct device, kobj)));
27         struct qla_hw_data *ha = vha->hw;
28
29         if (ha->fw_dump_reading == 0)
30                 return 0;
31
32         return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
33                                         ha->fw_dump_len);
34 }
35
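/*
 * Control interface for the 'fw_dump' binary attribute.  A single decimal
 * value written to the file selects the action:
 *   0 - clear a previously captured firmware dump
 *   1 - mark a captured dump as ready for reading back through 'fw_dump'
 *   2 - (re)allocate the firmware dump buffer
 *   3 - force a firmware system error (which produces a new dump)
 * Typical usage from user space (paths are typical, not guaranteed):
 *   echo 1 > /sys/class/scsi_host/hostN/device/fw_dump
 *   dd if=/sys/class/scsi_host/hostN/device/fw_dump of=fw.dump
 */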
36 static ssize_t
37 qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
38                             struct bin_attribute *bin_attr,
39                             char *buf, loff_t off, size_t count)
40 {
41         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
42             struct device, kobj)));
43         struct qla_hw_data *ha = vha->hw;
44         int reading;
45
46         if (off != 0)
47                 return (0);
48
49         reading = simple_strtol(buf, NULL, 10);
50         switch (reading) {
51         case 0:
52                 if (!ha->fw_dump_reading)
53                         break;
54
55                 qla_printk(KERN_INFO, ha,
56                     "Firmware dump cleared on (%ld).\n", vha->host_no);
57
58                 ha->fw_dump_reading = 0;
59                 ha->fw_dumped = 0;
60                 break;
61         case 1:
62                 if (ha->fw_dumped && !ha->fw_dump_reading) {
63                         ha->fw_dump_reading = 1;
64
65                         qla_printk(KERN_INFO, ha,
66                             "Raw firmware dump ready for read on (%ld).\n",
67                             vha->host_no);
68                 }
69                 break;
70         case 2:
71                 qla2x00_alloc_fw_dump(vha);
72                 break;
73         case 3:
74                 qla2x00_system_error(vha);
75                 break;
76         }
77         return (count);
78 }
79
80 static struct bin_attribute sysfs_fw_dump_attr = {
81         .attr = {
82                 .name = "fw_dump",
83                 .mode = S_IRUSR | S_IWUSR,
84         },
85         .size = 0,
86         .read = qla2x00_sysfs_read_fw_dump,
87         .write = qla2x00_sysfs_write_fw_dump,
88 };
89
90 static ssize_t
91 qla2x00_sysfs_read_nvram(struct kobject *kobj,
92                          struct bin_attribute *bin_attr,
93                          char *buf, loff_t off, size_t count)
94 {
95         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
96             struct device, kobj)));
97         struct qla_hw_data *ha = vha->hw;
98
99         if (!capable(CAP_SYS_ADMIN))
100                 return 0;
101
102         if (IS_NOCACHE_VPD_TYPE(ha))
103                 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
104                     ha->nvram_size);
105         return memory_read_from_buffer(buf, count, &off, ha->nvram,
106                                         ha->nvram_size);
107 }
108
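/*
 * Writing a full NVRAM image recomputes the trailing checksum (32-bit
 * little-endian words on FWI2-capable ISPs, bytes otherwise), programs the
 * part, reads it back into the driver's cache, and then schedules an ISP
 * abort so the new settings take effect.
 */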
109 static ssize_t
110 qla2x00_sysfs_write_nvram(struct kobject *kobj,
111                           struct bin_attribute *bin_attr,
112                           char *buf, loff_t off, size_t count)
113 {
114         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
115             struct device, kobj)));
116         struct qla_hw_data *ha = vha->hw;
117         uint16_t        cnt;
118
119         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
120             !ha->isp_ops->write_nvram)
121                 return 0;
122
123         /* Checksum NVRAM. */
124         if (IS_FWI2_CAPABLE(ha)) {
125                 uint32_t *iter;
126                 uint32_t chksum;
127
128                 iter = (uint32_t *)buf;
129                 chksum = 0;
130                 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
131                         chksum += le32_to_cpu(*iter++);
132                 chksum = ~chksum + 1;
133                 *iter = cpu_to_le32(chksum);
134         } else {
135                 uint8_t *iter;
136                 uint8_t chksum;
137
138                 iter = (uint8_t *)buf;
139                 chksum = 0;
140                 for (cnt = 0; cnt < count - 1; cnt++)
141                         chksum += *iter++;
142                 chksum = ~chksum + 1;
143                 *iter = chksum;
144         }
145
146         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
147                 qla_printk(KERN_WARNING, ha,
148                     "HBA not online, failing NVRAM update.\n");
149                 return -EAGAIN;
150         }
151
152         /* Write NVRAM. */
153         ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
154         ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
155             count);
156
157         /* Make the new NVRAM settings take effect by forcing an ISP reset. */
158         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
159         qla2xxx_wake_dpc(vha);
160         qla2x00_wait_for_chip_reset(vha);
161
162         return (count);
163 }
164
165 static struct bin_attribute sysfs_nvram_attr = {
166         .attr = {
167                 .name = "nvram",
168                 .mode = S_IRUSR | S_IWUSR,
169         },
170         .size = 512,
171         .read = qla2x00_sysfs_read_nvram,
172         .write = qla2x00_sysfs_write_nvram,
173 };
174
175 static ssize_t
176 qla2x00_sysfs_read_optrom(struct kobject *kobj,
177                           struct bin_attribute *bin_attr,
178                           char *buf, loff_t off, size_t count)
179 {
180         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
181             struct device, kobj)));
182         struct qla_hw_data *ha = vha->hw;
183
184         if (ha->optrom_state != QLA_SREADING)
185                 return 0;
186
187         return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
188                                         ha->optrom_region_size);
189 }
190
191 static ssize_t
192 qla2x00_sysfs_write_optrom(struct kobject *kobj,
193                            struct bin_attribute *bin_attr,
194                            char *buf, loff_t off, size_t count)
195 {
196         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
197             struct device, kobj)));
198         struct qla_hw_data *ha = vha->hw;
199
200         if (ha->optrom_state != QLA_SWRITING)
201                 return -EINVAL;
202         if (off > ha->optrom_region_size)
203                 return -ERANGE;
204         if (off + count > ha->optrom_region_size)
205                 count = ha->optrom_region_size - off;
206
207         memcpy(&ha->optrom_buffer[off], buf, count);
208
209         return count;
210 }
211
212 static struct bin_attribute sysfs_optrom_attr = {
213         .attr = {
214                 .name = "optrom",
215                 .mode = S_IRUSR | S_IWUSR,
216         },
217         .size = 0,
218         .read = qla2x00_sysfs_read_optrom,
219         .write = qla2x00_sysfs_write_optrom,
220 };
221
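/*
 * 'optrom_ctl' drives a small staging state machine for flash access.
 * Input is "<cmd>[:<start>:<size>]" (command in decimal, start/size in hex):
 *   0 - free the staging buffer and return to the idle state
 *   1 - read the selected flash region into the staging buffer; the data is
 *       then available through the 'optrom' attribute
 *   2 - stage a flash update; the image is supplied through 'optrom'
 *   3 - burn the staged image to flash
 */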
222 static ssize_t
223 qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
224                                struct bin_attribute *bin_attr,
225                                char *buf, loff_t off, size_t count)
226 {
227         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
228             struct device, kobj)));
229         struct qla_hw_data *ha = vha->hw;
230
231         uint32_t start = 0;
232         uint32_t size = ha->optrom_size;
233         int val, valid;
234
235         if (off)
236                 return 0;
237
238         if (unlikely(pci_channel_offline(ha->pdev)))
239                 return 0;
240
241         if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
242                 return -EINVAL;
243         if (start > ha->optrom_size)
244                 return -EINVAL;
245
246         switch (val) {
247         case 0:
248                 if (ha->optrom_state != QLA_SREADING &&
249                     ha->optrom_state != QLA_SWRITING)
250                         break;
251
252                 ha->optrom_state = QLA_SWAITING;
253
254                 DEBUG2(qla_printk(KERN_INFO, ha,
255                     "Freeing flash region allocation -- 0x%x bytes.\n",
256                     ha->optrom_region_size));
257
258                 vfree(ha->optrom_buffer);
259                 ha->optrom_buffer = NULL;
260                 break;
261         case 1:
262                 if (ha->optrom_state != QLA_SWAITING)
263                         break;
264
265                 ha->optrom_region_start = start;
266                 ha->optrom_region_size = start + size > ha->optrom_size ?
267                     ha->optrom_size - start : size;
268
269                 ha->optrom_state = QLA_SREADING;
270                 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
271                 if (ha->optrom_buffer == NULL) {
272                         qla_printk(KERN_WARNING, ha,
273                             "Unable to allocate memory for optrom retrieval "
274                             "(%x).\n", ha->optrom_region_size);
275
276                         ha->optrom_state = QLA_SWAITING;
277                         return count;
278                 }
279
280                 DEBUG2(qla_printk(KERN_INFO, ha,
281                     "Reading flash region -- 0x%x/0x%x.\n",
282                     ha->optrom_region_start, ha->optrom_region_size));
283
284                 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
285                 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
286                     ha->optrom_region_start, ha->optrom_region_size);
287                 break;
288         case 2:
289                 if (ha->optrom_state != QLA_SWAITING)
290                         break;
291
292                 /*
293                  * We need to be more restrictive on which FLASH regions are
294                  * allowed to be updated via user-space.  Regions accessible
295                  * via this method include:
296                  *
297                  * ISP21xx/ISP22xx/ISP23xx type boards:
298                  *
299                  *      0x000000 -> 0x020000 -- Boot code.
300                  *
301                  * ISP2322/ISP24xx type boards:
302                  *
303                  *      0x000000 -> 0x07ffff -- Boot code.
304                  *      0x080000 -> 0x0fffff -- Firmware.
305                  *
306                  * ISP25xx type boards:
307                  *
308                  *      0x000000 -> 0x07ffff -- Boot code.
309                  *      0x080000 -> 0x0fffff -- Firmware.
310                  *      0x120000 -> 0x12ffff -- VPD and HBA parameters.
311                  */
312                 valid = 0;
313                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
314                         valid = 1;
315                 else if (start == (ha->flt_region_boot * 4) ||
316                     start == (ha->flt_region_fw * 4))
317                         valid = 1;
318                 else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
319                         valid = 1;
320                 if (!valid) {
321                         qla_printk(KERN_WARNING, ha,
322                             "Invalid start region 0x%x/0x%x.\n", start, size);
323                         return -EINVAL;
324                 }
325
326                 ha->optrom_region_start = start;
327                 ha->optrom_region_size = start + size > ha->optrom_size ?
328                     ha->optrom_size - start : size;
329
330                 ha->optrom_state = QLA_SWRITING;
331                 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
332                 if (ha->optrom_buffer == NULL) {
333                         qla_printk(KERN_WARNING, ha,
334                             "Unable to allocate memory for optrom update "
335                             "(%x).\n", ha->optrom_region_size);
336
337                         ha->optrom_state = QLA_SWAITING;
338                         return count;
339                 }
340
341                 DEBUG2(qla_printk(KERN_INFO, ha,
342                     "Staging flash region write -- 0x%x/0x%x.\n",
343                     ha->optrom_region_start, ha->optrom_region_size));
344
345                 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
346                 break;
347         case 3:
348                 if (ha->optrom_state != QLA_SWRITING)
349                         break;
350
351                 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
352                         qla_printk(KERN_WARNING, ha,
353                             "HBA not online, failing flash update.\n");
354                         return -EAGAIN;
355                 }
356
357                 DEBUG2(qla_printk(KERN_INFO, ha,
358                     "Writing flash region -- 0x%x/0x%x.\n",
359                     ha->optrom_region_start, ha->optrom_region_size));
360
361                 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
362                     ha->optrom_region_start, ha->optrom_region_size);
363                 break;
364         default:
365                 count = -EINVAL;
366         }
367         return count;
368 }
369
370 static struct bin_attribute sysfs_optrom_ctl_attr = {
371         .attr = {
372                 .name = "optrom_ctl",
373                 .mode = S_IWUSR,
374         },
375         .size = 0,
376         .write = qla2x00_sysfs_write_optrom_ctl,
377 };
378
379 static ssize_t
380 qla2x00_sysfs_read_vpd(struct kobject *kobj,
381                        struct bin_attribute *bin_attr,
382                        char *buf, loff_t off, size_t count)
383 {
384         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
385             struct device, kobj)));
386         struct qla_hw_data *ha = vha->hw;
387
388         if (unlikely(pci_channel_offline(ha->pdev)))
389                 return 0;
390
391         if (!capable(CAP_SYS_ADMIN))
392                 return 0;
393
394         if (IS_NOCACHE_VPD_TYPE(ha))
395                 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
396                     ha->vpd_size);
397         return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
398 }
399
400 static ssize_t
401 qla2x00_sysfs_write_vpd(struct kobject *kobj,
402                         struct bin_attribute *bin_attr,
403                         char *buf, loff_t off, size_t count)
404 {
405         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
406             struct device, kobj)));
407         struct qla_hw_data *ha = vha->hw;
408         uint8_t *tmp_data;
409
410         if (unlikely(pci_channel_offline(ha->pdev)))
411                 return 0;
412
413         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
414             !ha->isp_ops->write_nvram)
415                 return 0;
416
417         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
418                 qla_printk(KERN_WARNING, ha,
419                     "HBA not online, failing VPD update.\n");
420                 return -EAGAIN;
421         }
422
423         /* Write the VPD through the NVRAM access routines. */
424         ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
425         ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
426
427         /* Update flash version information for 4Gb & above. */
428         if (!IS_FWI2_CAPABLE(ha))
429                 goto done;
430
431         tmp_data = vmalloc(256);
432         if (!tmp_data) {
433                 qla_printk(KERN_WARNING, ha,
434                     "Unable to allocate memory for VPD information update.\n");
435                 goto done;
436         }
437         ha->isp_ops->get_flash_version(vha, tmp_data);
438         vfree(tmp_data);
439 done:
440         return count;
441 }
442
443 static struct bin_attribute sysfs_vpd_attr = {
444         .attr = {
445                 .name = "vpd",
446                 .mode = S_IRUSR | S_IWUSR,
447         },
448         .size = 0,
449         .read = qla2x00_sysfs_read_vpd,
450         .write = qla2x00_sysfs_write_vpd,
451 };
452
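/*
 * Returns the raw SFP transceiver data.  The read is performed in
 * SFP_BLOCK_SIZE chunks, first from the module's 0xa0 device address and
 * then from 0xa2, for a total of SFP_DEV_SIZE * 2 bytes.
 */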
453 static ssize_t
454 qla2x00_sysfs_read_sfp(struct kobject *kobj,
455                        struct bin_attribute *bin_attr,
456                        char *buf, loff_t off, size_t count)
457 {
458         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
459             struct device, kobj)));
460         struct qla_hw_data *ha = vha->hw;
461         uint16_t iter, addr, offset;
462         int rval;
463
464         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
465                 return 0;
466
467         if (ha->sfp_data)
468                 goto do_read;
469
470         ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
471             &ha->sfp_data_dma);
472         if (!ha->sfp_data) {
473                 qla_printk(KERN_WARNING, ha,
474                     "Unable to allocate memory for SFP read-data.\n");
475                 return 0;
476         }
477
478 do_read:
479         memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
480         addr = 0xa0;
481         for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
482             iter++, offset += SFP_BLOCK_SIZE) {
483                 if (iter == 4) {
484                         /* Skip to next device address. */
485                         addr = 0xa2;
486                         offset = 0;
487                 }
488
489                 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
490                     SFP_BLOCK_SIZE);
491                 if (rval != QLA_SUCCESS) {
492                         qla_printk(KERN_WARNING, ha,
493                             "Unable to read SFP data (%x/%x/%x).\n", rval,
494                             addr, offset);
495                         count = 0;
496                         break;
497                 }
498                 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
499                 buf += SFP_BLOCK_SIZE;
500         }
501
502         return count;
503 }
504
505 static struct bin_attribute sysfs_sfp_attr = {
506         .attr = {
507                 .name = "sfp",
508                 .mode = S_IRUSR | S_IWUSR,
509         },
510         .size = SFP_DEV_SIZE * 2,
511         .read = qla2x00_sysfs_read_sfp,
512 };
513
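/*
 * Host reset control.  The value written selects the reset type:
 *   0x2025c - full ISP reset (SCSI requests are blocked for the duration)
 *   0x2025d - MPI firmware reset, ISP81xx only
 */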
514 static ssize_t
515 qla2x00_sysfs_write_reset(struct kobject *kobj,
516                         struct bin_attribute *bin_attr,
517                         char *buf, loff_t off, size_t count)
518 {
519         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
520             struct device, kobj)));
521         struct qla_hw_data *ha = vha->hw;
522         int type;
523
524         if (off != 0)
525                 return 0;
526
527         type = simple_strtol(buf, NULL, 10);
528         switch (type) {
529         case 0x2025c:
530                 qla_printk(KERN_INFO, ha,
531                     "Issuing ISP reset on (%ld).\n", vha->host_no);
532
533                 scsi_block_requests(vha->host);
534                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
535                 qla2xxx_wake_dpc(vha);
536                 qla2x00_wait_for_chip_reset(vha);
537                 scsi_unblock_requests(vha->host);
538                 break;
539         case 0x2025d:
540                 if (!IS_QLA81XX(ha))
541                         break;
542
543                 qla_printk(KERN_INFO, ha,
544                     "Issuing MPI reset on (%ld).\n", vha->host_no);
545
546                 /* Make sure FC side is not in reset */
547                 qla2x00_wait_for_hba_online(vha);
548
549                 /* Issue MPI reset */
550                 scsi_block_requests(vha->host);
551                 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
552                         qla_printk(KERN_WARNING, ha,
553                             "MPI reset failed on (%ld).\n", vha->host_no);
554                 scsi_unblock_requests(vha->host);
555                 break;
556         }
557         return count;
558 }
559
560 static struct bin_attribute sysfs_reset_attr = {
561         .attr = {
562                 .name = "reset",
563                 .mode = S_IWUSR,
564         },
565         .size = 0,
566         .write = qla2x00_sysfs_write_reset,
567 };
568
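/*
 * Pass-through write to an external device controller (EDC).  The buffer
 * starts with four little-endian 16-bit words -- dev, adr, opt and len --
 * followed by 'len' bytes of payload, which are copied into a DMA buffer
 * and handed to the firmware.
 */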
569 static ssize_t
570 qla2x00_sysfs_write_edc(struct kobject *kobj,
571                         struct bin_attribute *bin_attr,
572                         char *buf, loff_t off, size_t count)
573 {
574         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
575             struct device, kobj)));
576         struct qla_hw_data *ha = vha->hw;
577         uint16_t dev, adr, opt, len;
578         int rval;
579
580         ha->edc_data_len = 0;
581
582         if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
583                 return 0;
584
585         if (!ha->edc_data) {
586                 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
587                     &ha->edc_data_dma);
588                 if (!ha->edc_data) {
589                         DEBUG2(qla_printk(KERN_INFO, ha,
590                             "Unable to allocate memory for EDC write.\n"));
591                         return 0;
592                 }
593         }
594
595         dev = le16_to_cpup((void *)&buf[0]);
596         adr = le16_to_cpup((void *)&buf[2]);
597         opt = le16_to_cpup((void *)&buf[4]);
598         len = le16_to_cpup((void *)&buf[6]);
599
600         if (!(opt & BIT_0))
601                 if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
602                         return -EINVAL;
603
604         memcpy(ha->edc_data, &buf[8], len);
605
606         rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
607             ha->edc_data, len, opt);
608         if (rval != QLA_SUCCESS) {
609                 DEBUG2(qla_printk(KERN_INFO, ha,
610                     "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
611                     rval, dev, adr, opt, len, *buf));
612                 return 0;
613         }
614
615         return count;
616 }
617
618 static struct bin_attribute sysfs_edc_attr = {
619         .attr = {
620                 .name = "edc",
621                 .mode = S_IWUSR,
622         },
623         .size = 0,
624         .write = qla2x00_sysfs_write_edc,
625 };
626
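/*
 * Companion to 'edc': the same dev/adr/opt/len header triggers a read from
 * the external device.  The returned data is cached in ha->edc_data and
 * handed back through the read side of the 'edc_status' attribute.
 */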
627 static ssize_t
628 qla2x00_sysfs_write_edc_status(struct kobject *kobj,
629                         struct bin_attribute *bin_attr,
630                         char *buf, loff_t off, size_t count)
631 {
632         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
633             struct device, kobj)));
634         struct qla_hw_data *ha = vha->hw;
635         uint16_t dev, adr, opt, len;
636         int rval;
637
638         ha->edc_data_len = 0;
639
640         if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
641                 return 0;
642
643         if (!ha->edc_data) {
644                 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
645                     &ha->edc_data_dma);
646                 if (!ha->edc_data) {
647                         DEBUG2(qla_printk(KERN_INFO, ha,
648                             "Unable to allocate memory for EDC status.\n"));
649                         return 0;
650                 }
651         }
652
653         dev = le16_to_cpup((void *)&buf[0]);
654         adr = le16_to_cpup((void *)&buf[2]);
655         opt = le16_to_cpup((void *)&buf[4]);
656         len = le16_to_cpup((void *)&buf[6]);
657
658         if (!(opt & BIT_0))
659                 if (len == 0 || len > DMA_POOL_SIZE)
660                         return -EINVAL;
661
662         memset(ha->edc_data, 0, len);
663         rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
664             ha->edc_data, len, opt);
665         if (rval != QLA_SUCCESS) {
666                 DEBUG2(qla_printk(KERN_INFO, ha,
667                     "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
668                     rval, dev, adr, opt, len));
669                 return 0;
670         }
671
672         ha->edc_data_len = len;
673
674         return count;
675 }
676
677 static ssize_t
678 qla2x00_sysfs_read_edc_status(struct kobject *kobj,
679                            struct bin_attribute *bin_attr,
680                            char *buf, loff_t off, size_t count)
681 {
682         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
683             struct device, kobj)));
684         struct qla_hw_data *ha = vha->hw;
685
686         if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
687                 return 0;
688
689         if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
690                 return -EINVAL;
691
692         memcpy(buf, ha->edc_data, ha->edc_data_len);
693
694         return ha->edc_data_len;
695 }
696
697 static struct bin_attribute sysfs_edc_status_attr = {
698         .attr = {
699                 .name = "edc_status",
700                 .mode = S_IRUSR | S_IWUSR,
701         },
702         .size = 0,
703         .write = qla2x00_sysfs_write_edc_status,
704         .read = qla2x00_sysfs_read_edc_status,
705 };
706
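/*
 * Returns the 10GbE (XGMAC) statistics block on ISP81xx-class adapters.
 * A coherent DMA buffer is allocated on first use; the firmware reports
 * the number of valid bytes, and no more than that is copied out.
 */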
707 static ssize_t
708 qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
709                        struct bin_attribute *bin_attr,
710                        char *buf, loff_t off, size_t count)
711 {
712         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
713             struct device, kobj)));
714         struct qla_hw_data *ha = vha->hw;
715         int rval;
716         uint16_t actual_size;
717
718         if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
719                 return 0;
720
721         if (ha->xgmac_data)
722                 goto do_read;
723
724         ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
725             &ha->xgmac_data_dma, GFP_KERNEL);
726         if (!ha->xgmac_data) {
727                 qla_printk(KERN_WARNING, ha,
728                     "Unable to allocate memory for XGMAC read-data.\n");
729                 return 0;
730         }
731
732 do_read:
733         actual_size = 0;
734         memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
735
736         rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
737             XGMAC_DATA_SIZE, &actual_size);
738         if (rval != QLA_SUCCESS) {
739                 qla_printk(KERN_WARNING, ha,
740                     "Unable to read XGMAC data (%x).\n", rval);
741                 count = 0;
742         }
743
744         count = actual_size > count ? count : actual_size;
745         memcpy(buf, ha->xgmac_data, count);
746
747         return count;
748 }
749
750 static struct bin_attribute sysfs_xgmac_stats_attr = {
751         .attr = {
752                 .name = "xgmac_stats",
753                 .mode = S_IRUSR,
754         },
755         .size = 0,
756         .read = qla2x00_sysfs_read_xgmac_stats,
757 };
758
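/*
 * Returns the raw DCBX TLV parameter block retrieved from the firmware,
 * using the same allocate-on-first-use DMA buffer pattern as 'xgmac_stats'.
 */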
759 static ssize_t
760 qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
761                        struct bin_attribute *bin_attr,
762                        char *buf, loff_t off, size_t count)
763 {
764         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
765             struct device, kobj)));
766         struct qla_hw_data *ha = vha->hw;
767         int rval;
768         uint16_t actual_size;
769
770         if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
771                 return 0;
772
773         if (ha->dcbx_tlv)
774                 goto do_read;
775
776         ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
777             &ha->dcbx_tlv_dma, GFP_KERNEL);
778         if (!ha->dcbx_tlv) {
779                 qla_printk(KERN_WARNING, ha,
780                     "Unable to allocate memory for DCBX TLV read-data.\n");
781                 return 0;
782         }
783
784 do_read:
785         actual_size = 0;
786         memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
787
788         rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
789             DCBX_TLV_DATA_SIZE);
790         if (rval != QLA_SUCCESS) {
791                 qla_printk(KERN_WARNING, ha,
792                     "Unable to read DCBX TLV data (%x).\n", rval);
793                 count = 0;
794         }
795
796         memcpy(buf, ha->dcbx_tlv, count);
797
798         return count;
799 }
800
801 static struct bin_attribute sysfs_dcbx_tlv_attr = {
802         .attr = {
803                 .name = "dcbx_tlv",
804                 .mode = S_IRUSR,
805         },
806         .size = 0,
807         .read = qla2x00_sysfs_read_dcbx_tlv,
808 };
809
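/*
 * Binary sysfs attributes exported on the Scsi_Host's kobject.  The
 * is4GBp_only field gates creation and removal by hardware family:
 *   0 - all ISPs, 1 - FWI2-capable (4Gb and newer), 2 - ISP25xx only,
 *   3 - ISP81xx only (see qla2x00_alloc_sysfs_attr()).
 */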
810 static struct sysfs_entry {
811         char *name;
812         struct bin_attribute *attr;
813         int is4GBp_only;
814 } bin_file_entries[] = {
815         { "fw_dump", &sysfs_fw_dump_attr, },
816         { "nvram", &sysfs_nvram_attr, },
817         { "optrom", &sysfs_optrom_attr, },
818         { "optrom_ctl", &sysfs_optrom_ctl_attr, },
819         { "vpd", &sysfs_vpd_attr, 1 },
820         { "sfp", &sysfs_sfp_attr, 1 },
821         { "reset", &sysfs_reset_attr, },
822         { "edc", &sysfs_edc_attr, 2 },
823         { "edc_status", &sysfs_edc_status_attr, 2 },
824         { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
825         { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
826         { NULL },
827 };
828
829 void
830 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
831 {
832         struct Scsi_Host *host = vha->host;
833         struct sysfs_entry *iter;
834         int ret;
835
836         for (iter = bin_file_entries; iter->name; iter++) {
837                 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
838                         continue;
839                 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
840                         continue;
841                 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
842                         continue;
843
844                 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
845                     iter->attr);
846                 if (ret)
847                         qla_printk(KERN_INFO, vha->hw,
848                             "Unable to create sysfs %s binary attribute "
849                             "(%d).\n", iter->name, ret);
850         }
851 }
852
853 void
854 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
855 {
856         struct Scsi_Host *host = vha->host;
857         struct sysfs_entry *iter;
858         struct qla_hw_data *ha = vha->hw;
859
860         for (iter = bin_file_entries; iter->name; iter++) {
861                 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
862                         continue;
863                 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
864                         continue;
865                 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
866                         continue;
867
868                 sysfs_remove_bin_file(&host->shost_gendev.kobj,
869                     iter->attr);
870         }
871
872         if (ha->beacon_blink_led == 1)
873                 ha->isp_ops->beacon_off(vha);
874 }
875
876 /* Scsi_Host attributes. */
877
878 static ssize_t
879 qla2x00_drvr_version_show(struct device *dev,
880                           struct device_attribute *attr, char *buf)
881 {
882         return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
883 }
884
885 static ssize_t
886 qla2x00_fw_version_show(struct device *dev,
887                         struct device_attribute *attr, char *buf)
888 {
889         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
890         struct qla_hw_data *ha = vha->hw;
891         char fw_str[128];
892
893         return snprintf(buf, PAGE_SIZE, "%s\n",
894             ha->isp_ops->fw_version_str(vha, fw_str));
895 }
896
897 static ssize_t
898 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
899                         char *buf)
900 {
901         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
902         struct qla_hw_data *ha = vha->hw;
903         uint32_t sn;
904
905         if (IS_FWI2_CAPABLE(ha)) {
906                 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
907                 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
908         }
909
910         sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
911         return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
912             sn % 100000);
913 }
914
915 static ssize_t
916 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
917                       char *buf)
918 {
919         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
920         return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
921 }
922
923 static ssize_t
924 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
925                     char *buf)
926 {
927         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
928         struct qla_hw_data *ha = vha->hw;
929         return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
930             ha->product_id[0], ha->product_id[1], ha->product_id[2],
931             ha->product_id[3]);
932 }
933
934 static ssize_t
935 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
936                         char *buf)
937 {
938         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
939         return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
940 }
941
942 static ssize_t
943 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
944                         char *buf)
945 {
946         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
947         return snprintf(buf, PAGE_SIZE, "%s\n",
948             vha->hw->model_desc ? vha->hw->model_desc : "");
949 }
950
951 static ssize_t
952 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
953                       char *buf)
954 {
955         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
956         char pci_info[30];
957
958         return snprintf(buf, PAGE_SIZE, "%s\n",
959             vha->hw->isp_ops->pci_info_str(vha, pci_info));
960 }
961
962 static ssize_t
963 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
964                         char *buf)
965 {
966         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
967         struct qla_hw_data *ha = vha->hw;
968         int len = 0;
969
970         if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
971             atomic_read(&vha->loop_state) == LOOP_DEAD)
972                 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
973         else if (atomic_read(&vha->loop_state) != LOOP_READY ||
974             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
975             test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
976                 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
977         else {
978                 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
979
980                 switch (ha->current_topology) {
981                 case ISP_CFG_NL:
982                         len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
983                         break;
984                 case ISP_CFG_FL:
985                         len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
986                         break;
987                 case ISP_CFG_N:
988                         len += snprintf(buf + len, PAGE_SIZE-len,
989                             "N_Port to N_Port\n");
990                         break;
991                 case ISP_CFG_F:
992                         len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
993                         break;
994                 default:
995                         len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
996                         break;
997                 }
998         }
999         return len;
1000 }
1001
1002 static ssize_t
1003 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1004                  char *buf)
1005 {
1006         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1007         int len = 0;
1008
1009         switch (vha->hw->zio_mode) {
1010         case QLA_ZIO_MODE_6:
1011                 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1012                 break;
1013         case QLA_ZIO_DISABLED:
1014                 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1015                 break;
1016         }
1017         return len;
1018 }
1019
1020 static ssize_t
1021 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1022                   const char *buf, size_t count)
1023 {
1024         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1025         struct qla_hw_data *ha = vha->hw;
1026         int val = 0;
1027         uint16_t zio_mode;
1028
1029         if (!IS_ZIO_SUPPORTED(ha))
1030                 return -ENOTSUPP;
1031
1032         if (sscanf(buf, "%d", &val) != 1)
1033                 return -EINVAL;
1034
1035         if (val)
1036                 zio_mode = QLA_ZIO_MODE_6;
1037         else
1038                 zio_mode = QLA_ZIO_DISABLED;
1039
1040         /* Update per-hba values and queue a reset. */
1041         if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1042                 ha->zio_mode = zio_mode;
1043                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1044         }
1045         return strlen(buf);
1046 }
1047
1048 static ssize_t
1049 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1050                        char *buf)
1051 {
1052         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1053
1054         return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1055 }
1056
1057 static ssize_t
1058 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1059                         const char *buf, size_t count)
1060 {
1061         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1062         int val = 0;
1063         uint16_t zio_timer;
1064
1065         if (sscanf(buf, "%d", &val) != 1)
1066                 return -EINVAL;
1067         if (val > 25500 || val < 100)
1068                 return -ERANGE;
1069
1070         zio_timer = (uint16_t)(val / 100);
1071         vha->hw->zio_timer = zio_timer;
1072
1073         return strlen(buf);
1074 }
1075
1076 static ssize_t
1077 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1078                     char *buf)
1079 {
1080         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1081         int len = 0;
1082
1083         if (vha->hw->beacon_blink_led)
1084                 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1085         else
1086                 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1087         return len;
1088 }
1089
1090 static ssize_t
1091 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1092                      const char *buf, size_t count)
1093 {
1094         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1095         struct qla_hw_data *ha = vha->hw;
1096         int val = 0;
1097         int rval;
1098
1099         if (IS_QLA2100(ha) || IS_QLA2200(ha))
1100                 return -EPERM;
1101
1102         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1103                 qla_printk(KERN_WARNING, ha,
1104                     "Abort ISP active -- ignoring beacon request.\n");
1105                 return -EBUSY;
1106         }
1107
1108         if (sscanf(buf, "%d", &val) != 1)
1109                 return -EINVAL;
1110
1111         if (val)
1112                 rval = ha->isp_ops->beacon_on(vha);
1113         else
1114                 rval = ha->isp_ops->beacon_off(vha);
1115
1116         if (rval != QLA_SUCCESS)
1117                 count = 0;
1118
1119         return count;
1120 }
1121
1122 static ssize_t
1123 qla2x00_optrom_bios_version_show(struct device *dev,
1124                                  struct device_attribute *attr, char *buf)
1125 {
1126         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1127         struct qla_hw_data *ha = vha->hw;
1128         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1129             ha->bios_revision[0]);
1130 }
1131
1132 static ssize_t
1133 qla2x00_optrom_efi_version_show(struct device *dev,
1134                                 struct device_attribute *attr, char *buf)
1135 {
1136         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1137         struct qla_hw_data *ha = vha->hw;
1138         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1139             ha->efi_revision[0]);
1140 }
1141
1142 static ssize_t
1143 qla2x00_optrom_fcode_version_show(struct device *dev,
1144                                   struct device_attribute *attr, char *buf)
1145 {
1146         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1147         struct qla_hw_data *ha = vha->hw;
1148         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1149             ha->fcode_revision[0]);
1150 }
1151
1152 static ssize_t
1153 qla2x00_optrom_fw_version_show(struct device *dev,
1154                                struct device_attribute *attr, char *buf)
1155 {
1156         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1157         struct qla_hw_data *ha = vha->hw;
1158         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1159             ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1160             ha->fw_revision[3]);
1161 }
1162
1163 static ssize_t
1164 qla2x00_total_isp_aborts_show(struct device *dev,
1165                               struct device_attribute *attr, char *buf)
1166 {
1167         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1168         struct qla_hw_data *ha = vha->hw;
1169         return snprintf(buf, PAGE_SIZE, "%d\n",
1170             ha->qla_stats.total_isp_aborts);
1171 }
1172
1173 static ssize_t
1174 qla24xx_84xx_fw_version_show(struct device *dev,
1175         struct device_attribute *attr, char *buf)
1176 {
1177         int rval = QLA_SUCCESS;
1178         uint16_t status[2] = {0, 0};
1179         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1180         struct qla_hw_data *ha = vha->hw;
1181
1182         if (IS_QLA84XX(ha) && ha->cs84xx) {
1183                 if (ha->cs84xx->op_fw_version == 0) {
1184                         rval = qla84xx_verify_chip(vha, status);
1185                 }
1186
1187                 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1188                         return snprintf(buf, PAGE_SIZE, "%u\n",
1189                             (uint32_t)ha->cs84xx->op_fw_version);
1190         }
1191
1192         return snprintf(buf, PAGE_SIZE, "\n");
1193 }
1194
1195 static ssize_t
1196 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1197     char *buf)
1198 {
1199         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1200         struct qla_hw_data *ha = vha->hw;
1201
1202         if (!IS_QLA81XX(ha))
1203                 return snprintf(buf, PAGE_SIZE, "\n");
1204
1205         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1206             ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1207             ha->mpi_capabilities);
1208 }
1209
1210 static ssize_t
1211 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1212     char *buf)
1213 {
1214         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1215         struct qla_hw_data *ha = vha->hw;
1216
1217         if (!IS_QLA81XX(ha))
1218                 return snprintf(buf, PAGE_SIZE, "\n");
1219
1220         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1221             ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1222 }
1223
1224 static ssize_t
1225 qla2x00_flash_block_size_show(struct device *dev,
1226                               struct device_attribute *attr, char *buf)
1227 {
1228         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1229         struct qla_hw_data *ha = vha->hw;
1230
1231         return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1232 }
1233
1234 static ssize_t
1235 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1236     char *buf)
1237 {
1238         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1239
1240         if (!IS_QLA81XX(vha->hw))
1241                 return snprintf(buf, PAGE_SIZE, "\n");
1242
1243         return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1244 }
1245
1246 static ssize_t
1247 qla2x00_vn_port_mac_address_show(struct device *dev,
1248     struct device_attribute *attr, char *buf)
1249 {
1250         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1251
1252         if (!IS_QLA81XX(vha->hw))
1253                 return snprintf(buf, PAGE_SIZE, "\n");
1254
1255         return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1256             vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1257             vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1258             vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1259 }
1260
1261 static ssize_t
1262 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1263     char *buf)
1264 {
1265         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1266
1267         return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1268 }
1269
1270 static ssize_t
1271 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1272     char *buf)
1273 {
1274         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1275         int rval = QLA_FUNCTION_FAILED;
1276         uint16_t state[5];
1277
1278         if (!vha->hw->flags.eeh_busy)
1279                 rval = qla2x00_get_firmware_state(vha, state);
1280         if (rval != QLA_SUCCESS)
1281                 memset(state, -1, sizeof(state));
1282
1283         return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1284             state[1], state[2], state[3], state[4]);
1285 }
1286
1287 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1288 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1289 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1290 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1291 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1292 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1293 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1294 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1295 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1296 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1297 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1298                    qla2x00_zio_timer_store);
1299 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1300                    qla2x00_beacon_store);
1301 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1302                    qla2x00_optrom_bios_version_show, NULL);
1303 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1304                    qla2x00_optrom_efi_version_show, NULL);
1305 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1306                    qla2x00_optrom_fcode_version_show, NULL);
1307 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1308                    NULL);
1309 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1310                    NULL);
1311 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1312                    NULL);
1313 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1314 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1315 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1316                    NULL);
1317 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1318 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1319                    qla2x00_vn_port_mac_address_show, NULL);
1320 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1321 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1322
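/*
 * Plain (text) host attributes, intended to be hooked up through the SCSI
 * host template's shost_attrs pointer; the files then typically appear
 * under /sys/class/scsi_host/hostN/.
 */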
1323 struct device_attribute *qla2x00_host_attrs[] = {
1324         &dev_attr_driver_version,
1325         &dev_attr_fw_version,
1326         &dev_attr_serial_num,
1327         &dev_attr_isp_name,
1328         &dev_attr_isp_id,
1329         &dev_attr_model_name,
1330         &dev_attr_model_desc,
1331         &dev_attr_pci_info,
1332         &dev_attr_link_state,
1333         &dev_attr_zio,
1334         &dev_attr_zio_timer,
1335         &dev_attr_beacon,
1336         &dev_attr_optrom_bios_version,
1337         &dev_attr_optrom_efi_version,
1338         &dev_attr_optrom_fcode_version,
1339         &dev_attr_optrom_fw_version,
1340         &dev_attr_84xx_fw_version,
1341         &dev_attr_total_isp_aborts,
1342         &dev_attr_mpi_version,
1343         &dev_attr_phy_version,
1344         &dev_attr_flash_block_size,
1345         &dev_attr_vlan_id,
1346         &dev_attr_vn_port_mac_address,
1347         &dev_attr_fabric_param,
1348         &dev_attr_fw_state,
1349         NULL,
1350 };
1351
1352 /* Host attributes. */
1353
1354 static void
1355 qla2x00_get_host_port_id(struct Scsi_Host *shost)
1356 {
1357         scsi_qla_host_t *vha = shost_priv(shost);
1358
1359         fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1360             vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1361 }
1362
1363 static void
1364 qla2x00_get_host_speed(struct Scsi_Host *shost)
1365 {
1366         struct qla_hw_data *ha = ((struct scsi_qla_host *)
1367                                         (shost_priv(shost)))->hw;
1368         u32 speed = FC_PORTSPEED_UNKNOWN;
1369
1370         switch (ha->link_data_rate) {
1371         case PORT_SPEED_1GB:
1372                 speed = FC_PORTSPEED_1GBIT;
1373                 break;
1374         case PORT_SPEED_2GB:
1375                 speed = FC_PORTSPEED_2GBIT;
1376                 break;
1377         case PORT_SPEED_4GB:
1378                 speed = FC_PORTSPEED_4GBIT;
1379                 break;
1380         case PORT_SPEED_8GB:
1381                 speed = FC_PORTSPEED_8GBIT;
1382                 break;
1383         case PORT_SPEED_10GB:
1384                 speed = FC_PORTSPEED_10GBIT;
1385                 break;
1386         }
1387         fc_host_speed(shost) = speed;
1388 }
1389
1390 static void
1391 qla2x00_get_host_port_type(struct Scsi_Host *shost)
1392 {
1393         scsi_qla_host_t *vha = shost_priv(shost);
1394         uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1395
1396         if (vha->vp_idx) {
1397                 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1398                 return;
1399         }
1400         switch (vha->hw->current_topology) {
1401         case ISP_CFG_NL:
1402                 port_type = FC_PORTTYPE_LPORT;
1403                 break;
1404         case ISP_CFG_FL:
1405                 port_type = FC_PORTTYPE_NLPORT;
1406                 break;
1407         case ISP_CFG_N:
1408                 port_type = FC_PORTTYPE_PTP;
1409                 break;
1410         case ISP_CFG_F:
1411                 port_type = FC_PORTTYPE_NPORT;
1412                 break;
1413         }
1414         fc_host_port_type(shost) = port_type;
1415 }
1416
1417 static void
1418 qla2x00_get_starget_node_name(struct scsi_target *starget)
1419 {
1420         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1421         scsi_qla_host_t *vha = shost_priv(host);
1422         fc_port_t *fcport;
1423         u64 node_name = 0;
1424
1425         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1426                 if (fcport->rport &&
1427                     starget->id == fcport->rport->scsi_target_id) {
1428                         node_name = wwn_to_u64(fcport->node_name);
1429                         break;
1430                 }
1431         }
1432
1433         fc_starget_node_name(starget) = node_name;
1434 }
1435
1436 static void
1437 qla2x00_get_starget_port_name(struct scsi_target *starget)
1438 {
1439         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1440         scsi_qla_host_t *vha = shost_priv(host);
1441         fc_port_t *fcport;
1442         u64 port_name = 0;
1443
1444         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1445                 if (fcport->rport &&
1446                     starget->id == fcport->rport->scsi_target_id) {
1447                         port_name = wwn_to_u64(fcport->port_name);
1448                         break;
1449                 }
1450         }
1451
1452         fc_starget_port_name(starget) = port_name;
1453 }
1454
1455 static void
1456 qla2x00_get_starget_port_id(struct scsi_target *starget)
1457 {
1458         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1459         scsi_qla_host_t *vha = shost_priv(host);
1460         fc_port_t *fcport;
1461         uint32_t port_id = ~0U;
1462
1463         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1464                 if (fcport->rport &&
1465                     starget->id == fcport->rport->scsi_target_id) {
1466                         port_id = fcport->d_id.b.domain << 16 |
1467                             fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1468                         break;
1469                 }
1470         }
1471
1472         fc_starget_port_id(starget) = port_id;
1473 }
1474
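/* FC transport callback: a zero dev_loss_tmo is coerced to one second. */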
1475 static void
1476 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1477 {
1478         if (timeout)
1479                 rport->dev_loss_tmo = timeout;
1480         else
1481                 rport->dev_loss_tmo = 1;
1482 }
1483
1484 static void
1485 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1486 {
1487         struct Scsi_Host *host = rport_to_shost(rport);
1488         fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1489
1490         if (!fcport)
1491                 return;
1492
1493         if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1494                 return;
1495
1496         if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1497                 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1498                 return;
1499         }
1500
1501         /*
1502          * Transport has effectively 'deleted' the rport, clear
1503          * all local references.
1504          */
1505         spin_lock_irq(host->host_lock);
1506         fcport->rport = NULL;
1507         *((fc_port_t **)rport->dd_data) = NULL;
1508         spin_unlock_irq(host->host_lock);
1509 }
1510
1511 static void
1512 qla2x00_terminate_rport_io(struct fc_rport *rport)
1513 {
1514         fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1515
1516         if (!fcport)
1517                 return;
1518
1519         if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1520                 return;
1521
1522         if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1523                 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1524                 return;
1525         }
1526         /*
1527          * At this point all fcport's software-states are cleared.  Perform any
1528          * final cleanup of firmware resources (PCBs and XCBs).
1529          */
1530         if (fcport->loop_id != FC_NO_LOOP_ID &&
1531             !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1532                 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1533                         fcport->loop_id, fcport->d_id.b.domain,
1534                         fcport->d_id.b.area, fcport->d_id.b.al_pa);
1535 }
1536
1537 static int
1538 qla2x00_issue_lip(struct Scsi_Host *shost)
1539 {
1540         scsi_qla_host_t *vha = shost_priv(shost);
1541
1542         qla2x00_loop_reset(vha);
1543         return 0;
1544 }
1545
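     /*
      * FC transport get_fc_host_stats callback: retrieve link statistics from
      * the firmware into a DMA buffer and translate them into the
      * fc_host_statistics format.  Counters that are not filled in remain
      * at -1.
      */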
1546 static struct fc_host_statistics *
1547 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1548 {
1549         scsi_qla_host_t *vha = shost_priv(shost);
1550         struct qla_hw_data *ha = vha->hw;
1551         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1552         int rval;
1553         struct link_statistics *stats;
1554         dma_addr_t stats_dma;
1555         struct fc_host_statistics *pfc_host_stat;
1556
1557         pfc_host_stat = &ha->fc_host_stat;
1558         memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1559
1560         if (test_bit(UNLOADING, &vha->dpc_flags))
1561                 goto done;
1562
1563         if (unlikely(pci_channel_offline(ha->pdev)))
1564                 goto done;
1565
1566         stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1567         if (stats == NULL) {
1568                 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
1569                     __func__, base_vha->host_no));
1570                 goto done;
1571         }
1572         memset(stats, 0, DMA_POOL_SIZE);
1573
1574         rval = QLA_FUNCTION_FAILED;
1575         if (IS_FWI2_CAPABLE(ha)) {
1576                 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1577         } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1578                     !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1579                     !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1580                     !ha->dpc_active) {
1581                 /* Must be in a 'READY' state for statistics retrieval. */
1582                 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1583                                                 stats, stats_dma);
1584         }
1585
1586         if (rval != QLA_SUCCESS)
1587                 goto done_free;
1588
1589         pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1590         pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1591         pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1592         pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1593         pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1594         pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1595         if (IS_FWI2_CAPABLE(ha)) {
1596                 pfc_host_stat->lip_count = stats->lip_cnt;
1597                 pfc_host_stat->tx_frames = stats->tx_frames;
1598                 pfc_host_stat->rx_frames = stats->rx_frames;
1599                 pfc_host_stat->dumped_frames = stats->dumped_frames;
1600                 pfc_host_stat->nos_count = stats->nos_rcvd;
1601         }
1602         pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1603         pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1604
1605 done_free:
1606         dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1607 done:
1608         return pfc_host_stat;
1609 }
1610
1611 static void
1612 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1613 {
1614         scsi_qla_host_t *vha = shost_priv(shost);
1615
1616         qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1617 }
1618
1619 static void
1620 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1621 {
1622         scsi_qla_host_t *vha = shost_priv(shost);
1623
1624         set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1625 }
1626
1627 static void
1628 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1629 {
1630         scsi_qla_host_t *vha = shost_priv(shost);
1631         u64 node_name;
1632
1633         if (vha->device_flags & SWITCH_FOUND)
1634                 node_name = wwn_to_u64(vha->fabric_node_name);
1635         else
1636                 node_name = wwn_to_u64(vha->node_name);
1637
1638         fc_host_fabric_name(shost) = node_name;
1639 }
1640
1641 static void
1642 qla2x00_get_host_port_state(struct Scsi_Host *shost)
1643 {
1644         scsi_qla_host_t *vha = shost_priv(shost);
1645         struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1646
1647         if (!base_vha->flags.online)
1648                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1649         else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1650                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1651         else
1652                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1653 }
1654
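     /*
      * FC transport vport_create callback: validate the request, create a new
      * virtual host (NPIV port), register it with the SCSI midlayer and pick
      * a request queue for it (a QoS queue if one is configured for this
      * WWPN/WWNN in NVRAM).
      */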
1655 static int
1656 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1657 {
1658         int     ret = 0;
1659         uint8_t qos = 0;
1660         scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1661         scsi_qla_host_t *vha = NULL;
1662         struct qla_hw_data *ha = base_vha->hw;
1663         uint16_t options = 0;
1664         int     cnt;
1665         struct req_que *req = ha->req_q_map[0];
1666
1667         ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1668         if (ret) {
1669                 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
1670                     "status %x\n", ret));
1671                 return (ret);
1672         }
1673
1674         vha = qla24xx_create_vhost(fc_vport);
1675         if (vha == NULL) {
1676                 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
1677                     vha));
1678                 return FC_VPORT_FAILED;
1679         }
1680         if (disable) {
1681                 atomic_set(&vha->vp_state, VP_OFFLINE);
1682                 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1683         } else
1684                 atomic_set(&vha->vp_state, VP_FAILED);
1685
1686         /* ready to create vport */
1687         qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1688                                                         vha->vp_idx);
1689
1690         /* Initialize vport states. */
1691         atomic_set(&vha->loop_state, LOOP_DOWN);
1692         vha->vp_err_state = VP_ERR_PORTDWN;
1693         vha->vp_prev_err_state = VP_ERR_UNKWN;
1694         /* Check if physical ha port is Up */
1695         if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1696             atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1697                 /* Don't retry or attempt login of this virtual port */
1698                 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
1699                     base_vha->host_no));
1700                 atomic_set(&vha->loop_state, LOOP_DEAD);
1701                 if (!disable)
1702                         fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1703         }
1704
1705         if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1706                                    &ha->pdev->dev)) {
1707                 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1708                         vha->host_no, vha->vp_idx));
1709                 goto vport_create_failed_2;
1710         }
1711
1712         /* initialize attributes */
1713         fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1714         fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1715         fc_host_supported_classes(vha->host) =
1716                 fc_host_supported_classes(base_vha->host);
1717         fc_host_supported_speeds(vha->host) =
1718                 fc_host_supported_speeds(base_vha->host);
1719
1720         qla24xx_vport_disable(fc_vport, disable);
1721
1722         if (ha->flags.cpu_affinity_enabled) {
1723                 req = ha->req_q_map[1];
1724                 goto vport_queue;
1725         } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1726                 goto vport_queue;
1727         /* Create a request queue in QoS mode for the vport */
1728         for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1729                 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1730                         && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1731                                         8) == 0) {
1732                         qos = ha->npiv_info[cnt].q_qos;
1733                         break;
1734                 }
1735         }
1736         if (qos) {
1737                 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1738                         qos);
1739                 if (!ret)
1740                         qla_printk(KERN_WARNING, ha,
1741                         "Can't create request queue for vp_idx:%d\n",
1742                         vha->vp_idx);
1743                 else {
1744                         DEBUG2(qla_printk(KERN_INFO, ha,
1745                         "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1746                         ret, qos, vha->vp_idx));
1747                         req = ha->req_q_map[ret];
1748                 }
1749         }
1750
1751 vport_queue:
1752         vha->req = req;
1753         return 0;
1754
1755 vport_create_failed_2:
1756         qla24xx_disable_vp(vha);
1757         qla24xx_deallocate_vp_id(vha);
1758         scsi_host_put(vha->host);
1759         return FC_VPORT_FAILED;
1760 }
1761
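     /*
      * FC transport vport_delete callback: quiesce and disable the vport,
      * remove its SCSI host, free its fcports and request queue, and release
      * the vport index.
      */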
1762 static int
1763 qla24xx_vport_delete(struct fc_vport *fc_vport)
1764 {
1765         scsi_qla_host_t *vha = fc_vport->dd_data;
1766         fc_port_t *fcport, *tfcport;
1767         struct qla_hw_data *ha = vha->hw;
1768         uint16_t id = vha->vp_idx;
1769
1770         while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1771             test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1772                 msleep(1000);
1773
1774         qla24xx_disable_vp(vha);
1775
1776         fc_remove_host(vha->host);
1777
1778         scsi_remove_host(vha->host);
1779
1780         list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1781                 list_del(&fcport->list);
1782                 kfree(fcport);
1783                 fcport = NULL;
1784         }
1785
1786         qla24xx_deallocate_vp_id(vha);
1787
1788         mutex_lock(&ha->vport_lock);
1789         ha->cur_vport_count--;
1790         clear_bit(vha->vp_idx, ha->vp_idx_map);
1791         mutex_unlock(&ha->vport_lock);
1792
1793         if (vha->timer_active) {
1794                 qla2x00_vp_stop_timer(vha);
1795                 DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
1796                     "has stopped\n",
1797                     vha->host_no, vha->vp_idx, vha));
1798         }
1799
1800         if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1801                 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1802                         qla_printk(KERN_WARNING, ha,
1803                                 "Queue delete failed.\n");
1804         }
1805
1806         scsi_host_put(vha->host);
1807         qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
1808         return 0;
1809 }
1810
1811 static int
1812 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1813 {
1814         scsi_qla_host_t *vha = fc_vport->dd_data;
1815
1816         if (disable)
1817                 qla24xx_disable_vp(vha);
1818         else
1819                 qla24xx_enable_vp(vha);
1820
1821         return 0;
1822 }
1823
1824 /* BSG support for ELS/CT pass through */
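     /*
      * Allocate an SRB plus a BSG context of 'size' bytes for an ELS/CT
      * pass-through request.  Returns NULL if either allocation fails.
      */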
1825 inline srb_t *
1826 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1827 {
1828         srb_t *sp;
1829         struct qla_hw_data *ha = vha->hw;
1830         struct srb_bsg_ctx *ctx;
1831
1832         sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1833         if (!sp)
1834                 goto done;
1835         ctx = kzalloc(size, GFP_KERNEL);
1836         if (!ctx) {
1837                 mempool_free(sp, ha->srb_mempool);
1838                 goto done;
1839         }
1840
1841         memset(sp, 0, sizeof(*sp));
1842         sp->fcport = fcport;
1843         sp->ctx = ctx;
1844 done:
1845         return sp;
1846 }
1847
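     /*
      * Handle an ELS pass-through request from the FC BSG interface.  For
      * rport-directed ELS the remote port must be logged in; for host-based
      * ELS (no login) a temporary fcport is allocated to carry the
      * destination ID.  The payload scatterlists are DMA-mapped and the
      * command is issued through an SRB.
      */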
1848 static int
1849 qla2x00_process_els(struct fc_bsg_job *bsg_job)
1850 {
1851         struct fc_rport *rport;
1852         fc_port_t *fcport;
1853         struct Scsi_Host *host;
1854         scsi_qla_host_t *vha;
1855         struct qla_hw_data *ha;
1856         srb_t *sp;
1857         const char *type;
1858         int req_sg_cnt, rsp_sg_cnt;
1859         int rval =  (DRIVER_ERROR << 16);
1860         uint16_t nextlid = 0;
1861         struct srb_bsg *els;
1862
1863         /* Multiple SGs are not supported for ELS requests. */
1864         if (bsg_job->request_payload.sg_cnt > 1 ||
1865                 bsg_job->reply_payload.sg_cnt > 1) {
1866                 DEBUG2(printk(KERN_INFO
1867                     "multiple SG's are not supported for ELS requests"
1868                     " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1869                     bsg_job->request_payload.sg_cnt,
1870                     bsg_job->reply_payload.sg_cnt));
1871                 rval = -EPERM;
1872                 goto done;
1873         }
1874
1875         /* ELS request for rport */
1876         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1877                 rport = bsg_job->rport;
1878                 fcport = *(fc_port_t **) rport->dd_data;
1879                 host = rport_to_shost(rport);
1880                 vha = shost_priv(host);
1881                 ha = vha->hw;
1882                 type = "FC_BSG_RPT_ELS";
1883
1884                 /* Make sure the rport is logged in;
1885                  * if not, perform a fabric login.
1886                  */
1887                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1888                         DEBUG2(qla_printk(KERN_WARNING, ha,
1889                             "failed to login port %06X for ELS passthru\n",
1890                             fcport->d_id.b24));
1891                         rval = -EIO;
1892                         goto done;
1893                 }
1894         } else {
1895                 host = bsg_job->shost;
1896                 vha = shost_priv(host);
1897                 ha = vha->hw;
1898                 type = "FC_BSG_HST_ELS_NOLOGIN";
1899
1900                 /* Allocate a dummy fcport structure, since the functions
1901                  * preparing the IOCB and mailbox command retrieve port-specific
1902                  * information from the fcport structure. For host-based ELS
1903                  * commands no fcport structure is allocated.
1904                  */
1905                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1906                 if (!fcport) {
1907                         rval = -ENOMEM;
1908                         goto done;
1909                 }
1910
1911                 /* Initialize all required  fields of fcport */
1912                 fcport->vha = vha;
1913                 fcport->vp_idx = vha->vp_idx;
1914                 fcport->d_id.b.al_pa =
1915                     bsg_job->request->rqst_data.h_els.port_id[0];
1916                 fcport->d_id.b.area =
1917                     bsg_job->request->rqst_data.h_els.port_id[1];
1918                 fcport->d_id.b.domain =
1919                     bsg_job->request->rqst_data.h_els.port_id[2];
1920                 fcport->loop_id =
1921                     (fcport->d_id.b.al_pa == 0xFD) ?
1922                     NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1923         }
1924
1925         if (!vha->flags.online) {
1926                 DEBUG2(qla_printk(KERN_WARNING, ha,
1927                     "host not online\n"));
1928                 rval = -EIO;
1929                 goto done;
1930         }
1931
1932         req_sg_cnt =
1933             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1934             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1935         if (!req_sg_cnt) {
1936                 rval = -ENOMEM;
1937                 goto done_free_fcport;
1938         }
1939         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1940             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1941         if (!rsp_sg_cnt) {
                     /* Undo the request-side DMA mapping before bailing out. */
                     dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1942                 rval = -ENOMEM;
1943                 goto done_free_fcport;
1944         }
1945
1946         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
1947             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
1948         {
1949                 DEBUG2(printk(KERN_INFO
1950                     "dma mapping resulted in different sg counts "
1951                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
1952                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1953                     bsg_job->request_payload.sg_cnt, req_sg_cnt,
1954                     bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1955                 rval = -EAGAIN;
1956                 goto done_unmap_sg;
1957         }
1958
1959         /* Alloc SRB structure */
1960         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1961         if (!sp) {
1962                 rval = -ENOMEM;
1963                 goto done_unmap_sg;
1964         }
1965
1966         els = sp->ctx;
1967         els->ctx.type =
1968             (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1969             SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1970         els->bsg_job = bsg_job;
1971
1972         DEBUG2(qla_printk(KERN_INFO, ha,
1973             "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1974             "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1975             bsg_job->request->rqst_data.h_els.command_code,
1976             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1977             fcport->d_id.b.al_pa));
1978
1979         rval = qla2x00_start_sp(sp);
1980         if (rval != QLA_SUCCESS) {
1981                 kfree(sp->ctx);
1982                 mempool_free(sp, ha->srb_mempool);
1983                 rval = -EIO;
1984                 goto done_unmap_sg;
1985         }
1986         return rval;
1987
1988 done_unmap_sg:
1989         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1990                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1991         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1992                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1993         goto done_free_fcport;
1994
1995 done_free_fcport:
1996         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
1997                 kfree(fcport);
1998 done:
1999         return rval;
2000 }
2001
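     /*
      * Handle a CT pass-through request from the FC BSG interface
      * (FWI2-capable ISPs only).  The CT preamble selects the SNS or
      * management-server loop ID, a temporary fcport carries the destination
      * ID, and the command is issued through an SRB.
      */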
2002 static int
2003 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2004 {
2005         srb_t *sp;
2006         struct Scsi_Host *host = bsg_job->shost;
2007         scsi_qla_host_t *vha = shost_priv(host);
2008         struct qla_hw_data *ha = vha->hw;
2009         int rval = (DRIVER_ERROR << 16);
2010         int req_sg_cnt, rsp_sg_cnt;
2011         uint16_t loop_id;
2012         struct fc_port *fcport;
2013         char  *type = "FC_BSG_HST_CT";
2014         struct srb_bsg *ct;
2015
2016         /* pass through is supported only for ISP 4Gb or higher */
2017         if (!IS_FWI2_CAPABLE(ha)) {
2018                 DEBUG2(qla_printk(KERN_INFO, ha,
2019                     "scsi(%ld):Firmware is not capable to support FC "
2020                     "CT pass thru\n", vha->host_no));
2021                 rval = -EPERM;
2022                 goto done;
2023         }
2024
2025         req_sg_cnt =
2026             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2027             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2028         if (!req_sg_cnt) {
2029                 rval = -ENOMEM;
2030                 goto done;
2031         }
2032
2033         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2034             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2035         if (!rsp_sg_cnt) {
                     /* Undo the request-side DMA mapping before bailing out. */
                     dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2036                 rval = -ENOMEM;
2037                 goto done;
2038         }
2039
2040         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
2041                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2042         {
2043                 DEBUG2(qla_printk(KERN_WARNING, ha,
2044                     "dma mapping resulted in different sg counts "
2045                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2046                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2047                     bsg_job->request_payload.sg_cnt, req_sg_cnt,
2048                     bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2049                 rval = -EAGAIN;
2050                 goto done_unmap_sg;
2051         }
2052
2053         if (!vha->flags.online) {
2054                 DEBUG2(qla_printk(KERN_WARNING, ha,
2055                     "host not online\n"));
2056                 rval = -EIO;
2057                 goto done_unmap_sg;
2058         }
2059
2060         loop_id =
2061             (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2062             >> 24;
2063         switch (loop_id) {
2064                 case 0xFC:
2065                         loop_id = cpu_to_le16(NPH_SNS);
2066                         break;
2067                 case 0xFA:
2068                         loop_id = vha->mgmt_svr_loop_id;
2069                         break;
2070                 default:
2071                         DEBUG2(qla_printk(KERN_INFO, ha,
2072                             "Unknown loop id: %x\n", loop_id));
2073                         rval = -EINVAL;
2074                         goto done_unmap_sg;
2075         }
2076
2077         /* Allocate a dummy fcport structure, since the functions preparing the
2078          * IOCB and mailbox command retrieve port-specific information
2079          * from the fcport structure. For host-based CT commands no fcport
2080          * structure is allocated.
2081          */
2082         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2083         if (!fcport)
2084         {
2085                 rval = -ENOMEM;
2086                 goto  done_unmap_sg;
2087         }
2088
2089         /* Initialize all required  fields of fcport */
2090         fcport->vha = vha;
2091         fcport->vp_idx = vha->vp_idx;
2092         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2093         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2094         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2095         fcport->loop_id = loop_id;
2096
2097         /* Alloc SRB structure */
2098         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2099         if (!sp) {
2100                 rval = -ENOMEM;
2101                 goto done_free_fcport;
2102         }
2103
2104         ct = sp->ctx;
2105         ct->ctx.type = SRB_CT_CMD;
2106         ct->bsg_job = bsg_job;
2107
2108         DEBUG2(qla_printk(KERN_INFO, ha,
2109             "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2110             "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2111             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2112             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2113             fcport->d_id.b.al_pa));
2114
2115         rval = qla2x00_start_sp(sp);
2116         if (rval != QLA_SUCCESS) {
2117                 kfree(sp->ctx);
2118                 mempool_free(sp, ha->srb_mempool);
2119                 rval = -EIO;
2120                 goto done_free_fcport;
2121         }
2122         return rval;
2123
2124 done_free_fcport:
2125         kfree(fcport);
2126 done_unmap_sg:
2127         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2128             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2129         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2130             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2131 done:
2132         return rval;
2133 }
2134
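     /*
      * Handle vendor-specific BSG requests: internal/external loopback or
      * ECHO diagnostics and ISP84xx reset/management commands.  Request and
      * reply payloads are staged in coherent DMA buffers, and the mailbox
      * status registers are copied into the job's sense buffer after the
      * fc_bsg_reply.
      */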
2135 static int
2136 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2137 {
2138         struct Scsi_Host *host = bsg_job->shost;
2139         scsi_qla_host_t *vha = shost_priv(host);
2140         struct qla_hw_data *ha = vha->hw;
2141         int rval;
2142         uint8_t command_sent = 0;
2143         uint32_t vendor_cmd;
2144         char *type = NULL;
2145         struct msg_echo_lb elreq;
2146         uint16_t response[MAILBOX_REGISTER_COUNT];
2147         uint8_t *fw_sts_ptr;
2148         uint8_t *req_data = NULL;
2149         dma_addr_t req_data_dma;
2150         uint32_t req_data_len;
2151         uint8_t *rsp_data = NULL;
2152         dma_addr_t rsp_data_dma;
2153         uint32_t rsp_data_len;
2154
2155         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2156             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2157             test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2158                 rval = -EBUSY;
2159                 goto done;
2160         }
2161
2162         if (!vha->flags.online) {
2163                 DEBUG2(qla_printk(KERN_WARNING, ha,
2164                     "host not online\n"));
2165                 rval = -EIO;
2166                 goto done;
2167         }
2168
2169         elreq.req_sg_cnt =
2170             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2171             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2172         if (!elreq.req_sg_cnt) {
2173                 rval = -ENOMEM;
2174                 goto done;
2175         }
2176         elreq.rsp_sg_cnt =
2177             dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2178             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2179         if (!elreq.rsp_sg_cnt) {
                     /* Undo the request-side DMA mapping before bailing out. */
                     dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2180                 rval = -ENOMEM;
2181                 goto done;
2182         }
2183
2184         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
2185             (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2186         {
2187                 DEBUG2(printk(KERN_INFO
2188                     "dma mapping resulted in different sg counts "
2189                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2190                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2191                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2192                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2193                 rval = -EAGAIN;
2194                 goto done_unmap_sg;
2195         }
2196         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2197         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2198             &req_data_dma, GFP_KERNEL);
             if (!req_data) {
                     rval = -ENOMEM;
                     goto done_unmap_sg;
             }
2199
2200         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2201             &rsp_data_dma, GFP_KERNEL);
             if (!rsp_data) {
                     rval = -ENOMEM;
                     goto done_unmap_sg;
             }
2202
2203         /* Copy the request buffer in req_data now */
2204         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2205             bsg_job->request_payload.sg_cnt, req_data,
2206             req_data_len);
2207
2208         elreq.send_dma = req_data_dma;
2209         elreq.rcv_dma = rsp_data_dma;
2210         elreq.transfer_size = req_data_len;
2211
2212         /* Vendor cmd : loopback or ECHO diagnostic
2213          * Options:
2214          *      Loopback : Either internal or external loopback
2215          *      ECHO: ECHO ELS or Vendor specific FC4  link data
2216          */
2217         vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2218         elreq.options =
2219             *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2220             + 1);
2221
2222         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2223         case QL_VND_LOOPBACK:
2224                 if (ha->current_topology != ISP_CFG_F) {
2225                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
2226
2227                         DEBUG2(qla_printk(KERN_INFO, ha,
2228                                 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2229                                 vha->host_no, type, vendor_cmd, elreq.options));
2230
2231                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
2232                         rval = qla2x00_loopback_test(vha, &elreq, response);
2233                         if (IS_QLA81XX(ha)) {
2234                                 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2235                                         DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2236                                                 "ISP\n", __func__, vha->host_no));
2237                                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2238                                         qla2xxx_wake_dpc(vha);
2239                                  }
2240                         }
2241                 } else {
2242                         type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2243                         DEBUG2(qla_printk(KERN_INFO, ha,
2244                                 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2245                                 vha->host_no, type, vendor_cmd, elreq.options));
2246
2247                         command_sent = INT_DEF_LB_ECHO_CMD;
2248                         rval = qla2x00_echo_test(vha, &elreq, response);
2249                 }
2250                 break;
2251         case QLA84_RESET:
2252                 if (!IS_QLA84XX(vha->hw)) {
2253                         rval = -EINVAL;
2254                         DEBUG16(printk(
2255                                 "%s(%ld): 8xxx exiting.\n",
2256                                 __func__, vha->host_no));
2257                         goto done_unmap_sg;
2258                 }
2259                 rval = qla84xx_reset(vha, &elreq, bsg_job);
2260                 break;
2261         case QLA84_MGMT_CMD:
2262                 if (!IS_QLA84XX(vha->hw)) {
2263                         rval = -EINVAL;
2264                         DEBUG16(printk(
2265                                 "%s(%ld): 8xxx exiting.\n",
2266                                 __func__, vha->host_no));
2267                         goto done_unmap_sg;
2268                 }
2269                 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2270                 break;
2271         default:
2272                 rval = -ENOSYS;
2273         }
2274
2275         if (rval != QLA_SUCCESS) {
2276                 DEBUG2(qla_printk(KERN_WARNING, ha,
2277                         "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2278                 rval = 0;
2279                 bsg_job->reply->result = (DID_ERROR << 16);
2280                 bsg_job->reply->reply_payload_rcv_len = 0;
2281                 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2282                 memcpy( fw_sts_ptr, response, sizeof(response));
2283                 fw_sts_ptr += sizeof(response);
2284                 *fw_sts_ptr = command_sent;
2285         } else {
2286                 DEBUG2(qla_printk(KERN_WARNING, ha,
2287                         "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2288                 rval = bsg_job->reply->result = 0;
2289                 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2290                 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2291                 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2292                 memcpy(fw_sts_ptr, response, sizeof(response));
2293                 fw_sts_ptr += sizeof(response);
2294                 *fw_sts_ptr = command_sent;
2295                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2296                     bsg_job->reply_payload.sg_cnt, rsp_data,
2297                     rsp_data_len);
2298         }
2299         bsg_job->job_done(bsg_job);
2300
2301 done_unmap_sg:
2302
2303         if (req_data)
2304                 dma_free_coherent(&ha->pdev->dev, req_data_len,
2305                         req_data, req_data_dma);
             if (rsp_data)
                     dma_free_coherent(&ha->pdev->dev, rsp_data_len,
                             rsp_data, rsp_data_dma);
2306         dma_unmap_sg(&ha->pdev->dev,
2307             bsg_job->request_payload.sg_list,
2308             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2309         dma_unmap_sg(&ha->pdev->dev,
2310             bsg_job->reply_payload.sg_list,
2311             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2312
2313 done:
2314         return rval;
2315 }
2316
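     /* Dispatch an incoming FC BSG request to the appropriate handler. */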
2317 static int
2318 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2319 {
2320         int ret = -EINVAL;
2321
2322         switch (bsg_job->request->msgcode) {
2323                 case FC_BSG_RPT_ELS:
2324                 case FC_BSG_HST_ELS_NOLOGIN:
2325                         ret = qla2x00_process_els(bsg_job);
2326                         break;
2327                 case FC_BSG_HST_CT:
2328                         ret = qla2x00_process_ct(bsg_job);
2329                         break;
2330                 case FC_BSG_HST_VENDOR:
2331                         ret = qla2x00_process_vendor_specific(bsg_job);
2332                         break;
2333                 case FC_BSG_HST_ADD_RPORT:
2334                 case FC_BSG_HST_DEL_RPORT:
2335                 case FC_BSG_RPT_CT:
2336                 default:
2337                         DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2338                         break;
2339         }
2340         return ret;
2341 }
2342
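     /*
      * BSG timeout handler: locate the SRB that carries this bsg_job among
      * the outstanding commands and try to abort it via the firmware.
      */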
2343 static int
2344 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2345 {
2346         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2347         struct qla_hw_data *ha = vha->hw;
2348         srb_t *sp;
2349         int cnt, que;
2350         unsigned long flags;
2351         struct req_que *req;
2352         struct srb_bsg *sp_bsg;
2353
2354         /* find the bsg job from the active list of commands */
2355         spin_lock_irqsave(&ha->hardware_lock, flags);
2356         for (que = 0; que < ha->max_req_queues; que++) {
2357                 req = ha->req_q_map[que];
2358                 if (!req)
2359                         continue;
2360
2361                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
2362                         sp = req->outstanding_cmds[cnt];
2363
2364                         if (sp) {
2365                                 sp_bsg = (struct srb_bsg*)sp->ctx;
2366
2367                                 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2368                                     (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
2369                                     || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2370                                     (sp_bsg->bsg_job == bsg_job)) {
                                             /* Release the hardware lock before
                                              * issuing the mailbox-based abort. */
                                             spin_unlock_irqrestore(&ha->hardware_lock, flags);
2371                                         if (ha->isp_ops->abort_command(sp)) {
2372                                                 DEBUG2(qla_printk(KERN_INFO, ha,
2373                                                 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2374                                                 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2375                                         } else {
2376                                                 DEBUG2(qla_printk(KERN_INFO, ha,
2377                                                 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2378                                                 bsg_job->req->errors = bsg_job->reply->result = 0;
2379                                         }
2380                                         goto done;
2381                                 }
2382                         }
2383                 }
2384         }
2385         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2386         DEBUG2(qla_printk(KERN_INFO, ha,
2387                 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2388         bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2389         return 0;
2390
2391 done:
2392         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2393                 kfree(sp->fcport);
2394         kfree(sp->ctx);
2395         mempool_free(sp, ha->srb_mempool);
2396         return 0;
2397 }
2398
2399 struct fc_function_template qla2xxx_transport_functions = {
2400
2401         .show_host_node_name = 1,
2402         .show_host_port_name = 1,
2403         .show_host_supported_classes = 1,
2404         .show_host_supported_speeds = 1,
2405
2406         .get_host_port_id = qla2x00_get_host_port_id,
2407         .show_host_port_id = 1,
2408         .get_host_speed = qla2x00_get_host_speed,
2409         .show_host_speed = 1,
2410         .get_host_port_type = qla2x00_get_host_port_type,
2411         .show_host_port_type = 1,
2412         .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2413         .show_host_symbolic_name = 1,
2414         .set_host_system_hostname = qla2x00_set_host_system_hostname,
2415         .show_host_system_hostname = 1,
2416         .get_host_fabric_name = qla2x00_get_host_fabric_name,
2417         .show_host_fabric_name = 1,
2418         .get_host_port_state = qla2x00_get_host_port_state,
2419         .show_host_port_state = 1,
2420
2421         .dd_fcrport_size = sizeof(struct fc_port *),
2422         .show_rport_supported_classes = 1,
2423
2424         .get_starget_node_name = qla2x00_get_starget_node_name,
2425         .show_starget_node_name = 1,
2426         .get_starget_port_name = qla2x00_get_starget_port_name,
2427         .show_starget_port_name = 1,
2428         .get_starget_port_id  = qla2x00_get_starget_port_id,
2429         .show_starget_port_id = 1,
2430
2431         .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2432         .show_rport_dev_loss_tmo = 1,
2433
2434         .issue_fc_host_lip = qla2x00_issue_lip,
2435         .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2436         .terminate_rport_io = qla2x00_terminate_rport_io,
2437         .get_fc_host_stats = qla2x00_get_fc_host_stats,
2438
2439         .vport_create = qla24xx_vport_create,
2440         .vport_disable = qla24xx_vport_disable,
2441         .vport_delete = qla24xx_vport_delete,
2442         .bsg_request = qla24xx_bsg_request,
2443         .bsg_timeout = qla24xx_bsg_timeout,
2444 };
2445
2446 struct fc_function_template qla2xxx_transport_vport_functions = {
2447
2448         .show_host_node_name = 1,
2449         .show_host_port_name = 1,
2450         .show_host_supported_classes = 1,
2451
2452         .get_host_port_id = qla2x00_get_host_port_id,
2453         .show_host_port_id = 1,
2454         .get_host_speed = qla2x00_get_host_speed,
2455         .show_host_speed = 1,
2456         .get_host_port_type = qla2x00_get_host_port_type,
2457         .show_host_port_type = 1,
2458         .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2459         .show_host_symbolic_name = 1,
2460         .set_host_system_hostname = qla2x00_set_host_system_hostname,
2461         .show_host_system_hostname = 1,
2462         .get_host_fabric_name = qla2x00_get_host_fabric_name,
2463         .show_host_fabric_name = 1,
2464         .get_host_port_state = qla2x00_get_host_port_state,
2465         .show_host_port_state = 1,
2466
2467         .dd_fcrport_size = sizeof(struct fc_port *),
2468         .show_rport_supported_classes = 1,
2469
2470         .get_starget_node_name = qla2x00_get_starget_node_name,
2471         .show_starget_node_name = 1,
2472         .get_starget_port_name = qla2x00_get_starget_port_name,
2473         .show_starget_port_name = 1,
2474         .get_starget_port_id  = qla2x00_get_starget_port_id,
2475         .show_starget_port_id = 1,
2476
2477         .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2478         .show_rport_dev_loss_tmo = 1,
2479
2480         .issue_fc_host_lip = qla2x00_issue_lip,
2481         .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2482         .terminate_rport_io = qla2x00_terminate_rport_io,
2483         .get_fc_host_stats = qla2x00_get_fc_host_stats,
2484         .bsg_request = qla24xx_bsg_request,
2485         .bsg_timeout = qla24xx_bsg_timeout,
2486 };
2487
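     /*
      * Initialize the fc_host attributes (WWNs, supported classes/speeds,
      * NPIV limits) exposed through the FC transport for this host.
      */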
2488 void
2489 qla2x00_init_host_attr(scsi_qla_host_t *vha)
2490 {
2491         struct qla_hw_data *ha = vha->hw;
2492         u32 speed = FC_PORTSPEED_UNKNOWN;
2493
2494         fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2495         fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2496         fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
2497         fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2498         fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2499
2500         if (IS_QLA81XX(ha))
2501                 speed = FC_PORTSPEED_10GBIT;
2502         else if (IS_QLA25XX(ha))
2503                 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2504                     FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2505         else if (IS_QLA24XX_TYPE(ha))
2506                 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2507                     FC_PORTSPEED_1GBIT;
2508         else if (IS_QLA23XX(ha))
2509                 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2510         else
2511                 speed = FC_PORTSPEED_1GBIT;
2512         fc_host_supported_speeds(vha->host) = speed;
2513 }
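
     /*
      * Issue an ISP84xx chip reset on behalf of a vendor-specific BSG
      * request; the vendor command selects either the diagnostic or the
      * operational firmware.
      */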
2514 static int
2515 qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2516 {
2517         int             ret = 0;
2518         int             cmd;
2519         uint16_t        cmd_status;
2520
2521         DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2522
2523         cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2524                         == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2525                                 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2526         ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2527             &cmd_status);
2528         return ret;
2529 }
2530
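     /*
      * Issue an ISP84xx management command (memory read/write, config change
      * or info request) described by the vendor-specific BSG request, using
      * an ACCESS_CHIP IOCB.
      */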
2531 static int
2532 qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2533 {
2534         struct access_chip_84xx *mn;
2535         dma_addr_t mn_dma, mgmt_dma;
2536         void *mgmt_b = NULL;
2537         int ret = 0;
2538         int rsp_hdr_len, len = 0;
2539         struct qla84_msg_mgmt *ql84_mgmt;
2540
2541         ql84_mgmt = vmalloc(sizeof(struct qla84_msg_mgmt));
             if (!ql84_mgmt)
                     return -ENOMEM;
2542         ql84_mgmt->cmd =
2543                 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2544         ql84_mgmt->mgmtp.u.mem.start_addr =
2545                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2546         ql84_mgmt->len =
2547                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2548         ql84_mgmt->mgmtp.u.config.id =
2549                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2550         ql84_mgmt->mgmtp.u.config.param0 =
2551                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2552         ql84_mgmt->mgmtp.u.config.param1 =
2553                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2554         ql84_mgmt->mgmtp.u.info.type =
2555                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2556         ql84_mgmt->mgmtp.u.info.context =
2557                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2558
2559         rsp_hdr_len = bsg_job->request_payload.payload_len;
2560
2561         mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2562         if (mn == NULL) {
2563                 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2564                 "failed%lu\n", __func__, ha->host_no));
                     vfree(ql84_mgmt);
2565                 return -ENOMEM;
2566         }
2567
2568         memset(mn, 0, sizeof (struct access_chip_84xx));
2569
2570         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2571         mn->entry_count = 1;
2572
2573         switch (ql84_mgmt->cmd) {
2574         case QLA84_MGMT_READ_MEM:
2575                 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2576                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2577                 break;
2578         case QLA84_MGMT_WRITE_MEM:
2579                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2580                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2581                 break;
2582         case QLA84_MGMT_CHNG_CONFIG:
2583                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2584                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2585                 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2586                 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2587                 break;
2588         case QLA84_MGMT_GET_INFO:
2589                 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2590                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2591                 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2592                 break;
2593         default:
2594                 ret = -EIO;
2595                 goto exit_mgmt0;
2596         }
2597
2598         if ((len == ql84_mgmt->len) &&
2599                 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2600                 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2601                                 &mgmt_dma, GFP_KERNEL);
2602                 if (mgmt_b == NULL) {
2603                         DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2604                         "failed%lu\n", __func__, ha->host_no));
2605                         ret = -ENOMEM;
2606                         goto exit_mgmt0;
2607                 }
2608                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2609                 mn->dseg_count = cpu_to_le16(1);
2610                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2611                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2612                 mn->dseg_length = cpu_to_le32(len);
2613
2614                 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2615                         memcpy(mgmt_b, ql84_mgmt->payload, len);
2616                 }
2617         }
2618
2619         ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2620         if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2621                 || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2622                         if (ret != QLA_SUCCESS)
2623                                 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2624                                         __func__, ha->host_no));
2625         } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2626                         (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2627         }
2628
2629         if (mgmt_b)
2630                 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2631
2632 exit_mgmt0:
2633         dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
             vfree(ql84_mgmt);
2634         return ret;
2635 }