[SCSI] qla2xxx: Add BSG support for FC ELS/CT passthrough and vendor commands.
drivers/scsi/qla2xxx/qla_attr.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 static int qla24xx_vport_disable(struct fc_vport *, bool);
14 static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
15 int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
16 static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
17 /* SYSFS attributes --------------------------------------------------------- */
18
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
21                            struct bin_attribute *bin_attr,
22                            char *buf, loff_t off, size_t count)
23 {
24         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25             struct device, kobj)));
26         struct qla_hw_data *ha = vha->hw;
27
28         if (ha->fw_dump_reading == 0)
29                 return 0;
30
31         return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
32                                         ha->fw_dump_len);
33 }
34
35 static ssize_t
36 qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
37                             struct bin_attribute *bin_attr,
38                             char *buf, loff_t off, size_t count)
39 {
40         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
41             struct device, kobj)));
42         struct qla_hw_data *ha = vha->hw;
43         int reading;
44
45         if (off != 0)
46                 return (0);
47
48         reading = simple_strtol(buf, NULL, 10);
49         switch (reading) {
50         case 0:
51                 if (!ha->fw_dump_reading)
52                         break;
53
54                 qla_printk(KERN_INFO, ha,
55                     "Firmware dump cleared on (%ld).\n", vha->host_no);
56
57                 ha->fw_dump_reading = 0;
58                 ha->fw_dumped = 0;
59                 break;
60         case 1:
61                 if (ha->fw_dumped && !ha->fw_dump_reading) {
62                         ha->fw_dump_reading = 1;
63
64                         qla_printk(KERN_INFO, ha,
65                             "Raw firmware dump ready for read on (%ld).\n",
66                             vha->host_no);
67                 }
68                 break;
69         case 2:
70                 qla2x00_alloc_fw_dump(vha);
71                 break;
72         case 3:
73                 qla2x00_system_error(vha);
74                 break;
75         }
76         return (count);
77 }
78
79 static struct bin_attribute sysfs_fw_dump_attr = {
80         .attr = {
81                 .name = "fw_dump",
82                 .mode = S_IRUSR | S_IWUSR,
83         },
84         .size = 0,
85         .read = qla2x00_sysfs_read_fw_dump,
86         .write = qla2x00_sysfs_write_fw_dump,
87 };
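/*
 * Illustrative usage of the "fw_dump" attribute above, driven from a
 * shell (the sysfs directory containing the file is an assumption and
 * depends on the host's sysfs layout):
 *
 *   echo 1 > fw_dump       expose a previously captured dump for reading
 *   cat fw_dump > dump.bin
 *   echo 0 > fw_dump       release the dump
 *   echo 2 > fw_dump       (re)allocate the firmware dump buffer
 *   echo 3 > fw_dump       force a firmware system error
 */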
88
89 static ssize_t
90 qla2x00_sysfs_read_nvram(struct kobject *kobj,
91                          struct bin_attribute *bin_attr,
92                          char *buf, loff_t off, size_t count)
93 {
94         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
95             struct device, kobj)));
96         struct qla_hw_data *ha = vha->hw;
97
98         if (!capable(CAP_SYS_ADMIN))
99                 return 0;
100
101         if (IS_NOCACHE_VPD_TYPE(ha))
102                 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
103                     ha->nvram_size);
104         return memory_read_from_buffer(buf, count, &off, ha->nvram,
105                                         ha->nvram_size);
106 }
107
108 static ssize_t
109 qla2x00_sysfs_write_nvram(struct kobject *kobj,
110                           struct bin_attribute *bin_attr,
111                           char *buf, loff_t off, size_t count)
112 {
113         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
114             struct device, kobj)));
115         struct qla_hw_data *ha = vha->hw;
116         uint16_t        cnt;
117
118         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
119             !ha->isp_ops->write_nvram)
120                 return 0;
121
122         /* Checksum NVRAM. */
123         if (IS_FWI2_CAPABLE(ha)) {
124                 uint32_t *iter;
125                 uint32_t chksum;
126
127                 iter = (uint32_t *)buf;
128                 chksum = 0;
129                 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
130                         chksum += le32_to_cpu(*iter++);
131                 chksum = ~chksum + 1;
132                 *iter = cpu_to_le32(chksum);
133         } else {
134                 uint8_t *iter;
135                 uint8_t chksum;
136
137                 iter = (uint8_t *)buf;
138                 chksum = 0;
139                 for (cnt = 0; cnt < count - 1; cnt++)
140                         chksum += *iter++;
141                 chksum = ~chksum + 1;
142                 *iter = chksum;
143         }
144
145         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
146                 qla_printk(KERN_WARNING, ha,
147                     "HBA not online, failing NVRAM update.\n");
148                 return -EAGAIN;
149         }
150
151         /* Write NVRAM. */
152         ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
153         ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
154             count);
155
156         /* NVRAM settings take effect immediately. */
157         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
158         qla2xxx_wake_dpc(vha);
159         qla2x00_wait_for_chip_reset(vha);
160
161         return (count);
162 }
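/*
 * A minimal sketch (illustrative only; the helper name is hypothetical
 * and a little-endian host is assumed) of the checksum a userspace
 * updater would place in the last 32-bit word of an FWI2 NVRAM image
 * before writing it through the "nvram" attribute. It mirrors the loop
 * above: sum every word except the last and store the two's complement
 * of that sum in the final word.
 *
 *   static uint32_t nvram_csum(const uint32_t *image, size_t words)
 *   {
 *           uint32_t sum = 0;
 *           size_t i;
 *
 *           for (i = 0; i < words - 1; i++)
 *                   sum += image[i];
 *           return ~sum + 1;
 *   }
 */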
163
164 static struct bin_attribute sysfs_nvram_attr = {
165         .attr = {
166                 .name = "nvram",
167                 .mode = S_IRUSR | S_IWUSR,
168         },
169         .size = 512,
170         .read = qla2x00_sysfs_read_nvram,
171         .write = qla2x00_sysfs_write_nvram,
172 };
173
174 static ssize_t
175 qla2x00_sysfs_read_optrom(struct kobject *kobj,
176                           struct bin_attribute *bin_attr,
177                           char *buf, loff_t off, size_t count)
178 {
179         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
180             struct device, kobj)));
181         struct qla_hw_data *ha = vha->hw;
182
183         if (ha->optrom_state != QLA_SREADING)
184                 return 0;
185
186         return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
187                                         ha->optrom_region_size);
188 }
189
190 static ssize_t
191 qla2x00_sysfs_write_optrom(struct kobject *kobj,
192                            struct bin_attribute *bin_attr,
193                            char *buf, loff_t off, size_t count)
194 {
195         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
196             struct device, kobj)));
197         struct qla_hw_data *ha = vha->hw;
198
199         if (ha->optrom_state != QLA_SWRITING)
200                 return -EINVAL;
201         if (off > ha->optrom_region_size)
202                 return -ERANGE;
203         if (off + count > ha->optrom_region_size)
204                 count = ha->optrom_region_size - off;
205
206         memcpy(&ha->optrom_buffer[off], buf, count);
207
208         return count;
209 }
210
211 static struct bin_attribute sysfs_optrom_attr = {
212         .attr = {
213                 .name = "optrom",
214                 .mode = S_IRUSR | S_IWUSR,
215         },
216         .size = 0,
217         .read = qla2x00_sysfs_read_optrom,
218         .write = qla2x00_sysfs_write_optrom,
219 };
220
221 static ssize_t
222 qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
223                                struct bin_attribute *bin_attr,
224                                char *buf, loff_t off, size_t count)
225 {
226         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
227             struct device, kobj)));
228         struct qla_hw_data *ha = vha->hw;
229
230         uint32_t start = 0;
231         uint32_t size = ha->optrom_size;
232         int val, valid;
233
234         if (off)
235                 return 0;
236
237         if (unlikely(pci_channel_offline(ha->pdev)))
238                 return 0;
239
240         if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
241                 return -EINVAL;
242         if (start > ha->optrom_size)
243                 return -EINVAL;
244
245         switch (val) {
246         case 0:
247                 if (ha->optrom_state != QLA_SREADING &&
248                     ha->optrom_state != QLA_SWRITING)
249                         break;
250
251                 ha->optrom_state = QLA_SWAITING;
252
253                 DEBUG2(qla_printk(KERN_INFO, ha,
254                     "Freeing flash region allocation -- 0x%x bytes.\n",
255                     ha->optrom_region_size));
256
257                 vfree(ha->optrom_buffer);
258                 ha->optrom_buffer = NULL;
259                 break;
260         case 1:
261                 if (ha->optrom_state != QLA_SWAITING)
262                         break;
263
264                 ha->optrom_region_start = start;
265                 ha->optrom_region_size = start + size > ha->optrom_size ?
266                     ha->optrom_size - start : size;
267
268                 ha->optrom_state = QLA_SREADING;
269                 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
270                 if (ha->optrom_buffer == NULL) {
271                         qla_printk(KERN_WARNING, ha,
272                             "Unable to allocate memory for optrom retrieval "
273                             "(%x).\n", ha->optrom_region_size);
274
275                         ha->optrom_state = QLA_SWAITING;
276                         return count;
277                 }
278
279                 DEBUG2(qla_printk(KERN_INFO, ha,
280                     "Reading flash region -- 0x%x/0x%x.\n",
281                     ha->optrom_region_start, ha->optrom_region_size));
282
283                 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
284                 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
285                     ha->optrom_region_start, ha->optrom_region_size);
286                 break;
287         case 2:
288                 if (ha->optrom_state != QLA_SWAITING)
289                         break;
290
291                 /*
292                  * We need to be more restrictive on which FLASH regions are
293                  * allowed to be updated via user-space.  Regions accessible
294                  * via this method include:
295                  *
296                  * ISP21xx/ISP22xx/ISP23xx type boards:
297                  *
298                  *      0x000000 -> 0x020000 -- Boot code.
299                  *
300                  * ISP2322/ISP24xx type boards:
301                  *
302                  *      0x000000 -> 0x07ffff -- Boot code.
303                  *      0x080000 -> 0x0fffff -- Firmware.
304                  *
305                  * ISP25xx type boards:
306                  *
307                  *      0x000000 -> 0x07ffff -- Boot code.
308                  *      0x080000 -> 0x0fffff -- Firmware.
309                  *      0x120000 -> 0x12ffff -- VPD and HBA parameters.
310                  */
311                 valid = 0;
312                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
313                         valid = 1;
314                 else if (start == (ha->flt_region_boot * 4) ||
315                     start == (ha->flt_region_fw * 4))
316                         valid = 1;
317                 else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
318                         valid = 1;
319                 if (!valid) {
320                         qla_printk(KERN_WARNING, ha,
321                             "Invalid start region 0x%x/0x%x.\n", start, size);
322                         return -EINVAL;
323                 }
324
325                 ha->optrom_region_start = start;
326                 ha->optrom_region_size = start + size > ha->optrom_size ?
327                     ha->optrom_size - start : size;
328
329                 ha->optrom_state = QLA_SWRITING;
330                 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
331                 if (ha->optrom_buffer == NULL) {
332                         qla_printk(KERN_WARNING, ha,
333                             "Unable to allocate memory for optrom update "
334                             "(%x).\n", ha->optrom_region_size);
335
336                         ha->optrom_state = QLA_SWAITING;
337                         return count;
338                 }
339
340                 DEBUG2(qla_printk(KERN_INFO, ha,
341                     "Staging flash region write -- 0x%x/0x%x.\n",
342                     ha->optrom_region_start, ha->optrom_region_size));
343
344                 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
345                 break;
346         case 3:
347                 if (ha->optrom_state != QLA_SWRITING)
348                         break;
349
350                 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
351                         qla_printk(KERN_WARNING, ha,
352                             "HBA not online, failing flash update.\n");
353                         return -EAGAIN;
354                 }
355
356                 DEBUG2(qla_printk(KERN_INFO, ha,
357                     "Writing flash region -- 0x%x/0x%x.\n",
358                     ha->optrom_region_start, ha->optrom_region_size));
359
360                 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
361                     ha->optrom_region_start, ha->optrom_region_size);
362                 break;
363         default:
364                 count = -EINVAL;
365         }
366         return count;
367 }
368
369 static struct bin_attribute sysfs_optrom_ctl_attr = {
370         .attr = {
371                 .name = "optrom_ctl",
372                 .mode = S_IWUSR,
373         },
374         .size = 0,
375         .write = qla2x00_sysfs_write_optrom_ctl,
376 };
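/*
 * Illustrative flash-update sequence using the "optrom_ctl" and
 * "optrom" attributes above (shell commands are an example only; the
 * sysfs directory and the region offsets depend on the board). The
 * start and size fields are parsed as hex:
 *
 *   echo "1:0x0:0x100000" > optrom_ctl      stage a read of the region
 *   cat optrom > flash.bin                   fetch the staged data
 *   echo "0" > optrom_ctl                    free the staging buffer
 *
 *   echo "2:0x80000:0x80000" > optrom_ctl    stage a write of a region
 *   cat new_image.bin > optrom               fill the staging buffer
 *   echo "3" > optrom_ctl                    burn the staged data to flash
 *   echo "0" > optrom_ctl                    free the staging buffer
 */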
377
378 static ssize_t
379 qla2x00_sysfs_read_vpd(struct kobject *kobj,
380                        struct bin_attribute *bin_attr,
381                        char *buf, loff_t off, size_t count)
382 {
383         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
384             struct device, kobj)));
385         struct qla_hw_data *ha = vha->hw;
386
387         if (unlikely(pci_channel_offline(ha->pdev)))
388                 return 0;
389
390         if (!capable(CAP_SYS_ADMIN))
391                 return 0;
392
393         if (IS_NOCACHE_VPD_TYPE(ha))
394                 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
395                     ha->vpd_size);
396         return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
397 }
398
399 static ssize_t
400 qla2x00_sysfs_write_vpd(struct kobject *kobj,
401                         struct bin_attribute *bin_attr,
402                         char *buf, loff_t off, size_t count)
403 {
404         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
405             struct device, kobj)));
406         struct qla_hw_data *ha = vha->hw;
407         uint8_t *tmp_data;
408
409         if (unlikely(pci_channel_offline(ha->pdev)))
410                 return 0;
411
412         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
413             !ha->isp_ops->write_nvram)
414                 return 0;
415
416         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
417                 qla_printk(KERN_WARNING, ha,
418                     "HBA not online, failing VPD update.\n");
419                 return -EAGAIN;
420         }
421
422         /* Write NVRAM. */
423         ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
424         ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
425
426         /* Update flash version information for 4Gb & above. */
427         if (!IS_FWI2_CAPABLE(ha))
428                 goto done;
429
430         tmp_data = vmalloc(256);
431         if (!tmp_data) {
432                 qla_printk(KERN_WARNING, ha,
433                     "Unable to allocate memory for VPD information update.\n");
434                 goto done;
435         }
436         ha->isp_ops->get_flash_version(vha, tmp_data);
437         vfree(tmp_data);
438 done:
439         return count;
440 }
441
442 static struct bin_attribute sysfs_vpd_attr = {
443         .attr = {
444                 .name = "vpd",
445                 .mode = S_IRUSR | S_IWUSR,
446         },
447         .size = 0,
448         .read = qla2x00_sysfs_read_vpd,
449         .write = qla2x00_sysfs_write_vpd,
450 };
451
452 static ssize_t
453 qla2x00_sysfs_read_sfp(struct kobject *kobj,
454                        struct bin_attribute *bin_attr,
455                        char *buf, loff_t off, size_t count)
456 {
457         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
458             struct device, kobj)));
459         struct qla_hw_data *ha = vha->hw;
460         uint16_t iter, addr, offset;
461         int rval;
462
463         if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
464                 return 0;
465
466         if (ha->sfp_data)
467                 goto do_read;
468
469         ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
470             &ha->sfp_data_dma);
471         if (!ha->sfp_data) {
472                 qla_printk(KERN_WARNING, ha,
473                     "Unable to allocate memory for SFP read-data.\n");
474                 return 0;
475         }
476
477 do_read:
478         memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
479         addr = 0xa0;
480         for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
481             iter++, offset += SFP_BLOCK_SIZE) {
482                 if (iter == 4) {
483                         /* Skip to next device address. */
484                         addr = 0xa2;
485                         offset = 0;
486                 }
487
488                 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
489                     SFP_BLOCK_SIZE);
490                 if (rval != QLA_SUCCESS) {
491                         qla_printk(KERN_WARNING, ha,
492                             "Unable to read SFP data (%x/%x/%x).\n", rval,
493                             addr, offset);
494                         count = 0;
495                         break;
496                 }
497                 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
498                 buf += SFP_BLOCK_SIZE;
499         }
500
501         return count;
502 }
503
504 static struct bin_attribute sysfs_sfp_attr = {
505         .attr = {
506                 .name = "sfp",
507                 .mode = S_IRUSR | S_IWUSR,
508         },
509         .size = SFP_DEV_SIZE * 2,
510         .read = qla2x00_sysfs_read_sfp,
511 };
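/*
 * The read handler above pulls SFP_DEV_SIZE * 2 bytes in SFP_BLOCK_SIZE
 * chunks: the first four chunks from two-wire device address 0xa0 and
 * the remaining chunks from 0xa2. In standard SFF-8472 transceivers
 * those addresses correspond to the serial-ID and diagnostics pages
 * respectively (the SFF-8472 mapping is noted here for orientation,
 * not taken from this file).
 */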
512
513 static ssize_t
514 qla2x00_sysfs_write_reset(struct kobject *kobj,
515                         struct bin_attribute *bin_attr,
516                         char *buf, loff_t off, size_t count)
517 {
518         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
519             struct device, kobj)));
520         struct qla_hw_data *ha = vha->hw;
521         int type;
522
523         if (off != 0)
524                 return 0;
525
526         type = simple_strtol(buf, NULL, 10);
527         switch (type) {
528         case 0x2025c:
529                 qla_printk(KERN_INFO, ha,
530                     "Issuing ISP reset on (%ld).\n", vha->host_no);
531
532                 scsi_block_requests(vha->host);
533                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
534                 qla2xxx_wake_dpc(vha);
535                 qla2x00_wait_for_chip_reset(vha);
536                 scsi_unblock_requests(vha->host);
537                 break;
538         case 0x2025d:
539                 if (!IS_QLA81XX(ha))
540                         break;
541
542                 qla_printk(KERN_INFO, ha,
543                     "Issuing MPI reset on (%ld).\n", vha->host_no);
544
545                 /* Make sure FC side is not in reset */
546                 qla2x00_wait_for_hba_online(vha);
547
548                 /* Issue MPI reset */
549                 scsi_block_requests(vha->host);
550                 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
551                         qla_printk(KERN_WARNING, ha,
552                             "MPI reset failed on (%ld).\n", vha->host_no);
553                 scsi_unblock_requests(vha->host);
554                 break;
555         }
556         return count;
557 }
558
559 static struct bin_attribute sysfs_reset_attr = {
560         .attr = {
561                 .name = "reset",
562                 .mode = S_IWUSR,
563         },
564         .size = 0,
565         .write = qla2x00_sysfs_write_reset,
566 };
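/*
 * Note on the "reset" attribute above: the written value is parsed in
 * base 10, so the trigger codes are supplied in decimal form. 131676
 * (0x2025c) requests a full ISP reset and 131677 (0x2025d) an MPI
 * reset on ISP81xx parts. Example (path is illustrative):
 *
 *   echo 131676 > reset
 */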
567
568 static ssize_t
569 qla2x00_sysfs_write_edc(struct kobject *kobj,
570                         struct bin_attribute *bin_attr,
571                         char *buf, loff_t off, size_t count)
572 {
573         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
574             struct device, kobj)));
575         struct qla_hw_data *ha = vha->hw;
576         uint16_t dev, adr, opt, len;
577         int rval;
578
579         ha->edc_data_len = 0;
580
581         if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
582                 return 0;
583
584         if (!ha->edc_data) {
585                 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
586                     &ha->edc_data_dma);
587                 if (!ha->edc_data) {
588                         DEBUG2(qla_printk(KERN_INFO, ha,
589                             "Unable to allocate memory for EDC write.\n"));
590                         return 0;
591                 }
592         }
593
594         dev = le16_to_cpup((void *)&buf[0]);
595         adr = le16_to_cpup((void *)&buf[2]);
596         opt = le16_to_cpup((void *)&buf[4]);
597         len = le16_to_cpup((void *)&buf[6]);
598
599         if (!(opt & BIT_0))
600                 if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
601                         return -EINVAL;
602
603         memcpy(ha->edc_data, &buf[8], len);
604
605         rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
606             ha->edc_data, len, opt);
607         if (rval != QLA_SUCCESS) {
608                 DEBUG2(qla_printk(KERN_INFO, ha,
609                     "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
610                     rval, dev, adr, opt, len, *buf));
611                 return 0;
612         }
613
614         return count;
615 }
616
617 static struct bin_attribute sysfs_edc_attr = {
618         .attr = {
619                 .name = "edc",
620                 .mode = S_IWUSR,
621         },
622         .size = 0,
623         .write = qla2x00_sysfs_write_edc,
624 };
625
626 static ssize_t
627 qla2x00_sysfs_write_edc_status(struct kobject *kobj,
628                         struct bin_attribute *bin_attr,
629                         char *buf, loff_t off, size_t count)
630 {
631         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
632             struct device, kobj)));
633         struct qla_hw_data *ha = vha->hw;
634         uint16_t dev, adr, opt, len;
635         int rval;
636
637         ha->edc_data_len = 0;
638
639         if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
640                 return 0;
641
642         if (!ha->edc_data) {
643                 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
644                     &ha->edc_data_dma);
645                 if (!ha->edc_data) {
646                         DEBUG2(qla_printk(KERN_INFO, ha,
647                             "Unable to allocate memory for EDC status.\n"));
648                         return 0;
649                 }
650         }
651
652         dev = le16_to_cpup((void *)&buf[0]);
653         adr = le16_to_cpup((void *)&buf[2]);
654         opt = le16_to_cpup((void *)&buf[4]);
655         len = le16_to_cpup((void *)&buf[6]);
656
657         if (!(opt & BIT_0))
658                 if (len == 0 || len > DMA_POOL_SIZE)
659                         return -EINVAL;
660
661         memset(ha->edc_data, 0, len);
662         rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
663             ha->edc_data, len, opt);
664         if (rval != QLA_SUCCESS) {
665                 DEBUG2(qla_printk(KERN_INFO, ha,
666                     "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
667                     rval, dev, adr, opt, len));
668                 return 0;
669         }
670
671         ha->edc_data_len = len;
672
673         return count;
674 }
675
676 static ssize_t
677 qla2x00_sysfs_read_edc_status(struct kobject *kobj,
678                            struct bin_attribute *bin_attr,
679                            char *buf, loff_t off, size_t count)
680 {
681         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
682             struct device, kobj)));
683         struct qla_hw_data *ha = vha->hw;
684
685         if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
686                 return 0;
687
688         if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
689                 return -EINVAL;
690
691         memcpy(buf, ha->edc_data, ha->edc_data_len);
692
693         return ha->edc_data_len;
694 }
695
696 static struct bin_attribute sysfs_edc_status_attr = {
697         .attr = {
698                 .name = "edc_status",
699                 .mode = S_IRUSR | S_IWUSR,
700         },
701         .size = 0,
702         .write = qla2x00_sysfs_write_edc_status,
703         .read = qla2x00_sysfs_read_edc_status,
704 };
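/*
 * Illustrative layout of the buffer written to the "edc" and
 * "edc_status" attributes above; the four header fields are
 * little-endian 16-bit values and the structure name is hypothetical:
 *
 *   struct edc_request {
 *           uint16_t dev;        two-wire device address
 *           uint16_t adr;        register offset within the device
 *           uint16_t opt;        option bits; BIT_0 bypasses the length check
 *           uint16_t len;        payload length ("edc") or bytes to read back
 *           uint8_t  data[];     payload, present only for the "edc" write
 *   };
 *
 * A subsequent read of "edc_status" returns the len bytes fetched by
 * the preceding write to that attribute.
 */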
705
706 static ssize_t
707 qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
708                        struct bin_attribute *bin_attr,
709                        char *buf, loff_t off, size_t count)
710 {
711         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
712             struct device, kobj)));
713         struct qla_hw_data *ha = vha->hw;
714         int rval;
715         uint16_t actual_size;
716
717         if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
718                 return 0;
719
720         if (ha->xgmac_data)
721                 goto do_read;
722
723         ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
724             &ha->xgmac_data_dma, GFP_KERNEL);
725         if (!ha->xgmac_data) {
726                 qla_printk(KERN_WARNING, ha,
727                     "Unable to allocate memory for XGMAC read-data.\n");
728                 return 0;
729         }
730
731 do_read:
732         actual_size = 0;
733         memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
734
735         rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
736             XGMAC_DATA_SIZE, &actual_size);
737         if (rval != QLA_SUCCESS) {
738                 qla_printk(KERN_WARNING, ha,
739                     "Unable to read XGMAC data (%x).\n", rval);
740                 count = 0;
741         }
742
743         count = actual_size > count ? count: actual_size;
744         memcpy(buf, ha->xgmac_data, count);
745
746         return count;
747 }
748
749 static struct bin_attribute sysfs_xgmac_stats_attr = {
750         .attr = {
751                 .name = "xgmac_stats",
752                 .mode = S_IRUSR,
753         },
754         .size = 0,
755         .read = qla2x00_sysfs_read_xgmac_stats,
756 };
757
758 static ssize_t
759 qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
760                        struct bin_attribute *bin_attr,
761                        char *buf, loff_t off, size_t count)
762 {
763         struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
764             struct device, kobj)));
765         struct qla_hw_data *ha = vha->hw;
766         int rval;
767         uint16_t actual_size;
768
769         if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
770                 return 0;
771
772         if (ha->dcbx_tlv)
773                 goto do_read;
774
775         ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
776             &ha->dcbx_tlv_dma, GFP_KERNEL);
777         if (!ha->dcbx_tlv) {
778                 qla_printk(KERN_WARNING, ha,
779                     "Unable to allocate memory for DCBX TLV read-data.\n");
780                 return 0;
781         }
782
783 do_read:
784         actual_size = 0;
785         memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
786
787         rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
788             DCBX_TLV_DATA_SIZE);
789         if (rval != QLA_SUCCESS) {
790                 qla_printk(KERN_WARNING, ha,
791                     "Unable to read DCBX TLV data (%x).\n", rval);
792                 count = 0;
793         }
794
795         memcpy(buf, ha->dcbx_tlv, count);
796
797         return count;
798 }
799
800 static struct bin_attribute sysfs_dcbx_tlv_attr = {
801         .attr = {
802                 .name = "dcbx_tlv",
803                 .mode = S_IRUSR,
804         },
805         .size = 0,
806         .read = qla2x00_sysfs_read_dcbx_tlv,
807 };
808
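/*
 * Despite its name, is4GBp_only gates each attribute on the controller
 * family (see qla2x00_alloc_sysfs_attr() below): 0 = always created,
 * 1 = FWI2-capable (4Gb and newer) ISPs, 2 = ISP25xx only,
 * 3 = ISP81xx only.
 */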
809 static struct sysfs_entry {
810         char *name;
811         struct bin_attribute *attr;
812         int is4GBp_only;
813 } bin_file_entries[] = {
814         { "fw_dump", &sysfs_fw_dump_attr, },
815         { "nvram", &sysfs_nvram_attr, },
816         { "optrom", &sysfs_optrom_attr, },
817         { "optrom_ctl", &sysfs_optrom_ctl_attr, },
818         { "vpd", &sysfs_vpd_attr, 1 },
819         { "sfp", &sysfs_sfp_attr, 1 },
820         { "reset", &sysfs_reset_attr, },
821         { "edc", &sysfs_edc_attr, 2 },
822         { "edc_status", &sysfs_edc_status_attr, 2 },
823         { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
824         { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
825         { NULL },
826 };
827
828 void
829 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
830 {
831         struct Scsi_Host *host = vha->host;
832         struct sysfs_entry *iter;
833         int ret;
834
835         for (iter = bin_file_entries; iter->name; iter++) {
836                 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
837                         continue;
838                 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
839                         continue;
840                 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
841                         continue;
842
843                 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
844                     iter->attr);
845                 if (ret)
846                         qla_printk(KERN_INFO, vha->hw,
847                             "Unable to create sysfs %s binary attribute "
848                             "(%d).\n", iter->name, ret);
849         }
850 }
851
852 void
853 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
854 {
855         struct Scsi_Host *host = vha->host;
856         struct sysfs_entry *iter;
857         struct qla_hw_data *ha = vha->hw;
858
859         for (iter = bin_file_entries; iter->name; iter++) {
860                 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
861                         continue;
862                 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
863                         continue;
864                 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
865                         continue;
866
867                 sysfs_remove_bin_file(&host->shost_gendev.kobj,
868                     iter->attr);
869         }
870
871         if (ha->beacon_blink_led == 1)
872                 ha->isp_ops->beacon_off(vha);
873 }
874
875 /* Scsi_Host attributes. */
876
877 static ssize_t
878 qla2x00_drvr_version_show(struct device *dev,
879                           struct device_attribute *attr, char *buf)
880 {
881         return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
882 }
883
884 static ssize_t
885 qla2x00_fw_version_show(struct device *dev,
886                         struct device_attribute *attr, char *buf)
887 {
888         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
889         struct qla_hw_data *ha = vha->hw;
890         char fw_str[128];
891
892         return snprintf(buf, PAGE_SIZE, "%s\n",
893             ha->isp_ops->fw_version_str(vha, fw_str));
894 }
895
896 static ssize_t
897 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
898                         char *buf)
899 {
900         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
901         struct qla_hw_data *ha = vha->hw;
902         uint32_t sn;
903
904         if (IS_FWI2_CAPABLE(ha)) {
905                 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
906                 return strlen(strcat(buf, "\n"));
907         }
908
909         sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
910         return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
911             sn % 100000);
912 }
913
914 static ssize_t
915 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
916                       char *buf)
917 {
918         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
919         return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
920 }
921
922 static ssize_t
923 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
924                     char *buf)
925 {
926         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
927         struct qla_hw_data *ha = vha->hw;
928         return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
929             ha->product_id[0], ha->product_id[1], ha->product_id[2],
930             ha->product_id[3]);
931 }
932
933 static ssize_t
934 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
935                         char *buf)
936 {
937         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
938         return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
939 }
940
941 static ssize_t
942 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
943                         char *buf)
944 {
945         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
946         return snprintf(buf, PAGE_SIZE, "%s\n",
947             vha->hw->model_desc ? vha->hw->model_desc : "");
948 }
949
950 static ssize_t
951 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
952                       char *buf)
953 {
954         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
955         char pci_info[30];
956
957         return snprintf(buf, PAGE_SIZE, "%s\n",
958             vha->hw->isp_ops->pci_info_str(vha, pci_info));
959 }
960
961 static ssize_t
962 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
963                         char *buf)
964 {
965         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
966         struct qla_hw_data *ha = vha->hw;
967         int len = 0;
968
969         if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
970             atomic_read(&vha->loop_state) == LOOP_DEAD)
971                 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
972         else if (atomic_read(&vha->loop_state) != LOOP_READY ||
973             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
974             test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
975                 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
976         else {
977                 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
978
979                 switch (ha->current_topology) {
980                 case ISP_CFG_NL:
981                         len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
982                         break;
983                 case ISP_CFG_FL:
984                         len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
985                         break;
986                 case ISP_CFG_N:
987                         len += snprintf(buf + len, PAGE_SIZE-len,
988                             "N_Port to N_Port\n");
989                         break;
990                 case ISP_CFG_F:
991                         len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
992                         break;
993                 default:
994                         len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
995                         break;
996                 }
997         }
998         return len;
999 }
1000
1001 static ssize_t
1002 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1003                  char *buf)
1004 {
1005         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1006         int len = 0;
1007
1008         switch (vha->hw->zio_mode) {
1009         case QLA_ZIO_MODE_6:
1010                 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1011                 break;
1012         case QLA_ZIO_DISABLED:
1013                 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1014                 break;
1015         }
1016         return len;
1017 }
1018
1019 static ssize_t
1020 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1021                   const char *buf, size_t count)
1022 {
1023         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1024         struct qla_hw_data *ha = vha->hw;
1025         int val = 0;
1026         uint16_t zio_mode;
1027
1028         if (!IS_ZIO_SUPPORTED(ha))
1029                 return -ENOTSUPP;
1030
1031         if (sscanf(buf, "%d", &val) != 1)
1032                 return -EINVAL;
1033
1034         if (val)
1035                 zio_mode = QLA_ZIO_MODE_6;
1036         else
1037                 zio_mode = QLA_ZIO_DISABLED;
1038
1039         /* Update per-hba values and queue a reset. */
1040         if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1041                 ha->zio_mode = zio_mode;
1042                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1043         }
1044         return strlen(buf);
1045 }
1046
1047 static ssize_t
1048 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1049                        char *buf)
1050 {
1051         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1052
1053         return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1054 }
1055
1056 static ssize_t
1057 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1058                         const char *buf, size_t count)
1059 {
1060         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1061         int val = 0;
1062         uint16_t zio_timer;
1063
1064         if (sscanf(buf, "%d", &val) != 1)
1065                 return -EINVAL;
1066         if (val > 25500 || val < 100)
1067                 return -ERANGE;
1068
1069         zio_timer = (uint16_t)(val / 100);
1070         vha->hw->zio_timer = zio_timer;
1071
1072         return strlen(buf);
1073 }
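/*
 * Note on the two attributes above: "zio" accepts any non-zero value to
 * select ZIO mode 6 and 0 to disable it (the change is applied through
 * an ISP abort), while "zio_timer" takes a delay in microseconds
 * (100..25500) that is stored internally in 100-microsecond units.
 */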
1074
1075 static ssize_t
1076 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1077                     char *buf)
1078 {
1079         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1080         int len = 0;
1081
1082         if (vha->hw->beacon_blink_led)
1083                 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1084         else
1085                 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1086         return len;
1087 }
1088
1089 static ssize_t
1090 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1091                      const char *buf, size_t count)
1092 {
1093         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1094         struct qla_hw_data *ha = vha->hw;
1095         int val = 0;
1096         int rval;
1097
1098         if (IS_QLA2100(ha) || IS_QLA2200(ha))
1099                 return -EPERM;
1100
1101         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1102                 qla_printk(KERN_WARNING, ha,
1103                     "Abort ISP active -- ignoring beacon request.\n");
1104                 return -EBUSY;
1105         }
1106
1107         if (sscanf(buf, "%d", &val) != 1)
1108                 return -EINVAL;
1109
1110         if (val)
1111                 rval = ha->isp_ops->beacon_on(vha);
1112         else
1113                 rval = ha->isp_ops->beacon_off(vha);
1114
1115         if (rval != QLA_SUCCESS)
1116                 count = 0;
1117
1118         return count;
1119 }
1120
1121 static ssize_t
1122 qla2x00_optrom_bios_version_show(struct device *dev,
1123                                  struct device_attribute *attr, char *buf)
1124 {
1125         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1126         struct qla_hw_data *ha = vha->hw;
1127         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1128             ha->bios_revision[0]);
1129 }
1130
1131 static ssize_t
1132 qla2x00_optrom_efi_version_show(struct device *dev,
1133                                 struct device_attribute *attr, char *buf)
1134 {
1135         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1136         struct qla_hw_data *ha = vha->hw;
1137         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1138             ha->efi_revision[0]);
1139 }
1140
1141 static ssize_t
1142 qla2x00_optrom_fcode_version_show(struct device *dev,
1143                                   struct device_attribute *attr, char *buf)
1144 {
1145         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1146         struct qla_hw_data *ha = vha->hw;
1147         return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1148             ha->fcode_revision[0]);
1149 }
1150
1151 static ssize_t
1152 qla2x00_optrom_fw_version_show(struct device *dev,
1153                                struct device_attribute *attr, char *buf)
1154 {
1155         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1156         struct qla_hw_data *ha = vha->hw;
1157         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1158             ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1159             ha->fw_revision[3]);
1160 }
1161
1162 static ssize_t
1163 qla2x00_total_isp_aborts_show(struct device *dev,
1164                               struct device_attribute *attr, char *buf)
1165 {
1166         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1167         struct qla_hw_data *ha = vha->hw;
1168         return snprintf(buf, PAGE_SIZE, "%d\n",
1169             ha->qla_stats.total_isp_aborts);
1170 }
1171
1172 static ssize_t
1173 qla24xx_84xx_fw_version_show(struct device *dev,
1174         struct device_attribute *attr, char *buf)
1175 {
1176         int rval = QLA_SUCCESS;
1177         uint16_t status[2] = {0, 0};
1178         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1179         struct qla_hw_data *ha = vha->hw;
1180
1181         if (IS_QLA84XX(ha) && ha->cs84xx) {
1182                 if (ha->cs84xx->op_fw_version == 0)
1183                         rval = qla84xx_verify_chip(vha, status);
1184
1185                 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1186                         return snprintf(buf, PAGE_SIZE, "%u\n",
1187                             (uint32_t)
1188                             ha->cs84xx->op_fw_version);
1189         }
1190
1191         return snprintf(buf, PAGE_SIZE, "\n");
1192 }
1193
1194 static ssize_t
1195 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1196     char *buf)
1197 {
1198         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1199         struct qla_hw_data *ha = vha->hw;
1200
1201         if (!IS_QLA81XX(ha))
1202                 return snprintf(buf, PAGE_SIZE, "\n");
1203
1204         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1205             ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1206             ha->mpi_capabilities);
1207 }
1208
1209 static ssize_t
1210 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1211     char *buf)
1212 {
1213         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1214         struct qla_hw_data *ha = vha->hw;
1215
1216         if (!IS_QLA81XX(ha))
1217                 return snprintf(buf, PAGE_SIZE, "\n");
1218
1219         return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1220             ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1221 }
1222
1223 static ssize_t
1224 qla2x00_flash_block_size_show(struct device *dev,
1225                               struct device_attribute *attr, char *buf)
1226 {
1227         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1228         struct qla_hw_data *ha = vha->hw;
1229
1230         return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1231 }
1232
1233 static ssize_t
1234 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1235     char *buf)
1236 {
1237         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1238
1239         if (!IS_QLA81XX(vha->hw))
1240                 return snprintf(buf, PAGE_SIZE, "\n");
1241
1242         return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1243 }
1244
1245 static ssize_t
1246 qla2x00_vn_port_mac_address_show(struct device *dev,
1247     struct device_attribute *attr, char *buf)
1248 {
1249         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1250
1251         if (!IS_QLA81XX(vha->hw))
1252                 return snprintf(buf, PAGE_SIZE, "\n");
1253
1254         return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1255             vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1256             vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1257             vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1258 }
1259
1260 static ssize_t
1261 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1262     char *buf)
1263 {
1264         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1265
1266         return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1267 }
1268
1269 static ssize_t
1270 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1271     char *buf)
1272 {
1273         scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1274         int rval = QLA_FUNCTION_FAILED;
1275         uint16_t state[5];
1276
1277         if (!vha->hw->flags.eeh_busy)
1278                 rval = qla2x00_get_firmware_state(vha, state);
1279         if (rval != QLA_SUCCESS)
1280                 memset(state, -1, sizeof(state));
1281
1282         return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1283             state[1], state[2], state[3], state[4]);
1284 }
1285
1286 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1287 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1288 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1289 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1290 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1291 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1292 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1293 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1294 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1295 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1296 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1297                    qla2x00_zio_timer_store);
1298 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1299                    qla2x00_beacon_store);
1300 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1301                    qla2x00_optrom_bios_version_show, NULL);
1302 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1303                    qla2x00_optrom_efi_version_show, NULL);
1304 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1305                    qla2x00_optrom_fcode_version_show, NULL);
1306 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1307                    NULL);
1308 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1309                    NULL);
1310 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1311                    NULL);
1312 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1313 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1314 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1315                    NULL);
1316 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1317 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1318                    qla2x00_vn_port_mac_address_show, NULL);
1319 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1320 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1321
1322 struct device_attribute *qla2x00_host_attrs[] = {
1323         &dev_attr_driver_version,
1324         &dev_attr_fw_version,
1325         &dev_attr_serial_num,
1326         &dev_attr_isp_name,
1327         &dev_attr_isp_id,
1328         &dev_attr_model_name,
1329         &dev_attr_model_desc,
1330         &dev_attr_pci_info,
1331         &dev_attr_link_state,
1332         &dev_attr_zio,
1333         &dev_attr_zio_timer,
1334         &dev_attr_beacon,
1335         &dev_attr_optrom_bios_version,
1336         &dev_attr_optrom_efi_version,
1337         &dev_attr_optrom_fcode_version,
1338         &dev_attr_optrom_fw_version,
1339         &dev_attr_84xx_fw_version,
1340         &dev_attr_total_isp_aborts,
1341         &dev_attr_mpi_version,
1342         &dev_attr_phy_version,
1343         &dev_attr_flash_block_size,
1344         &dev_attr_vlan_id,
1345         &dev_attr_vn_port_mac_address,
1346         &dev_attr_fabric_param,
1347         &dev_attr_fw_state,
1348         NULL,
1349 };
1350
1351 /* Host attributes. */
1352
1353 static void
1354 qla2x00_get_host_port_id(struct Scsi_Host *shost)
1355 {
1356         scsi_qla_host_t *vha = shost_priv(shost);
1357
1358         fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1359             vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1360 }
1361
1362 static void
1363 qla2x00_get_host_speed(struct Scsi_Host *shost)
1364 {
1365         struct qla_hw_data *ha = ((struct scsi_qla_host *)
1366                                         (shost_priv(shost)))->hw;
1367         u32 speed = FC_PORTSPEED_UNKNOWN;
1368
1369         switch (ha->link_data_rate) {
1370         case PORT_SPEED_1GB:
1371                 speed = FC_PORTSPEED_1GBIT;
1372                 break;
1373         case PORT_SPEED_2GB:
1374                 speed = FC_PORTSPEED_2GBIT;
1375                 break;
1376         case PORT_SPEED_4GB:
1377                 speed = FC_PORTSPEED_4GBIT;
1378                 break;
1379         case PORT_SPEED_8GB:
1380                 speed = FC_PORTSPEED_8GBIT;
1381                 break;
1382         case PORT_SPEED_10GB:
1383                 speed = FC_PORTSPEED_10GBIT;
1384                 break;
1385         }
1386         fc_host_speed(shost) = speed;
1387 }
1388
1389 static void
1390 qla2x00_get_host_port_type(struct Scsi_Host *shost)
1391 {
1392         scsi_qla_host_t *vha = shost_priv(shost);
1393         uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1394
1395         if (vha->vp_idx) {
1396                 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1397                 return;
1398         }
1399         switch (vha->hw->current_topology) {
1400         case ISP_CFG_NL:
1401                 port_type = FC_PORTTYPE_LPORT;
1402                 break;
1403         case ISP_CFG_FL:
1404                 port_type = FC_PORTTYPE_NLPORT;
1405                 break;
1406         case ISP_CFG_N:
1407                 port_type = FC_PORTTYPE_PTP;
1408                 break;
1409         case ISP_CFG_F:
1410                 port_type = FC_PORTTYPE_NPORT;
1411                 break;
1412         }
1413         fc_host_port_type(shost) = port_type;
1414 }
1415
1416 static void
1417 qla2x00_get_starget_node_name(struct scsi_target *starget)
1418 {
1419         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1420         scsi_qla_host_t *vha = shost_priv(host);
1421         fc_port_t *fcport;
1422         u64 node_name = 0;
1423
1424         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1425                 if (fcport->rport &&
1426                     starget->id == fcport->rport->scsi_target_id) {
1427                         node_name = wwn_to_u64(fcport->node_name);
1428                         break;
1429                 }
1430         }
1431
1432         fc_starget_node_name(starget) = node_name;
1433 }
1434
1435 static void
1436 qla2x00_get_starget_port_name(struct scsi_target *starget)
1437 {
1438         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1439         scsi_qla_host_t *vha = shost_priv(host);
1440         fc_port_t *fcport;
1441         u64 port_name = 0;
1442
1443         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1444                 if (fcport->rport &&
1445                     starget->id == fcport->rport->scsi_target_id) {
1446                         port_name = wwn_to_u64(fcport->port_name);
1447                         break;
1448                 }
1449         }
1450
1451         fc_starget_port_name(starget) = port_name;
1452 }
1453
1454 static void
1455 qla2x00_get_starget_port_id(struct scsi_target *starget)
1456 {
1457         struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1458         scsi_qla_host_t *vha = shost_priv(host);
1459         fc_port_t *fcport;
1460         uint32_t port_id = ~0U;
1461
1462         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1463                 if (fcport->rport &&
1464                     starget->id == fcport->rport->scsi_target_id) {
1465                         port_id = fcport->d_id.b.domain << 16 |
1466                             fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1467                         break;
1468                 }
1469         }
1470
1471         fc_starget_port_id(starget) = port_id;
1472 }
1473
1474 static void
1475 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1476 {
1477         if (timeout)
1478                 rport->dev_loss_tmo = timeout;
1479         else
1480                 rport->dev_loss_tmo = 1;
1481 }
1482
1483 static void
1484 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1485 {
1486         struct Scsi_Host *host = rport_to_shost(rport);
1487         fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1488
1489         if (!fcport)
1490                 return;
1491
1492         if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1493                 return;
1494
1495         if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1496                 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1497                 return;
1498         }
1499
1500         /*
1501          * Transport has effectively 'deleted' the rport, clear
1502          * all local references.
1503          */
1504         spin_lock_irq(host->host_lock);
1505         fcport->rport = NULL;
1506         *((fc_port_t **)rport->dd_data) = NULL;
1507         spin_unlock_irq(host->host_lock);
1508 }
1509
1510 static void
1511 qla2x00_terminate_rport_io(struct fc_rport *rport)
1512 {
1513         fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1514
1515         if (!fcport)
1516                 return;
1517
1518         if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1519                 return;
1520
1521         if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1522                 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1523                 return;
1524         }
1525         /*
1526          * At this point all of the fcport's software states are cleared.
1527          * Perform any final cleanup of firmware resources (PCBs and XCBs).
1528          */
1529         if (fcport->loop_id != FC_NO_LOOP_ID &&
1530             !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1531                 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1532                         fcport->loop_id, fcport->d_id.b.domain,
1533                         fcport->d_id.b.area, fcport->d_id.b.al_pa);
1534
1535         qla2x00_abort_fcport_cmds(fcport);
1536 }
1537
1538 static int
1539 qla2x00_issue_lip(struct Scsi_Host *shost)
1540 {
1541         scsi_qla_host_t *vha = shost_priv(shost);
1542
1543         qla2x00_loop_reset(vha);
1544         return 0;
1545 }
1546
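     /*
      * Retrieve link statistics from the firmware (ISP24xx-style statistics or
      * the legacy link-status mailbox command) and translate them into the
      * fc_host_statistics structure.  Counters default to -1 (unknown) when
      * the statistics cannot be read.
      */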
1547 static struct fc_host_statistics *
1548 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1549 {
1550         scsi_qla_host_t *vha = shost_priv(shost);
1551         struct qla_hw_data *ha = vha->hw;
1552         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1553         int rval;
1554         struct link_statistics *stats;
1555         dma_addr_t stats_dma;
1556         struct fc_host_statistics *pfc_host_stat;
1557
1558         pfc_host_stat = &ha->fc_host_stat;
1559         memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1560
1561         if (test_bit(UNLOADING, &vha->dpc_flags))
1562                 goto done;
1563
1564         if (unlikely(pci_channel_offline(ha->pdev)))
1565                 goto done;
1566
1567         stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1568         if (stats == NULL) {
1569                 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
1570                     __func__, base_vha->host_no));
1571                 goto done;
1572         }
1573         memset(stats, 0, DMA_POOL_SIZE);
1574
1575         rval = QLA_FUNCTION_FAILED;
1576         if (IS_FWI2_CAPABLE(ha)) {
1577                 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1578         } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1579                     !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1580                     !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1581                     !ha->dpc_active) {
1582                 /* Must be in a 'READY' state for statistics retrieval. */
1583                 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1584                                                 stats, stats_dma);
1585         }
1586
1587         if (rval != QLA_SUCCESS)
1588                 goto done_free;
1589
1590         pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1591         pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1592         pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1593         pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1594         pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1595         pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1596         if (IS_FWI2_CAPABLE(ha)) {
1597                 pfc_host_stat->lip_count = stats->lip_cnt;
1598                 pfc_host_stat->tx_frames = stats->tx_frames;
1599                 pfc_host_stat->rx_frames = stats->rx_frames;
1600                 pfc_host_stat->dumped_frames = stats->dumped_frames;
1601                 pfc_host_stat->nos_count = stats->nos_rcvd;
1602         }
1603         pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1604         pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1605
1606 done_free:
1607         dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1608 done:
1609         return pfc_host_stat;
1610 }
1611
1612 static void
1613 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1614 {
1615         scsi_qla_host_t *vha = shost_priv(shost);
1616
1617         qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1618 }
1619
1620 static void
1621 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1622 {
1623         scsi_qla_host_t *vha = shost_priv(shost);
1624
1625         set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1626 }
1627
1628 static void
1629 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1630 {
1631         scsi_qla_host_t *vha = shost_priv(shost);
1632         u64 node_name;
1633
1634         if (vha->device_flags & SWITCH_FOUND)
1635                 node_name = wwn_to_u64(vha->fabric_node_name);
1636         else
1637                 node_name = wwn_to_u64(vha->node_name);
1638
1639         fc_host_fabric_name(shost) = node_name;
1640 }
1641
1642 static void
1643 qla2x00_get_host_port_state(struct Scsi_Host *shost)
1644 {
1645         scsi_qla_host_t *vha = shost_priv(shost);
1646         struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1647
1648         if (!base_vha->flags.online)
1649                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1650         else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1651                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1652         else
1653                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1654 }
1655
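     /*
      * FC transport vport_create callback: validate the request, create the
      * virtual host, register it with the SCSI midlayer, and select a request
      * queue for the vport (a dedicated QoS queue when matching NPIV data is
      * found, otherwise the default or CPU-affinity queue).
      */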
1656 static int
1657 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1658 {
1659         int     ret = 0;
1660         uint8_t qos = 0;
1661         scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1662         scsi_qla_host_t *vha = NULL;
1663         struct qla_hw_data *ha = base_vha->hw;
1664         uint16_t options = 0;
1665         int     cnt;
1666         struct req_que *req = ha->req_q_map[0];
1667
1668         ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1669         if (ret) {
1670                 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
1671                     "status %x\n", ret));
1672                 return (ret);
1673         }
1674
1675         vha = qla24xx_create_vhost(fc_vport);
1676         if (vha == NULL) {
1677                 DEBUG15(printk("qla24xx_create_vhost failed, vha = %p\n",
1678                     vha));
1679                 return FC_VPORT_FAILED;
1680         }
1681         if (disable) {
1682                 atomic_set(&vha->vp_state, VP_OFFLINE);
1683                 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1684         } else
1685                 atomic_set(&vha->vp_state, VP_FAILED);
1686
1687         /* ready to create vport */
1688         qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1689                                                         vha->vp_idx);
1690
1691         /* Initialize vport states. */
1692         atomic_set(&vha->loop_state, LOOP_DOWN);
1693         vha->vp_err_state = VP_ERR_PORTDWN;
1694         vha->vp_prev_err_state = VP_ERR_UNKWN;
1695         /* Check if physical ha port is Up */
1696         if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1697             atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1698                 /* Don't retry or attempt login of this virtual port */
1699                 DEBUG15(printk("scsi(%ld): pport loop_state is not UP.\n",
1700                     base_vha->host_no));
1701                 atomic_set(&vha->loop_state, LOOP_DEAD);
1702                 if (!disable)
1703                         fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1704         }
1705
1706         if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1707                                    &ha->pdev->dev)) {
1708                 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1709                         vha->host_no, vha->vp_idx));
1710                 goto vport_create_failed_2;
1711         }
1712
1713         /* initialize attributes */
1714         fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1715         fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1716         fc_host_supported_classes(vha->host) =
1717                 fc_host_supported_classes(base_vha->host);
1718         fc_host_supported_speeds(vha->host) =
1719                 fc_host_supported_speeds(base_vha->host);
1720
1721         qla24xx_vport_disable(fc_vport, disable);
1722
1723         if (ha->flags.cpu_affinity_enabled) {
1724                 req = ha->req_q_map[1];
1725                 goto vport_queue;
1726         } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1727                 goto vport_queue;
1728         /* Create a request queue in QoS mode for the vport */
1729         for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1730                 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1731                         && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1732                                         8) == 0) {
1733                         qos = ha->npiv_info[cnt].q_qos;
1734                         break;
1735                 }
1736         }
1737         if (qos) {
1738                 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1739                         qos);
1740                 if (!ret)
1741                         qla_printk(KERN_WARNING, ha,
1742                         "Can't create request queue for vp_idx:%d\n",
1743                         vha->vp_idx);
1744                 else {
1745                         DEBUG2(qla_printk(KERN_INFO, ha,
1746                         "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1747                         ret, qos, vha->vp_idx));
1748                         req = ha->req_q_map[ret];
1749                 }
1750         }
1751
1752 vport_queue:
1753         vha->req = req;
1754         return 0;
1755
1756 vport_create_failed_2:
1757         qla24xx_disable_vp(vha);
1758         qla24xx_deallocate_vp_id(vha);
1759         scsi_host_put(vha->host);
1760         return FC_VPORT_FAILED;
1761 }
1762
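     /*
      * FC transport vport_delete callback: wait for loop resync and fcport
      * updates to finish, disable the vport, remove its SCSI host, free its
      * fcports, and release the vport index and any dedicated request queue.
      */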
1763 static int
1764 qla24xx_vport_delete(struct fc_vport *fc_vport)
1765 {
1766         scsi_qla_host_t *vha = fc_vport->dd_data;
1767         fc_port_t *fcport, *tfcport;
1768         struct qla_hw_data *ha = vha->hw;
1769         uint16_t id = vha->vp_idx;
1770
1771         while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1772             test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1773                 msleep(1000);
1774
1775         qla24xx_disable_vp(vha);
1776
1777         fc_remove_host(vha->host);
1778
1779         scsi_remove_host(vha->host);
1780
1781         list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1782                 list_del(&fcport->list);
1783                 kfree(fcport);
1784                 fcport = NULL;
1785         }
1786
1787         qla24xx_deallocate_vp_id(vha);
1788
1789         mutex_lock(&ha->vport_lock);
1790         ha->cur_vport_count--;
1791         clear_bit(vha->vp_idx, ha->vp_idx_map);
1792         mutex_unlock(&ha->vport_lock);
1793
1794         if (vha->timer_active) {
1795                 qla2x00_vp_stop_timer(vha);
1796                 DEBUG15(printk("scsi(%ld): timer for the vport[%d] = %p "
1797                     "has stopped\n",
1798                     vha->host_no, vha->vp_idx, vha));
1799         }
1800
1801         if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1802                 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1803                         qla_printk(KERN_WARNING, ha,
1804                                 "Queue delete failed.\n");
1805         }
1806
1807         scsi_host_put(vha->host);
1808         qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
1809         return 0;
1810 }
1811
1812 static int
1813 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1814 {
1815         scsi_qla_host_t *vha = fc_vport->dd_data;
1816
1817         if (disable)
1818                 qla24xx_disable_vp(vha);
1819         else
1820                 qla24xx_enable_vp(vha);
1821
1822         return 0;
1823 }
1824
1825 /* BSG support for ELS/CT pass through */
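     /*
      * Allocate an srb from the srb mempool together with a BSG context of the
      * requested size.  Returns NULL if either allocation fails.
      */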
1826 inline srb_t *
1827 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1828 {
1829         srb_t *sp;
1830         struct qla_hw_data *ha = vha->hw;
1831         struct srb_bsg_ctx *ctx;
1832
1833         sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1834         if (!sp)
1835                 goto done;
1836         ctx = kzalloc(size, GFP_KERNEL);
1837         if (!ctx) {
1838                 mempool_free(sp, ha->srb_mempool);
                 sp = NULL;
1839                 goto done;
1840         }
1841
1842         memset(sp, 0, sizeof(*sp));
1843         sp->fcport = fcport;
1844         sp->ctx = ctx;
1845 done:
1846         return sp;
1847 }
1848
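     /*
      * Handle FC_BSG_RPT_ELS and FC_BSG_HST_ELS_NOLOGIN passthrough requests:
      * map the request/reply scatterlists, build an SRB_ELS_CMD_RPT or
      * SRB_ELS_CMD_HST context and hand it to qla2x00_start_sp().  Host-based
      * ELS requests use a temporary fcport built from the destination port id.
      */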
1849 static int
1850 qla2x00_process_els(struct fc_bsg_job *bsg_job)
1851 {
1852         struct fc_rport *rport;
1853         fc_port_t *fcport;
1854         struct Scsi_Host *host;
1855         scsi_qla_host_t *vha;
1856         struct qla_hw_data *ha;
1857         srb_t *sp;
1858         const char *type;
1859         int req_sg_cnt, rsp_sg_cnt;
1860         int rval =  (DRIVER_ERROR << 16);
1861         uint16_t nextlid = 0;
1862         struct srb_bsg *els;
1863
1864         /* Multiple SGs are not supported for ELS requests. */
1865         if (bsg_job->request_payload.sg_cnt > 1 ||
1866                 bsg_job->reply_payload.sg_cnt > 1) {
1867                 DEBUG2(printk(KERN_INFO
1868                     "multiple SG's are not supported for ELS requests"
1869                     " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1870                     bsg_job->request_payload.sg_cnt,
1871                     bsg_job->reply_payload.sg_cnt));
1872                 rval = -EPERM;
1873                 goto done;
1874         }
1875
1876         /* ELS request for rport */
1877         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1878                 rport = bsg_job->rport;
1879                 fcport = *(fc_port_t **) rport->dd_data;
1880                 host = rport_to_shost(rport);
1881                 vha = shost_priv(host);
1882                 ha = vha->hw;
1883                 type = "FC_BSG_RPT_ELS";
1884
1885                 DEBUG2(printk(KERN_INFO
1886                     "scsi(%ld): loop-id=%x portid=%02x%02x%02x.\n",
1887                     fcport->vha->host_no, fcport->loop_id,
1888                     fcport->d_id.b.domain, fcport->d_id.b.area,
1889                     fcport->d_id.b.al_pa));
1890
1891                 /* Make sure the rport is logged in;
1892                  * if not, perform a fabric login.
1893                  */
1894                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1895                         DEBUG2(qla_printk(KERN_WARNING, ha,
1896                             "failed to login port %06X for ELS passthru\n",
1897                             fcport->d_id.b24));
1898                         rval = -EIO;
1899                         goto done;
1900                 }
1901         } else {
1902                 host = bsg_job->shost;
1903                 vha = shost_priv(host);
1904                 ha = vha->hw;
1905                 type = "FC_BSG_HST_ELS_NOLOGIN";
1906
1907                 DEBUG2(printk(KERN_INFO
1908                     "scsi(%ld): loop-id=%x portid=%02x%02x%02x.\n",
1909                     vha->host_no, vha->loop_id,
1910                     vha->d_id.b.domain, vha->d_id.b.area, vha->d_id.b.al_pa));
1911
1912                 /* Allocate a dummy fcport structure, since the functions
1913                  * preparing the IOCB and mailbox command retrieve port-
1914                  * specific information from the fcport structure. For
1915                  * host-based ELS commands no fcport structure is allocated.
1916                  */
1917                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1918                 if (!fcport) {
1919                         rval = -ENOMEM;
1920                         goto done;
1921                 }
1922
1923                 /* Initialize all required fields of the fcport. */
1924                 fcport->vha = vha;
1925                 fcport->vp_idx = vha->vp_idx;
1926                 fcport->d_id.b.al_pa =
1927                     bsg_job->request->rqst_data.h_els.port_id[0];
1928                 fcport->d_id.b.area =
1929                     bsg_job->request->rqst_data.h_els.port_id[1];
1930                 fcport->d_id.b.domain =
1931                     bsg_job->request->rqst_data.h_els.port_id[2];
1932                 fcport->loop_id =
1933                     (fcport->d_id.b.al_pa == 0xFD) ?
1934                     NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1935         }
1936
1937         DEBUG2(printk(KERN_INFO
1938             "scsi(%ld): vendor-id = %llu\n",
1939             vha->host_no, host->hostt->vendor_id));
1940
1941         req_sg_cnt =
1942             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1943             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1944         if (!req_sg_cnt) {
1945                 rval = -ENOMEM;
1946                 goto done_free_fcport;
1947         }
1948         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1949             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1950         if (!rsp_sg_cnt) {
                     /* Undo the request mapping before bailing out. */
                     dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1951                 rval = -ENOMEM;
1952                 goto done_free_fcport;
1953         }
1954
1955         if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1956             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1958                 DEBUG2(printk(KERN_INFO
1959                     "dma mapping resulted in different sg counts "
1960                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
1961                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1962                     bsg_job->request_payload.sg_cnt, req_sg_cnt,
1963                     bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1964                 rval = -EAGAIN;
1965                 goto done_unmap_sg;
1966         }
1967
1968         /* Alloc SRB structure */
1969         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1970         if (!sp) {
1971                 rval = -ENOMEM;
1972                 goto done_unmap_sg;
1973         }
1974
1975         els = sp->ctx;
1976         els->ctx.type =
1977             (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1978             SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1979         els->bsg_job = bsg_job;
1980
1981         DEBUG2(qla_printk(KERN_INFO, ha,
1982             "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1983             "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1984             bsg_job->request->rqst_data.h_els.command_code,
1985             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1986             fcport->d_id.b.al_pa));
1987
1988         rval = qla2x00_start_sp(sp);
1989         if (rval != QLA_SUCCESS) {
1990                 kfree(sp->ctx);
1991                 mempool_free(sp, ha->srb_mempool);
1992                 rval = -EIO;
1993                 goto done_unmap_sg;
1994         }
1995         return rval;
1996
1997 done_unmap_sg:
1998         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1999                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2000         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2001                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2002         goto done_free_fcport;
2003
2004 done_free_fcport:
2005         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
2006                 kfree(fcport);
2007 done:
2008         return rval;
2009 }
2010
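     /*
      * Handle an FC_BSG_HST_CT passthrough request: map the scatterlists,
      * derive the loop id from the CT preamble (SNS or management server),
      * build an SRB_CT_CMD context and hand it to qla2x00_start_sp().
      */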
2011 static int
2012 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2013 {
2014         srb_t *sp;
2015         struct Scsi_Host *host = bsg_job->shost;
2016         scsi_qla_host_t *vha = shost_priv(host);
2017         struct qla_hw_data *ha = vha->hw;
2018         int rval = (DRIVER_ERROR << 16);
2019         int req_sg_cnt, rsp_sg_cnt;
2020         uint16_t loop_id;
2021         struct fc_port *fcport;
2022         char  *type = "FC_BSG_HST_CT";
2023         struct srb_bsg *ct;
2024
2025         /* CT pass-through is supported only on 4Gb (FWI2-capable) ISPs and newer. */
2026         if (!IS_FWI2_CAPABLE(ha)) {
2027                 DEBUG2(qla_printk(KERN_INFO, ha,
2028                     "scsi(%ld): Firmware is not capable of supporting FC "
2029                     "CT pass-through\n", vha->host_no));
2030                 rval = -EPERM;
2031                 goto done;
2032         }
2033
2034         req_sg_cnt =
2035             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2036             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2037         if (!req_sg_cnt) {
2038                 rval = -ENOMEM;
2039                 goto done;
2040         }
2041
2042         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2043             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2044         if (!rsp_sg_cnt) {
                     /* Undo the request mapping before bailing out. */
                     dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2045                 rval = -ENOMEM;
2046                 goto done;
2047         }
2048
2049         if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2050                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
2052                 DEBUG2(qla_printk(KERN_WARNING, ha,
2053                     "dma mapping resulted in different sg counts "
2054                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2055                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2056                     bsg_job->request_payload.sg_cnt, req_sg_cnt,
2057                     bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2058                 rval = -EAGAIN;
2059                 goto done_unmap_sg;
2060         }
2061
2062         loop_id =
2063             (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2064             >> 24;
2065         switch (loop_id) {
2066                 case 0xFC:
2067                         loop_id = cpu_to_le16(NPH_SNS);
2068                         break;
2069                 case 0xFA:
2070                         loop_id = vha->mgmt_svr_loop_id;
2071                         break;
2072                 default:
2073                         DEBUG2(qla_printk(KERN_INFO, ha,
2074                             "Unknown loop id: %x\n", loop_id));
2075                         rval = -EINVAL;
2076                         goto done_unmap_sg;
2077         }
2078
2079         /* Allocate a dummy fcport structure, since the functions preparing
2080          * the IOCB and mailbox command retrieve port-specific information
2081          * from the fcport structure. For host-based CT commands no fcport
2082          * structure is allocated.
2083          */
2084         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2085         if (!fcport) {
2086                 rval = -ENOMEM;
2087                 goto done_unmap_sg;
2088         }
2090
2091         /* Initialize all required fields of the fcport. */
2092         fcport->vha = vha;
2093         fcport->vp_idx = vha->vp_idx;
2094         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2095         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2096         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2097         fcport->loop_id = loop_id;
2098
2099         /* Alloc SRB structure */
2100         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2101         if (!sp) {
2102                 rval = -ENOMEM;
2103                 goto done_free_fcport;
2104         }
2105
2106         ct = sp->ctx;
2107         ct->ctx.type = SRB_CT_CMD;
2108         ct->bsg_job = bsg_job;
2109
2110         DEBUG2(qla_printk(KERN_INFO, ha,
2111             "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2112             "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2113             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2114             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2115             fcport->d_id.b.al_pa));
2116
2117         rval = qla2x00_start_sp(sp);
2118         if (rval != QLA_SUCCESS) {
2119                 kfree(sp->ctx);
2120                 mempool_free(sp, ha->srb_mempool);
2121                 rval = -EIO;
2122                 goto done_free_fcport;
2123         }
2124         return rval;
2125
2126 done_free_fcport:
2127         kfree(fcport);
2128 done_unmap_sg:
2129         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2130             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2131         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2132             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2133 done:
2134         return rval;
2135 }
2136
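     /*
      * Handle FC_BSG_HST_VENDOR requests: loopback/ECHO diagnostics and
      * ISP84xx reset/management commands.  Payload data is staged through
      * coherent DMA buffers; the mailbox status words and the command code
      * are returned after the fc_bsg_reply in the sense buffer.
      */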
2137 static int
2138 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2139 {
2140         struct Scsi_Host *host = bsg_job->shost;
2141         scsi_qla_host_t *vha = shost_priv(host);
2142         struct qla_hw_data *ha = vha->hw;
2143         int rval;
2144         uint8_t command_sent;
2145         uint32_t vendor_cmd;
2146         char *type;
2147         struct msg_echo_lb elreq;
2148         uint16_t response[MAILBOX_REGISTER_COUNT];
2149         uint8_t *fw_sts_ptr;
2150         uint8_t *req_data = NULL;
2151         dma_addr_t req_data_dma;
2152         uint32_t req_data_len;
2153         uint8_t *rsp_data = NULL;
2154         dma_addr_t rsp_data_dma;
2155         uint32_t rsp_data_len;
2156
2157         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2158             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2159             test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2160                 rval = -EBUSY;
2161                 goto done;
2162         }
2163
2164         elreq.req_sg_cnt =
2165             dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2166             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2167         if (!elreq.req_sg_cnt) {
2168                 rval = -ENOMEM;
2169                 goto done;
2170         }
2171         elreq.rsp_sg_cnt =
2172             dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2173             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2174         if (!elreq.rsp_sg_cnt) {
                     /* Undo the request mapping before bailing out. */
                     dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2175                 rval = -ENOMEM;
2176                 goto done;
2177         }
2178
2179         if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2180             (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
2182                 DEBUG2(printk(KERN_INFO
2183                     "dma mapping resulted in different sg counts "
2184                     "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2185                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2186                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2187                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2188                 rval = -EAGAIN;
2189                 goto done_unmap_sg;
2190         }
2191         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2192         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2193             &req_data_dma, GFP_KERNEL);
         if (!req_data) {
                 rval = -ENOMEM;
                 goto done_unmap_sg;
         }
2194
2195         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2196             &rsp_data_dma, GFP_KERNEL);
         if (!rsp_data) {
                 rval = -ENOMEM;
                 goto done_unmap_sg;
         }
2197
2198         /* Copy the request buffer into req_data. */
2199         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2200             bsg_job->request_payload.sg_cnt, req_data,
2201             req_data_len);
2202
2203         elreq.send_dma = req_data_dma;
2204         elreq.rcv_dma = rsp_data_dma;
2205         elreq.transfer_size = req_data_len;
2206
2207         /* Vendor cmd : loopback or ECHO diagnostic
2208          * Options:
2209          *      Loopback : Either internal or external loopback
2210          *      ECHO: ECHO ELS or Vendor specific FC4  link data
2211          */
2212         vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2213         elreq.options =
2214             *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2215             + 1);
2216
2217         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2218         case QL_VND_LOOPBACK:
2219                 if (ha->current_topology != ISP_CFG_F) {
2220                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
2221
2222                         if ((IS_QLA81XX(ha)) &&
2223                                 ((elreq.options == 0) || (elreq.options == 2))) {
2224                                 DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld)"
2225                                 "loopback option:0x%x not supported\n", vha->host_no, elreq.options));
2226                                 rval = -EINVAL;
2227                                 goto done_unmap_sg;
2228                         }
2229
2230                         DEBUG2(qla_printk(KERN_INFO, ha,
2231                                 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2232                                 vha->host_no, type, vendor_cmd, elreq.options));
2233                         DEBUG2(qla_printk(KERN_INFO, ha,
2234                                 "scsi(%ld) tx_addr: 0x%llx rx_addr: 0x%llx tx_sg_cnt: %x rx_sg_cnt: %x\n",
2235                                 vha->host_no, (unsigned long long)elreq.send_dma, (unsigned long long)elreq.rcv_dma, elreq.req_sg_cnt, elreq.rsp_sg_cnt));
2236                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
2237                         rval = qla2x00_loopback_test(vha, &elreq, response);
2238                         if (IS_QLA81XX(ha)) {
2239                                 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2240                                         DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2241                                                 "ISP\n", __func__, vha->host_no));
2242                                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2243                                         qla2xxx_wake_dpc(vha);
2244                                  }
2245                         }
2246                 } else {
2247                         type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2248                         DEBUG2(qla_printk(KERN_INFO, ha,
2249                                 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2250                                 vha->host_no, type, vendor_cmd, elreq.options));
2251                         DEBUG2(qla_printk(KERN_INFO, ha,
2252                                 "scsi(%ld) tx_addr: 0x%llx rx_addr: 0x%llx tx_sg_cnt: %x rx_sg_cnt: %x\n",
2253                                 vha->host_no, (unsigned long long)elreq.send_dma, (unsigned long long)elreq.rcv_dma, elreq.req_sg_cnt, elreq.rsp_sg_cnt));
2254                         command_sent = INT_DEF_LB_ECHO_CMD;
2255                         rval = qla2x00_echo_test(vha, &elreq, response);
2256                 }
2257                 break;
2258         case QLA84_RESET:
2259                 if (!IS_QLA84XX(vha->hw)) {
2260                         rval = -EINVAL;
2261                         DEBUG16(printk(
2262                                 "%s(%ld): 8xxx exiting.\n",
2263                                 __func__, vha->host_no));
2264                         goto done_unmap_sg;
2265                 }
2266                 rval = qla84xx_reset(vha, &elreq, bsg_job);
2267                 break;
2268         case QLA84_MGMT_CMD:
2269                 if (!IS_QLA84XX(vha->hw)) {
2270                         rval = -EINVAL;
2271                         DEBUG16(printk(
2272                                 "%s(%ld): 8xxx exiting.\n",
2273                                 __func__, vha->host_no));
2274                         goto done_unmap_sg;
2275                 }
2276                 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2277                 break;
2278         default:
2279                 rval = -ENOSYS;
2280         }
2281
2282         if (rval != QLA_SUCCESS) {
2283                 DEBUG2(qla_printk(KERN_WARNING, ha,
2284                         "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2285                 rval = 0;
2286                 bsg_job->reply->result = (DID_ERROR << 16);
2287                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2288                 memcpy(fw_sts_ptr, response, sizeof(response));
2289                 fw_sts_ptr += sizeof(response);
2290                 *fw_sts_ptr = command_sent;
2291         } else {
2292                 DEBUG2(qla_printk(KERN_WARNING, ha,
2293                         "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2294                 rval = bsg_job->reply->result = 0;
2295                 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2296                 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2297                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2298                 memcpy(fw_sts_ptr, response, sizeof(response));
2299                 fw_sts_ptr += sizeof(response);
2300                 *fw_sts_ptr = command_sent;
2301                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2302                     bsg_job->reply_payload.sg_cnt, rsp_data,
2303                     rsp_data_len);
2304         }
2305         bsg_job->job_done(bsg_job);
2306
2307 done_unmap_sg:
2308
2309         if (req_data)
2310                 dma_free_coherent(&ha->pdev->dev, req_data_len,
2311                         req_data, req_data_dma);
         if (rsp_data)
                 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
                         rsp_data, rsp_data_dma);
2312         dma_unmap_sg(&ha->pdev->dev,
2313             bsg_job->request_payload.sg_list,
2314             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2315         dma_unmap_sg(&ha->pdev->dev,
2316             bsg_job->reply_payload.sg_list,
2317             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2318
2319 done:
2320         return rval;
2321 }
2322
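     /*
      * BSG request entry point: dispatch on the message code to the ELS, CT,
      * or vendor-specific handlers.
      */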
2323 static int
2324 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2325 {
2326         int ret = -EINVAL;
2327
2328         switch (bsg_job->request->msgcode) {
2329                 case FC_BSG_RPT_ELS:
2330                 case FC_BSG_HST_ELS_NOLOGIN:
2331                         ret = qla2x00_process_els(bsg_job);
2332                         break;
2333                 case FC_BSG_HST_CT:
2334                         ret = qla2x00_process_ct(bsg_job);
2335                         break;
2336                 case FC_BSG_HST_VENDOR:
2337                         ret = qla2x00_process_vendor_specific(bsg_job);
2338                         break;
2339                 case FC_BSG_HST_ADD_RPORT:
2340                 case FC_BSG_HST_DEL_RPORT:
2341                 case FC_BSG_RPT_CT:
2342                 default:
2343                         DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2344                         break;
2345         }
2346         return ret;
2347 }
2348
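     /*
      * BSG timeout handler: locate the outstanding BSG srb on the default
      * request queue and abort it through the firmware, then release the srb
      * and its context.
      */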
2349 static int
2350 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2351 {
2352         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2353         struct qla_hw_data *ha = vha->hw;
2354         srb_t *sp;
2355         int i;
2356         unsigned long flags;
2357         uint16_t que_id;
2358         struct req_que *req;
2359         struct rsp_que *rsp;
2360         int found = 0;
2361         struct srb_bsg *sp_bsg;
2362
2363         /* find the bsg job from the active list of commands */
2364         spin_lock_irqsave(&ha->hardware_lock, flags);
2365         req = ha->req_q_map[0];
2366         que_id = req->id;
2367         if (req->rsp)
2368                 rsp = req->rsp;
2369         else
2370                 rsp = ha->rsp_q_map[que_id];
2371
2372         for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
2373                 sp = req->outstanding_cmds[i];
2374
2375                 if (sp == NULL)
2376                         continue;
2377
2378                 sp_bsg = (struct srb_bsg *)sp->ctx;
2379
2380                 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2381                     (sp_bsg->ctx.type == SRB_ELS_CMD_RPT) ||
2382                     (sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2383                     (sp_bsg->bsg_job == bsg_job)) {
2384                         DEBUG2(qla_printk(KERN_INFO, ha,
2385                             "scsi(%ld) req_q: %p rsp_q: %p que_id: %x sp: %p\n",
2386                             vha->host_no, req, rsp, que_id, sp));
2387                         found = 1;
2388                         break;
2389                 }
2390         }
2391         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2392         if (!found) {
2393                 DEBUG2(qla_printk(KERN_INFO, ha,
2394                         "scsi(%ld) SRB not found to abort\n", vha->host_no));
2395                 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2396                 return 0;
2397         }
2398
2399         if (ha->isp_ops->abort_command(sp)) {
2400                 DEBUG2(qla_printk(KERN_INFO, ha,
2401                 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2402                 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2403         } else {
2404                 DEBUG2(qla_printk(KERN_INFO, ha,
2405                 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2406                 bsg_job->req->errors = bsg_job->reply->result = 0;
2407         }
2408
2409         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2410                 kfree(sp->fcport);
2411         kfree(sp->ctx);
2412         mempool_free(sp, ha->srb_mempool);
2413         return 0;
2414 }
2415
2416 struct fc_function_template qla2xxx_transport_functions = {
2417
2418         .show_host_node_name = 1,
2419         .show_host_port_name = 1,
2420         .show_host_supported_classes = 1,
2421         .show_host_supported_speeds = 1,
2422
2423         .get_host_port_id = qla2x00_get_host_port_id,
2424         .show_host_port_id = 1,
2425         .get_host_speed = qla2x00_get_host_speed,
2426         .show_host_speed = 1,
2427         .get_host_port_type = qla2x00_get_host_port_type,
2428         .show_host_port_type = 1,
2429         .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2430         .show_host_symbolic_name = 1,
2431         .set_host_system_hostname = qla2x00_set_host_system_hostname,
2432         .show_host_system_hostname = 1,
2433         .get_host_fabric_name = qla2x00_get_host_fabric_name,
2434         .show_host_fabric_name = 1,
2435         .get_host_port_state = qla2x00_get_host_port_state,
2436         .show_host_port_state = 1,
2437
2438         .dd_fcrport_size = sizeof(struct fc_port *),
2439         .show_rport_supported_classes = 1,
2440
2441         .get_starget_node_name = qla2x00_get_starget_node_name,
2442         .show_starget_node_name = 1,
2443         .get_starget_port_name = qla2x00_get_starget_port_name,
2444         .show_starget_port_name = 1,
2445         .get_starget_port_id  = qla2x00_get_starget_port_id,
2446         .show_starget_port_id = 1,
2447
2448         .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2449         .show_rport_dev_loss_tmo = 1,
2450
2451         .issue_fc_host_lip = qla2x00_issue_lip,
2452         .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2453         .terminate_rport_io = qla2x00_terminate_rport_io,
2454         .get_fc_host_stats = qla2x00_get_fc_host_stats,
2455
2456         .vport_create = qla24xx_vport_create,
2457         .vport_disable = qla24xx_vport_disable,
2458         .vport_delete = qla24xx_vport_delete,
2459         .bsg_request = qla24xx_bsg_request,
2460         .bsg_timeout = qla24xx_bsg_timeout,
2461 };
2462
2463 struct fc_function_template qla2xxx_transport_vport_functions = {
2464
2465         .show_host_node_name = 1,
2466         .show_host_port_name = 1,
2467         .show_host_supported_classes = 1,
2468
2469         .get_host_port_id = qla2x00_get_host_port_id,
2470         .show_host_port_id = 1,
2471         .get_host_speed = qla2x00_get_host_speed,
2472         .show_host_speed = 1,
2473         .get_host_port_type = qla2x00_get_host_port_type,
2474         .show_host_port_type = 1,
2475         .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2476         .show_host_symbolic_name = 1,
2477         .set_host_system_hostname = qla2x00_set_host_system_hostname,
2478         .show_host_system_hostname = 1,
2479         .get_host_fabric_name = qla2x00_get_host_fabric_name,
2480         .show_host_fabric_name = 1,
2481         .get_host_port_state = qla2x00_get_host_port_state,
2482         .show_host_port_state = 1,
2483
2484         .dd_fcrport_size = sizeof(struct fc_port *),
2485         .show_rport_supported_classes = 1,
2486
2487         .get_starget_node_name = qla2x00_get_starget_node_name,
2488         .show_starget_node_name = 1,
2489         .get_starget_port_name = qla2x00_get_starget_port_name,
2490         .show_starget_port_name = 1,
2491         .get_starget_port_id  = qla2x00_get_starget_port_id,
2492         .show_starget_port_id = 1,
2493
2494         .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2495         .show_rport_dev_loss_tmo = 1,
2496
2497         .issue_fc_host_lip = qla2x00_issue_lip,
2498         .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2499         .terminate_rport_io = qla2x00_terminate_rport_io,
2500         .get_fc_host_stats = qla2x00_get_fc_host_stats,
2501         .bsg_request = qla24xx_bsg_request,
2502         .bsg_timeout = qla24xx_bsg_timeout,
2503 };
2504
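     /*
      * Initialize fc_host attributes for a newly registered host: WWNs,
      * supported classes, NPIV limits, and the supported speed mask derived
      * from the ISP type.
      */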
2505 void
2506 qla2x00_init_host_attr(scsi_qla_host_t *vha)
2507 {
2508         struct qla_hw_data *ha = vha->hw;
2509         u32 speed = FC_PORTSPEED_UNKNOWN;
2510
2511         fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2512         fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2513         fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
2514         fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2515         fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2516
2517         if (IS_QLA81XX(ha))
2518                 speed = FC_PORTSPEED_10GBIT;
2519         else if (IS_QLA25XX(ha))
2520                 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2521                     FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2522         else if (IS_QLA24XX_TYPE(ha))
2523                 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2524                     FC_PORTSPEED_1GBIT;
2525         else if (IS_QLA23XX(ha))
2526                 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2527         else
2528                 speed = FC_PORTSPEED_1GBIT;
2529         fc_host_supported_speeds(vha->host) = speed;
2530 }
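
     /*
      * Issue an ISP84xx chip reset, selecting diagnostic or operational
      * firmware based on the vendor command's reset flag.
      */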
2531 static int
2532 qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2533 {
2534         int             ret = 0;
2535         int             cmd;
2536         uint16_t        cmd_status;
2537
2538         DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2539
2540         cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2541                         == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2542                                 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2543         ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2544             &cmd_status);
2545         return ret;
2546 }
2547
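     /*
      * Build an ACCESS_CHIP_IOCB for an ISP84xx management request (read or
      * write memory, change configuration, or get info) from the vendor
      * command words and issue it to the firmware.
      */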
2548 static int
2549 qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2550 {
2551         struct access_chip_84xx *mn;
2552         dma_addr_t mn_dma, mgmt_dma;
2553         void *mgmt_b = NULL;
2554         int ret = 0;
2555         int rsp_hdr_len, len = 0;
2556         struct qla84_msg_mgmt *ql84_mgmt;
2557
2558         ql84_mgmt = vmalloc(sizeof(struct qla84_msg_mgmt));
         if (!ql84_mgmt)
                 return -ENOMEM;
2559         ql84_mgmt->cmd =
2560                 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2561         ql84_mgmt->mgmtp.u.mem.start_addr =
2562                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2563         ql84_mgmt->len =
2564                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2565         ql84_mgmt->mgmtp.u.config.id =
2566                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2567         ql84_mgmt->mgmtp.u.config.param0 =
2568                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2569         ql84_mgmt->mgmtp.u.config.param1 =
2570                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2571         ql84_mgmt->mgmtp.u.info.type =
2572                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2573         ql84_mgmt->mgmtp.u.info.context =
2574                 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2575
2576         rsp_hdr_len = bsg_job->request_payload.payload_len;
2577
2578         mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2579         if (mn == NULL) {
2580                 DEBUG2(printk(KERN_ERR "%s(%lu): dma alloc for fw buffer "
2581                 "failed\n", __func__, ha->host_no));
                 vfree(ql84_mgmt);
2582                 return -ENOMEM;
2583         }
2584
2585         memset(mn, 0, sizeof(struct access_chip_84xx));
2586
2587         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2588         mn->entry_count = 1;
2589
2590         switch (ql84_mgmt->cmd) {
2591         case QLA84_MGMT_READ_MEM:
2592                 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2593                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2594                 break;
2595         case QLA84_MGMT_WRITE_MEM:
2596                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2597                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2598                 break;
2599         case QLA84_MGMT_CHNG_CONFIG:
2600                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2601                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2602                 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2603                 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2604                 break;
2605         case QLA84_MGMT_GET_INFO:
2606                 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2607                 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2608                 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2609                 break;
2610         default:
2611                 ret = -EIO;
2612                 goto exit_mgmt0;
2613         }
2614
2615         if ((len == ql84_mgmt->len) &&
2616                 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2617                 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2618                                 &mgmt_dma, GFP_KERNEL);
2619                 if (mgmt_b == NULL) {
2620                         DEBUG2(printk(KERN_ERR "%s(%lu): dma alloc mgmt_b "
2621                         "failed\n", __func__, ha->host_no));
2622                         ret = -ENOMEM;
2623                         goto exit_mgmt0;
2624                 }
2625                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2626                 mn->dseg_count = cpu_to_le16(1);
2627                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2628                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2629                 mn->dseg_length = cpu_to_le32(len);
2630
2631                 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2632                         memcpy(mgmt_b, ql84_mgmt->payload, len);
2633                 }
2634         }
2635
2636         ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2637         if (ret != QLA_SUCCESS)
2638                 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2639                         __func__, ha->host_no));
2645
2646         if (mgmt_b)
2647                 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2648
2649 exit_mgmt0:
2650         dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
         vfree(ql84_mgmt);
2651         return ret;
2652 }