2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
/* Additional Sense Codes (ASC) used when building simulated sense data.
 * NOTE(review): INVALID_OPCODE and INVALID_COMMAND_OPCODE are both 0x20 —
 * apparently intentional aliases, but worth confirming only one is needed. */
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
/* Compile-time defaults for the module parameters declared further down.
 * NOTE(review): this excerpt has numbering gaps; DEF_DELAY, DEF_OPTS,
 * DEF_PTYPE, DEF_D_SENSE, DEF_DIX, DEF_DIF, DEF_GUARD and DEF_ATO are
 * referenced below but their definitions fall in the missing lines. */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
93 #define DEF_DEV_SIZE_MB 8
94 #define DEF_EVERY_NTH 0
95 #define DEF_NUM_PARTS 0
97 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
100 #define DEF_NO_LUN_0 0
101 #define DEF_VIRTUAL_GB 0
102 #define DEF_FAKE_RW 0
103 #define DEF_VPD_USE_HOSTNO 1
104 #define DEF_SECTOR_SIZE 512
109 #define DEF_PHYSBLK_EXP 0
110 #define DEF_LOWEST_ALIGNED 0
111 #define DEF_UNMAP_MAX_BLOCKS 0
112 #define DEF_UNMAP_MAX_DESC 0
113 #define DEF_UNMAP_GRANULARITY 0
114 #define DEF_UNMAP_ALIGNMENT 0
116 /* bit mask values for scsi_debug_opts */
117 #define SCSI_DEBUG_OPT_NOISE 1
118 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
119 #define SCSI_DEBUG_OPT_TIMEOUT 4
120 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
121 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
122 #define SCSI_DEBUG_OPT_DIF_ERR 32
123 #define SCSI_DEBUG_OPT_DIX_ERR 64
124 /* When "every_nth" > 0 then modulo "every_nth" commands:
125 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
126 * - a RECOVERED_ERROR is simulated on successful read and write
127 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
128 * - a TRANSPORT_ERROR is simulated on successful read and write
129 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
131 * When "every_nth" < 0 then after "- every_nth" commands:
132 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
133 * - a RECOVERED_ERROR is simulated on successful read and write
134 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
135 * - a TRANSPORT_ERROR is simulated on successful read and write
136 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
137 * This will continue until some other action occurs (e.g. the user
138 * writing a new value (other than -1 or 1) to every_nth via sysfs).
141 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
142 * sector on read commands: */
143 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
145 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
146 * or "peripheral device" addressing (value 0) */
147 #define SAM2_LUN_ADDRESS_METHOD 0
148 #define SAM2_WLUN_REPORT_LUNS 0xc101
150 /* Can queue up to this number of commands. Typically commands that
151 * that have a non-zero delay are queued. */
152 #define SCSI_DEBUG_CANQUEUE 255
/* Run-time copies of the module parameters (normally exposed via
 * module_param, presumably in lines missing from this excerpt), plus
 * derived geometry used by old-BIOS mode sense pages. */
154 static int scsi_debug_add_host = DEF_NUM_HOST;
155 static int scsi_debug_delay = DEF_DELAY;
156 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
157 static int scsi_debug_every_nth = DEF_EVERY_NTH;
158 static int scsi_debug_max_luns = DEF_MAX_LUNS;
159 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
160 static int scsi_debug_num_parts = DEF_NUM_PARTS;
161 static int scsi_debug_no_uld = 0;
162 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
163 static int scsi_debug_opts = DEF_OPTS;
164 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
165 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
166 static int scsi_debug_dsense = DEF_D_SENSE;
167 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
168 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
169 static int scsi_debug_fake_rw = DEF_FAKE_RW;
170 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
171 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
172 static int scsi_debug_dix = DEF_DIX;
173 static int scsi_debug_dif = DEF_DIF;
174 static int scsi_debug_guard = DEF_GUARD;
175 static int scsi_debug_ato = DEF_ATO;
176 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
177 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
178 static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
179 static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
180 static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
181 static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
183 static int scsi_debug_cmnd_count = 0;
/* Simulated devices are always writable and non-removable. */
185 #define DEV_READONLY(TGT) (0)
186 #define DEV_REMOVEABLE(TGT) (0)
188 static unsigned int sdebug_store_sectors;
189 static sector_t sdebug_capacity; /* in sectors */
191 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
192 may still need them */
193 static int sdebug_heads; /* heads per disk */
194 static int sdebug_cylinders_per; /* cylinders per surface */
195 static int sdebug_sectors_per; /* sectors per cylinder */
197 #define SDEBUG_MAX_PARTS 4
199 #define SDEBUG_SENSE_LEN 32
201 #define SCSI_DEBUG_MAX_CMD_LEN 32
/* Per-LU state for one simulated logical unit.  NOTE(review): numbering
 * gaps (206->209, and the struct never closes in this excerpt) show that
 * fields such as target/lun/reset/stopped/wlun used elsewhere in this file
 * are declared in the missing lines. */
203 struct sdebug_dev_info {
204 struct list_head dev_list;
205 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
206 unsigned int channel;
209 struct sdebug_host_info *sdbg_host;
/* Per simulated host adapter: owning Scsi_Host plus the list of
 * sdebug_dev_info LUs attached to it.  The to_sdebug_host() macro maps the
 * embedded struct device (member "dev", declared in a missing line) back to
 * the containing sdebug_host_info. */
216 struct sdebug_host_info {
217 struct list_head host_list;
218 struct Scsi_Host *shost;
220 struct list_head dev_info_list;
223 #define to_sdebug_host(d) \
224 container_of(d, struct sdebug_host_info, dev)
/* Global driver state: list of simulated hosts, the delayed-response
 * command queue, the shared ramdisk backing store, and statistics
 * counters.  sdebug_host_list is guarded by sdebug_host_list_lock;
 * queued_arr by queued_arr_lock; atomic_rw serialises fake-store access. */
226 static LIST_HEAD(sdebug_host_list);
227 static DEFINE_SPINLOCK(sdebug_host_list_lock);
229 typedef void (* done_funct_t) (struct scsi_cmnd *);
/* One slot per command whose completion is delayed via cmnd_timer. */
231 struct sdebug_queued_cmd {
233 struct timer_list cmnd_timer;
234 done_funct_t done_funct;
235 struct scsi_cmnd * a_cmnd;
238 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
240 static unsigned char * fake_storep; /* ramdisk storage */
241 static unsigned char *dif_storep; /* protection info */
242 static void *map_storep; /* provisioning map */
244 static unsigned long map_size;
245 static int num_aborts = 0;
246 static int num_dev_resets = 0;
247 static int num_bus_resets = 0;
248 static int num_host_resets = 0;
249 static int dix_writes;
250 static int dix_reads;
251 static int dif_errors;
253 static DEFINE_SPINLOCK(queued_arr_lock);
254 static DEFINE_RWLOCK(atomic_rw);
256 static char sdebug_proc_name[] = "scsi_debug";
258 static struct bus_type pseudo_lld_bus;
/* dif_offset(): body not visible in this excerpt (lines 261-264 missing);
 * presumably maps a sector to its slot in dif_storep — confirm upstream. */
260 static inline sector_t dif_offset(sector_t sector)
265 static struct device_driver sdebug_driverfs_driver = {
266 .name = sdebug_proc_name,
267 .bus = &pseudo_lld_bus,
/* Canned host-byte/status-byte results returned for simulated errors. */
270 static const int check_condition_result =
271 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
273 static const int illegal_condition_result =
274 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
/* Mutable MODE SENSE pages: control (0xa) and informational exceptions
 * (0x1c); trailing initialiser lines are missing from this excerpt. */
276 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
278 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
281 static int sdebug_add_adapter(void);
282 static void sdebug_remove_adapter(void);
/* Refresh max_id/max_lun on every registered simulated host after the
 * num_tgts parameter changes, under sdebug_host_list_lock.
 * NOTE(review): the "else" before line 296 and the loop's closing brace
 * fall in lines missing from this excerpt. */
284 static void sdebug_max_tgts_luns(void)
286 struct sdebug_host_info *sdbg_host;
287 struct Scsi_Host *hpnt;
289 spin_lock(&sdebug_host_list_lock);
290 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
291 hpnt = sdbg_host->shost;
292 if ((hpnt->this_id >= 0) &&
293 (scsi_debug_num_tgts > hpnt->this_id))
/* leave room for the initiator id when it lies inside the target range */
294 hpnt->max_id = scsi_debug_num_tgts + 1;
296 hpnt->max_id = scsi_debug_num_tgts;
297 /* scsi_debug_max_luns; */
298 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
300 spin_unlock(&sdebug_host_list_lock);
/* Build fixed- or descriptor-format sense data (per scsi_debug_dsense)
 * for the given key/asc/ascq into the device's per-LU sense buffer,
 * logging it when the NOISE opt is set.  The asq parameter is declared
 * in a line missing from this excerpt. */
303 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
306 unsigned char *sbuff;
308 sbuff = devip->sense_buff;
309 memset(sbuff, 0, SDEBUG_SENSE_LEN);
311 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
313 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
314 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
315 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
/* Decode LBA, transfer length and (for 32-byte CDBs) expected initial
 * logical block reference tag from a CDB, switching on opcode length:
 * 32-byte variable-length, 16-, 12-, 10- and 6-byte forms.  All fields are
 * big-endian in the CDB and assembled here byte-by-byte.  The switch/case
 * framing and break statements fall in lines missing from this excerpt. */
318 static void get_data_transfer_info(unsigned char *cmd,
319 unsigned long long *lba, unsigned int *num,
325 case VARIABLE_LENGTH_CMD:
326 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
327 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
328 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
329 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
331 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
332 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
334 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
/* 16-byte CDB: 8-byte LBA at bytes 2-9, 4-byte length at 10-13 */
341 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
342 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
343 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
344 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
346 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
/* 12-byte CDB: 4-byte LBA at bytes 2-5, 4-byte length at 6-9 */
351 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
354 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
/* 10-byte CDB: 4-byte LBA at bytes 2-5, 2-byte length at 7-8 */
361 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
364 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
/* 6-byte CDB: 21-bit LBA; length byte of 0 means 256 blocks */
368 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
369 (u32)(cmd[1] & 0x1f) << 16;
370 *num = (0 == cmd[4]) ? 256 : cmd[4];
/* ioctl hook: only logs the command when the NOISE opt is set; the final
 * return statement falls in a missing line.  The comment below explains
 * why -ENOTTY is deliberately not returned. */
377 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
379 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
380 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
383 /* return -ENOTTY; // correct return but upsets fdisk */
/* Common pre-check for media-access commands.  Reports a POWER ON RESET
 * unit attention (the guarding "if (devip->reset)" presumably sits in the
 * missing line before 390 — confirm upstream), and, unless reset_only is
 * set, NOT READY when the LU has been stopped via START STOP UNIT.
 * Returns 0 when ready, else check_condition_result. */
386 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
387 struct sdebug_dev_info * devip)
390 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
391 printk(KERN_INFO "scsi_debug: Reporting Unit "
392 "attention: power on reset\n");
394 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
395 return check_condition_result;
397 if ((0 == reset_only) && devip->stopped) {
398 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
399 printk(KERN_INFO "scsi_debug: Reporting Not "
400 "ready: initializing command required\n");
401 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
403 return check_condition_result;
408 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
/* Copy up to arr_len bytes (arr_len parameter declared in a missing line)
 * from arr into the command's data-in scatter-gather list, updating resid
 * with the bytes actually copied. */
409 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
413 struct scsi_data_buffer *sdb = scsi_in(scp);
/* reject unless this is a data-in (or bidirectional) command */
417 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
418 return (DID_ERROR << 16);
420 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
423 sdb->resid -= act_len;
425 sdb->resid = scsi_bufflen(scp) - act_len;
430 /* Returns number of bytes fetched into 'arr' or -1 if error. */
/* Inverse of fill_from_dev_buffer(): pull up to arr_len bytes of data-out
 * payload from the command's scatter-gather list into arr. */
431 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
434 if (!scsi_bufflen(scp))
436 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
439 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/* Fixed-width INQUIRY identification strings: vendor is 8 chars,
 * product id 16 chars, revision 4 chars (space padded). */
443 static const char * inq_vendor_id = "Linux   ";
444 static const char * inq_product_id = "scsi_debug      ";
445 static const char * inq_product_rev = "0004";
/* Build the Device Identification VPD page (0x83) into arr: a T10 vendor
 * id descriptor, then (when dev_id_num >= 0) NAA-5 LU and target port
 * identifiers, target port group, target device identifier and a SCSI name
 * string.  Returns the number of bytes written (the final return falls in
 * a missing line).  NOTE(review): several descriptor-header bytes also sit
 * in missing lines (gaps at 457->460, 469->472, etc.). */
447 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
448 int target_dev_id, int dev_id_num,
449 const char * dev_id_str,
455 port_a = target_dev_id + 1;
456 /* T10 vendor identifier field format (faked) */
457 arr[0] = 0x2; /* ASCII */
460 memcpy(&arr[4], inq_vendor_id, 8);
461 memcpy(&arr[12], inq_product_id, 16);
462 memcpy(&arr[28], dev_id_str, dev_id_str_len);
463 num = 8 + 16 + dev_id_str_len;
466 if (dev_id_num >= 0) {
467 /* NAA-5, Logical unit identifier (binary) */
468 arr[num++] = 0x1; /* binary (not necessarily sas) */
469 arr[num++] = 0x3; /* PIV=0, lu, naa */
472 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
476 arr[num++] = (dev_id_num >> 24);
477 arr[num++] = (dev_id_num >> 16) & 0xff;
478 arr[num++] = (dev_id_num >> 8) & 0xff;
479 arr[num++] = dev_id_num & 0xff;
480 /* Target relative port number */
481 arr[num++] = 0x61; /* proto=sas, binary */
482 arr[num++] = 0x94; /* PIV=1, target port, rel port */
483 arr[num++] = 0x0; /* reserved */
484 arr[num++] = 0x4; /* length */
485 arr[num++] = 0x0; /* reserved */
486 arr[num++] = 0x0; /* reserved */
488 arr[num++] = 0x1; /* relative port A */
490 /* NAA-5, Target port identifier */
491 arr[num++] = 0x61; /* proto=sas, binary */
492 arr[num++] = 0x93; /* piv=1, target port, naa */
495 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
499 arr[num++] = (port_a >> 24);
500 arr[num++] = (port_a >> 16) & 0xff;
501 arr[num++] = (port_a >> 8) & 0xff;
502 arr[num++] = port_a & 0xff;
503 /* NAA-5, Target port group identifier */
504 arr[num++] = 0x61; /* proto=sas, binary */
505 arr[num++] = 0x95; /* piv=1, target port group id */
510 arr[num++] = (port_group_id >> 8) & 0xff;
511 arr[num++] = port_group_id & 0xff;
512 /* NAA-5, Target device identifier */
513 arr[num++] = 0x61; /* proto=sas, binary */
514 arr[num++] = 0xa3; /* piv=1, target device, naa */
517 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
521 arr[num++] = (target_dev_id >> 24);
522 arr[num++] = (target_dev_id >> 16) & 0xff;
523 arr[num++] = (target_dev_id >> 8) & 0xff;
524 arr[num++] = target_dev_id & 0xff;
525 /* SCSI name string: Target device identifier */
526 arr[num++] = 0x63; /* proto=sas, UTF-8 */
527 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
530 memcpy(arr + num, "naa.52222220", 12);
532 snprintf(b, sizeof(b), "%08X", target_dev_id);
533 memcpy(arr + num, b, 8);
535 memset(arr + num, 0, 4);
/* Software Interface Identification VPD page (0x84) payload: three fake
 * 6-byte EUI-48 identifiers (company id 0x222222). */
541 static unsigned char vpd84_data[] = {
542 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
543 0x22,0x22,0x22,0x0,0xbb,0x1,
544 0x22,0x22,0x22,0x0,0xbb,0x2,
/* Copy the canned page into arr and return its length. */
547 static int inquiry_evpd_84(unsigned char * arr)
549 memcpy(arr, vpd84_data, sizeof(vpd84_data));
550 return sizeof(vpd84_data);
/* Management Network Addresses VPD page (0x85): two fake URL descriptors
 * (storage-configuration and logging services), each NUL terminated and
 * padded to a 4-byte multiple.  Returns the page length (return falls in
 * a missing line). */
553 static int inquiry_evpd_85(unsigned char * arr)
556 const char * na1 = "https://www.kernel.org/config";
557 const char * na2 = "http://www.kernel.org/log";
560 arr[num++] = 0x1; /* lu, storage config */
561 arr[num++] = 0x0; /* reserved */
/* round up to a multiple of 4 with room for the NUL */
566 plen = ((plen / 4) + 1) * 4;
567 arr[num++] = plen; /* length, null terminated, padded */
568 memcpy(arr + num, na1, olen);
569 memset(arr + num + olen, 0, plen - olen);
572 arr[num++] = 0x4; /* lu, logging */
573 arr[num++] = 0x0; /* reserved */
578 plen = ((plen / 4) + 1) * 4;
579 arr[num++] = plen; /* length, null terminated, padded */
580 memcpy(arr + num, na2, olen);
581 memset(arr + num + olen, 0, plen - olen);
587 /* SCSI ports VPD page */
/* Build the SCSI Ports VPD page (0x88): two relative ports, each with a
 * fake NAA-5 target port identifier derived from target_dev_id.  Port B's
 * value (port_b = port_a + 1, presumably) is set in a missing line.
 * Returns the page length (return also in a missing line). */
588 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
593 port_a = target_dev_id + 1;
595 arr[num++] = 0x0; /* reserved */
596 arr[num++] = 0x0; /* reserved */
598 arr[num++] = 0x1; /* relative port 1 (primary) */
599 memset(arr + num, 0, 6);
602 arr[num++] = 12; /* length tp descriptor */
603 /* naa-5 target port identifier (A) */
604 arr[num++] = 0x61; /* proto=sas, binary */
605 arr[num++] = 0x93; /* PIV=1, target port, NAA */
606 arr[num++] = 0x0; /* reserved */
607 arr[num++] = 0x8; /* length */
608 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
612 arr[num++] = (port_a >> 24);
613 arr[num++] = (port_a >> 16) & 0xff;
614 arr[num++] = (port_a >> 8) & 0xff;
615 arr[num++] = port_a & 0xff;
617 arr[num++] = 0x0; /* reserved */
618 arr[num++] = 0x0; /* reserved */
620 arr[num++] = 0x2; /* relative port 2 (secondary) */
621 memset(arr + num, 0, 6);
624 arr[num++] = 12; /* length tp descriptor */
625 /* naa-5 target port identifier (B) */
626 arr[num++] = 0x61; /* proto=sas, binary */
627 arr[num++] = 0x93; /* PIV=1, target port, NAA */
628 arr[num++] = 0x0; /* reserved */
629 arr[num++] = 0x8; /* length */
630 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
634 arr[num++] = (port_b >> 24);
635 arr[num++] = (port_b >> 16) & 0xff;
636 arr[num++] = (port_b >> 8) & 0xff;
637 arr[num++] = port_b & 0xff;
/* ATA Information VPD page (0x89) payload: canned SAT identify data,
 * including the 512-byte ATA IDENTIFY DEVICE block. */
643 static unsigned char vpd89_data[] = {
644 /* from 4th byte */ 0,0,0,0,
645 'l','i','n','u','x',' ',' ',' ',
646 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
648 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
650 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
651 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
652 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
653 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
655 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
657 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
659 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
660 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
661 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
662 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
663 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
664 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
665 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
666 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
667 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
668 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
669 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
670 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
671 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
672 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
673 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
674 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
675 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
676 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
677 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
678 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
680 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
681 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
682 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
683 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
684 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
/* Copy the canned page into arr and return its length. */
687 static int inquiry_evpd_89(unsigned char * arr)
689 memcpy(arr, vpd89_data, sizeof(vpd89_data));
690 return sizeof(vpd89_data);
694 /* Block limits VPD page (SBC-3) */
/* Default payload: optimal transfer granularity 4, maximum transfer
 * length 0x400, optimal transfer length 64; the rest is filled in by
 * inquiry_evpd_b0() below. */
695 static unsigned char vpdb0_data[] = {
696 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
/* Build the Block Limits VPD page (0xb0): physical-block granularity,
 * maximum transfer length (capped by the store size), and the unmap
 * (thin provisioning) limits when configured.  Returns the page length:
 * 0x3c when any unmap granularity is set, else sizeof(vpdb0_data). */
702 static int inquiry_evpd_b0(unsigned char * arr)
706 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
707 gran = 1 << scsi_debug_physblk_exp;
708 arr[2] = (gran >> 8) & 0xff;
709 arr[3] = gran & 0xff;
/* only advertise a maximum transfer length above the canned 0x400 */
710 if (sdebug_store_sectors > 0x400) {
711 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
712 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
713 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
714 arr[7] = sdebug_store_sectors & 0xff;
717 if (scsi_debug_unmap_max_desc) {
720 if (scsi_debug_unmap_max_blocks)
721 blocks = scsi_debug_unmap_max_blocks;
725 put_unaligned_be32(blocks, &arr[16]);
726 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
729 if (scsi_debug_unmap_alignment) {
730 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
731 arr[28] |= 0x80; /* UGAVALID */
734 if (scsi_debug_unmap_granularity) {
735 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
736 return 0x3c; /* Mandatory page length for thin provisioning */
739 return sizeof(vpdb0_data);
742 /* Block device characteristics VPD page (SBC-3) */
/* Advertise a non-rotating (SSD-like) medium in a sub-1.8" form factor.
 * The return statement falls in a missing line. */
743 static int inquiry_evpd_b1(unsigned char *arr)
745 memset(arr, 0, 0x3c);
747 arr[1] = 1; /* non rotating medium (e.g. solid state) */
749 arr[3] = 5; /* less than 1.8" */
/* Standard INQUIRY response length and the largest EVPD response. */
754 #define SDEBUG_LONG_INQ_SZ 96
755 #define SDEBUG_MAX_INQ_ARR_SZ 584
/* Respond to the INQUIRY command: either one of the supported EVPD pages
 * (0x00, 0x80, 0x83-0x89, 0xb0, 0xb1) when the EVPD bit is set, or the
 * standard inquiry data.  Rejects CMDDT.  Returns 0, check_condition_result
 * or DID_REQUEUE<<16 on allocation failure; the response array is heap
 * allocated and freed on all paths (kfree calls fall in missing lines). */
757 static int resp_inquiry(struct scsi_cmnd * scp, int target,
758 struct sdebug_dev_info * devip)
760 unsigned char pq_pdt;
762 unsigned char *cmd = (unsigned char *)scp->cmnd;
763 int alloc_len, n, ret;
765 alloc_len = (cmd[3] << 8) + cmd[4];
766 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
768 return DID_REQUEUE << 16;
/* peripheral qualifier/device type depends on wlun/no_lun_0 (the guarding
 * conditions sit in missing lines) */
770 pq_pdt = 0x1e; /* present, wlun */
771 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
772 pq_pdt = 0x7f; /* not present, no device type */
774 pq_pdt = (scsi_debug_ptype & 0x1f);
776 if (0x2 & cmd[1]) { /* CMDDT bit set */
777 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
780 return check_condition_result;
781 } else if (0x1 & cmd[1]) { /* EVPD bit set */
782 int lu_id_num, port_group_id, target_dev_id, len;
784 int host_no = devip->sdbg_host->shost->host_no;
786 port_group_id = (((host_no + 1) & 0x7f) << 8) +
787 (devip->channel & 0x7f);
788 if (0 == scsi_debug_vpd_use_hostno)
790 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
791 (devip->target * 1000) + devip->lun);
792 target_dev_id = ((host_no + 1) * 2000) +
793 (devip->target * 1000) - 3;
794 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
795 if (0 == cmd[2]) { /* supported vital product data pages */
796 arr[1] = cmd[2]; /*sanity */
798 arr[n++] = 0x0; /* this page */
799 arr[n++] = 0x80; /* unit serial number */
800 arr[n++] = 0x83; /* device identification */
801 arr[n++] = 0x84; /* software interface ident. */
802 arr[n++] = 0x85; /* management network addresses */
803 arr[n++] = 0x86; /* extended inquiry */
804 arr[n++] = 0x87; /* mode page policy */
805 arr[n++] = 0x88; /* SCSI ports */
806 arr[n++] = 0x89; /* ATA information */
807 arr[n++] = 0xb0; /* Block limits (SBC) */
808 arr[n++] = 0xb1; /* Block characteristics (SBC) */
809 arr[3] = n - 4; /* number of supported VPD pages */
810 } else if (0x80 == cmd[2]) { /* unit serial number */
811 arr[1] = cmd[2]; /*sanity */
813 memcpy(&arr[4], lu_id_str, len);
814 } else if (0x83 == cmd[2]) { /* device identification */
815 arr[1] = cmd[2]; /*sanity */
816 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
817 target_dev_id, lu_id_num,
819 } else if (0x84 == cmd[2]) { /* Software interface ident. */
820 arr[1] = cmd[2]; /*sanity */
821 arr[3] = inquiry_evpd_84(&arr[4]);
822 } else if (0x85 == cmd[2]) { /* Management network addresses */
823 arr[1] = cmd[2]; /*sanity */
824 arr[3] = inquiry_evpd_85(&arr[4]);
825 } else if (0x86 == cmd[2]) { /* extended inquiry */
826 arr[1] = cmd[2]; /*sanity */
827 arr[3] = 0x3c; /* number of following entries */
828 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
829 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
830 else if (scsi_debug_dif)
831 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
833 arr[4] = 0x0; /* no protection stuff */
834 arr[5] = 0x7; /* head of q, ordered + simple q's */
835 } else if (0x87 == cmd[2]) { /* mode page policy */
836 arr[1] = cmd[2]; /*sanity */
837 arr[3] = 0x8; /* number of following entries */
838 arr[4] = 0x2; /* disconnect-reconnect mp */
839 arr[6] = 0x80; /* mlus, shared */
840 arr[8] = 0x18; /* protocol specific lu */
841 arr[10] = 0x82; /* mlus, per initiator port */
842 } else if (0x88 == cmd[2]) { /* SCSI Ports */
843 arr[1] = cmd[2]; /*sanity */
844 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
845 } else if (0x89 == cmd[2]) { /* ATA information */
846 arr[1] = cmd[2]; /*sanity */
847 n = inquiry_evpd_89(&arr[4]);
850 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
851 arr[1] = cmd[2]; /*sanity */
852 arr[3] = inquiry_evpd_b0(&arr[4]);
853 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
854 arr[1] = cmd[2]; /*sanity */
855 arr[3] = inquiry_evpd_b1(&arr[4]);
857 /* Illegal request, invalid field in cdb */
858 mk_sense_buffer(devip, ILLEGAL_REQUEST,
859 INVALID_FIELD_IN_CDB, 0);
861 return check_condition_result;
/* clamp the response to the allocation length and buffer size */
863 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
864 ret = fill_from_dev_buffer(scp, arr,
865 min(len, SDEBUG_MAX_INQ_ARR_SZ));
869 /* drops through here for a standard inquiry */
870 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
871 arr[2] = scsi_debug_scsi_level;
872 arr[3] = 2; /* response_data_format==2 */
873 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
874 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
875 if (0 == scsi_debug_vpd_use_hostno)
876 arr[5] = 0x10; /* claim: implicit TGPS */
877 arr[6] = 0x10; /* claim: MultiP */
878 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
879 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
880 memcpy(&arr[8], inq_vendor_id, 8);
881 memcpy(&arr[16], inq_product_id, 16);
882 memcpy(&arr[32], inq_product_rev, 4);
883 /* version descriptors (2 bytes each) follow */
884 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
885 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
887 if (scsi_debug_ptype == 0) {
888 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
889 } else if (scsi_debug_ptype == 1) {
890 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
892 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
893 ret = fill_from_dev_buffer(scp, arr,
894 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/* Respond to REQUEST SENSE: returns the LU's stashed sense data, or a
 * fabricated THRESHOLD EXCEEDED informational exception when the iec mode
 * page has TEST set with MRIE==6.  Honours the DESC bit by converting
 * fixed-format sense to the descriptor layout.  Clears the stashed sense
 * afterwards (line 938). */
899 static int resp_requests(struct scsi_cmnd * scp,
900 struct sdebug_dev_info * devip)
902 unsigned char * sbuff;
903 unsigned char *cmd = (unsigned char *)scp->cmnd;
904 unsigned char arr[SDEBUG_SENSE_LEN];
908 memset(arr, 0, sizeof(arr));
909 if (devip->reset == 1)
910 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
911 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
912 sbuff = devip->sense_buff;
913 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
/* descriptor-format informational exception response */
916 arr[1] = 0x0; /* NO_SENSE in sense_key */
917 arr[2] = THRESHOLD_EXCEEDED;
918 arr[3] = 0xff; /* TEST set and MRIE==6 */
/* fixed-format informational exception response */
921 arr[2] = 0x0; /* NO_SENSE in sense_key */
922 arr[7] = 0xa; /* 18 byte sense buffer */
923 arr[12] = THRESHOLD_EXCEEDED;
924 arr[13] = 0xff; /* TEST set and MRIE==6 */
927 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
928 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
929 /* DESC bit set and sense_buff in fixed format */
930 memset(arr, 0, sizeof(arr));
932 arr[1] = sbuff[2]; /* sense key */
933 arr[2] = sbuff[12]; /* asc */
934 arr[3] = sbuff[13]; /* ascq */
938 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
939 return fill_from_dev_buffer(scp, arr, len);
/* Respond to START STOP UNIT: only power condition 0 is accepted, any
 * other value is an ILLEGAL REQUEST; otherwise toggle the LU's stopped
 * flag to match the START bit (start is read from cmd[4] bit 0 in a line
 * missing from this excerpt). */
942 static int resp_start_stop(struct scsi_cmnd * scp,
943 struct sdebug_dev_info * devip)
945 unsigned char *cmd = (unsigned char *)scp->cmnd;
946 int power_cond, errsts, start;
948 if ((errsts = check_readiness(scp, 1, devip)))
950 power_cond = (cmd[4] & 0xf0) >> 4;
952 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
954 return check_condition_result;
957 if (start == devip->stopped)
958 devip->stopped = !start;
/* Report the advertised capacity in sectors: virtual_gb (in GiB, assuming
 * 512-byte units: 2048*1024 sectors per GiB) when set, else the actual
 * backing-store size. */
962 static sector_t get_sdebug_capacity(void)
964 if (scsi_debug_virtual_gb > 0)
965 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
967 return sdebug_store_sectors;
970 #define SDEBUG_READCAP_ARR_SZ 8
/* Respond to READ CAPACITY(10): last-LBA (capped at 0xffffffff, the
 * all-ones branch sits in missing lines) and block size, both big-endian. */
971 static int resp_readcap(struct scsi_cmnd * scp,
972 struct sdebug_dev_info * devip)
974 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
978 if ((errsts = check_readiness(scp, 1, devip)))
980 /* following just in case virtual_gb changed */
981 sdebug_capacity = get_sdebug_capacity();
982 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
983 if (sdebug_capacity < 0xffffffff) {
984 capac = (unsigned int)sdebug_capacity - 1;
985 arr[0] = (capac >> 24);
986 arr[1] = (capac >> 16) & 0xff;
987 arr[2] = (capac >> 8) & 0xff;
988 arr[3] = capac & 0xff;
995 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
996 arr[7] = scsi_debug_sector_size & 0xff;
997 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1000 #define SDEBUG_READCAP16_ARR_SZ 32
/* Respond to READ CAPACITY(16): 8-byte last-LBA, block size, physical
 * block exponent, lowest aligned LBA, TPE (thin provisioning) flag and
 * protection type when DIF is enabled. */
1001 static int resp_readcap16(struct scsi_cmnd * scp,
1002 struct sdebug_dev_info * devip)
1004 unsigned char *cmd = (unsigned char *)scp->cmnd;
1005 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1006 unsigned long long capac;
1007 int errsts, k, alloc_len;
1009 if ((errsts = check_readiness(scp, 1, devip)))
1011 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1013 /* following just in case virtual_gb changed */
1014 sdebug_capacity = get_sdebug_capacity();
1015 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1016 capac = sdebug_capacity - 1;
/* store the last LBA big-endian into bytes 0-7 */
1017 for (k = 0; k < 8; ++k, capac >>= 8)
1018 arr[7 - k] = capac & 0xff;
1019 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1020 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1021 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1022 arr[11] = scsi_debug_sector_size & 0xff;
1023 arr[13] = scsi_debug_physblk_exp & 0xf;
1024 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1026 if (scsi_debug_unmap_granularity)
1027 arr[14] |= 0x80; /* TPE */
1029 arr[15] = scsi_debug_lowest_aligned & 0xff;
1031 if (scsi_debug_dif) {
1032 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1033 arr[12] |= 1; /* PROT_EN */
1036 return fill_from_dev_buffer(scp, arr,
1037 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1040 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/* Respond to REPORT TARGET PORT GROUPS: two groups of one port each,
 * matching VPD page 0x88; group B is reported unavailable.  rlen is
 * computed and n initialised in lines missing from this excerpt. */
1042 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1043 struct sdebug_dev_info * devip)
1045 unsigned char *cmd = (unsigned char *)scp->cmnd;
1046 unsigned char * arr;
1047 int host_no = devip->sdbg_host->shost->host_no;
1048 int n, ret, alen, rlen;
1049 int port_group_a, port_group_b, port_a, port_b;
1051 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1054 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1056 return DID_REQUEUE << 16;
1058 * EVPD page 0x88 states we have two ports, one
1059 * real and a fake port with no device connected.
1060 * So we create two port groups with one port each
1061 * and set the group with port B to unavailable.
1063 port_a = 0x1; /* relative port A */
1064 port_b = 0x2; /* relative port B */
1065 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1066 (devip->channel & 0x7f);
1067 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1068 (devip->channel & 0x7f) + 0x80;
1071 * The asymmetric access state is cycled according to the host_id.
1074 if (0 == scsi_debug_vpd_use_hostno) {
1075 arr[n++] = host_no % 3; /* Asymm access state */
1076 arr[n++] = 0x0F; /* claim: all states are supported */
1078 arr[n++] = 0x0; /* Active/Optimized path */
1079 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1081 arr[n++] = (port_group_a >> 8) & 0xff;
1082 arr[n++] = port_group_a & 0xff;
1083 arr[n++] = 0; /* Reserved */
1084 arr[n++] = 0; /* Status code */
1085 arr[n++] = 0; /* Vendor unique */
1086 arr[n++] = 0x1; /* One port per group */
1087 arr[n++] = 0; /* Reserved */
1088 arr[n++] = 0; /* Reserved */
1089 arr[n++] = (port_a >> 8) & 0xff;
1090 arr[n++] = port_a & 0xff;
1091 arr[n++] = 3; /* Port unavailable */
1092 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1093 arr[n++] = (port_group_b >> 8) & 0xff;
1094 arr[n++] = port_group_b & 0xff;
1095 arr[n++] = 0; /* Reserved */
1096 arr[n++] = 0; /* Status code */
1097 arr[n++] = 0; /* Vendor unique */
1098 arr[n++] = 0x1; /* One port per group */
1099 arr[n++] = 0; /* Reserved */
1100 arr[n++] = 0; /* Reserved */
1101 arr[n++] = (port_b >> 8) & 0xff;
1102 arr[n++] = port_b & 0xff;
/* 4-byte big-endian "returned data length" header */
1105 arr[0] = (rlen >> 24) & 0xff;
1106 arr[1] = (rlen >> 16) & 0xff;
1107 arr[2] = (rlen >> 8) & 0xff;
1108 arr[3] = rlen & 0xff;
1111 * Return the smallest value of either
1112 * - The allocated length
1113 * - The constructed command length
1114 * - The maximum array size
1117 ret = fill_from_dev_buffer(scp, arr,
1118 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1123 /* <<Following mode page info copied from ST318451LW>> */
/*
 * Build the Read-Write Error Recovery mode page (0x01) at p.
 * pcontrol 1 (changeable values) reports all-zero changeable bits.
 * Returns the page length in bytes. (Reconstructed: the extracted
 * listing had stray line-number prefixes and dropped lines.)
 */
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
/*
 * Build the Disconnect-Reconnect mode page (0x02) at p.
 * pcontrol 1 (changeable values) reports all-zero changeable bits.
 * Returns the page length in bytes. (Reconstructed from a garbled
 * extraction; the `if (1 == pcontrol)` guard line was dropped.)
 */
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1147 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1148 { /* Format device page for mode_sense */
1149 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1150 0, 0, 0, 0, 0, 0, 0, 0,
1151 0, 0, 0, 0, 0x40, 0, 0, 0};
1153 memcpy(p, format_pg, sizeof(format_pg));
1154 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1155 p[11] = sdebug_sectors_per & 0xff;
1156 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1157 p[13] = scsi_debug_sector_size & 0xff;
1158 if (DEV_REMOVEABLE(target))
1159 p[20] |= 0x20; /* should agree with INQUIRY */
1161 memset(p + 2, 0, sizeof(format_pg) - 2);
1162 return sizeof(format_pg);
/*
 * Build the Caching mode page (0x08) at p. pcontrol 1 (changeable
 * values) reports all-zero changeable bits. Returns the page length.
 * (Reconstructed: the `if (1 == pcontrol)` guard line was dropped.)
 */
static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(caching_pg) - 2);
	return sizeof(caching_pg);
}
1176 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1177 { /* Control mode page for mode_sense */
1178 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1180 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1183 if (scsi_debug_dsense)
1184 ctrl_m_pg[2] |= 0x4;
1186 ctrl_m_pg[2] &= ~0x4;
1189 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1191 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1193 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1194 else if (2 == pcontrol)
1195 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1196 return sizeof(ctrl_m_pg);
1200 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1201 { /* Informational Exceptions control mode page for mode_sense */
1202 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1204 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1207 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1209 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1210 else if (2 == pcontrol)
1211 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1212 return sizeof(iec_m_pg);
/*
 * Build the SAS SSP short-format protocol-specific port mode page
 * (0x19) at p. pcontrol 1 (changeable values) zeroes everything past
 * the header. Returns the page length. (Reconstructed: the
 * `if (1 == pcontrol)` guard line was dropped.)
 */
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
/*
 * Build the SAS Phy Control And Discover mode subpage (0x19/0x01) at
 * p. Two phy descriptors are emitted; their attached SAS addresses
 * are derived from target_dev_id (port A = id+1, port B = id+2),
 * patched big-endian into bytes 20..23 of each 48-byte phy descriptor.
 * pcontrol 1 (changeable values) zeroes everything past byte 4.
 * Returns the subpage length (104). (Reconstructed: the closing brace
 * of the template array and the local declarations were dropped.)
 */
static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	int port_a, port_b;
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	p[20] = (port_a >> 24);
	p[21] = (port_a >> 16) & 0xff;
	p[22] = (port_a >> 8) & 0xff;
	p[23] = port_a & 0xff;
	p[48 + 20] = (port_b >> 24);
	p[48 + 21] = (port_b >> 16) & 0xff;
	p[48 + 22] = (port_b >> 8) & 0xff;
	p[48 + 23] = port_b & 0xff;
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
/*
 * Build the SAS SSP shared protocol-specific port mode subpage
 * (0x19/0x02) at p. pcontrol 1 (changeable values) zeroes everything
 * past byte 4. Returns the subpage length (16). (Reconstructed: the
 * template array tail and guard line were dropped.)
 */
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
1274 #define SDEBUG_MAX_MSENSE_SZ 256
/*
 * MODE SENSE (6/10) handler: builds the mode parameter header, an
 * optional 8- or 16-byte block descriptor, then the requested mode
 * page(s) via the resp_*_pg helpers, and copies the result back.
 * NOTE(review): this listing has extraction gaps (embedded line
 * numbers skip), so braces and several statements are missing here.
 */
1276 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1277 struct sdebug_dev_info * devip)
1279 unsigned char dbd, llbaa;
1280 int pcontrol, pcode, subpcode, bd_len;
1281 unsigned char dev_spec;
1282 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1284 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1285 unsigned char *cmd = (unsigned char *)scp->cmnd;
/* fail early if a unit attention / not-ready condition is pending */
1287 if ((errsts = check_readiness(scp, 1, devip)))
1289 dbd = !!(cmd[1] & 0x8);
1290 pcontrol = (cmd[2] & 0xc0) >> 6;
1291 pcode = cmd[2] & 0x3f;
1293 msense_6 = (MODE_SENSE == cmd[0]);
/* LLBAA (16-byte block descriptor) only valid for the 10-byte CDB */
1294 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1295 if ((0 == scsi_debug_ptype) && (0 == dbd))
1296 bd_len = llbaa ? 16 : 8;
1299 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1300 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1301 if (0x3 == pcontrol) { /* Saving values not supported */
1302 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1304 return check_condition_result;
1306 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1307 (devip->target * 1000) - 3;
1308 /* set DPOFUA bit for disks */
1309 if (0 == scsi_debug_ptype)
1310 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1320 arr[4] = 0x1; /* set LONGLBA bit */
1321 arr[7] = bd_len; /* assume 255 or less */
1325 if ((bd_len > 0) && (!sdebug_capacity))
1326 sdebug_capacity = get_sdebug_capacity();
/* 8-byte descriptor: clamp capacity to 0xffffffff if it overflows */
1329 if (sdebug_capacity > 0xfffffffe) {
1335 ap[0] = (sdebug_capacity >> 24) & 0xff;
1336 ap[1] = (sdebug_capacity >> 16) & 0xff;
1337 ap[2] = (sdebug_capacity >> 8) & 0xff;
1338 ap[3] = sdebug_capacity & 0xff;
1340 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1341 ap[7] = scsi_debug_sector_size & 0xff;
1344 } else if (16 == bd_len) {
1345 unsigned long long capac = sdebug_capacity;
/* long LBA descriptor: 8-byte big-endian capacity */
1347 for (k = 0; k < 8; ++k, capac >>= 8)
1348 ap[7 - k] = capac & 0xff;
1349 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1350 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1351 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1352 ap[15] = scsi_debug_sector_size & 0xff;
/* subpages are only implemented for the SAS page (0x19) */
1357 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1358 /* TODO: Control Extension page */
1359 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1361 return check_condition_result;
1364 case 0x1: /* Read-Write error recovery page, direct access */
1365 len = resp_err_recov_pg(ap, pcontrol, target);
1368 case 0x2: /* Disconnect-Reconnect page, all devices */
1369 len = resp_disconnect_pg(ap, pcontrol, target);
1372 case 0x3: /* Format device page, direct access */
1373 len = resp_format_pg(ap, pcontrol, target);
1376 case 0x8: /* Caching page, direct access */
1377 len = resp_caching_pg(ap, pcontrol, target);
1380 case 0xa: /* Control Mode page, all devices */
1381 len = resp_ctrl_m_pg(ap, pcontrol, target);
1384 case 0x19: /* if spc==1 then sas phy, control+discover */
1385 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1386 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1387 INVALID_FIELD_IN_CDB, 0);
1388 return check_condition_result;
1391 if ((0x0 == subpcode) || (0xff == subpcode))
1392 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1393 if ((0x1 == subpcode) || (0xff == subpcode))
1394 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1396 if ((0x2 == subpcode) || (0xff == subpcode))
1397 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1400 case 0x1c: /* Informational Exceptions Mode page, all devices */
1401 len = resp_iec_m_pg(ap, pcontrol, target);
1404 case 0x3f: /* Read all Mode pages */
1405 if ((0 == subpcode) || (0xff == subpcode)) {
1406 len = resp_err_recov_pg(ap, pcontrol, target);
1407 len += resp_disconnect_pg(ap + len, pcontrol, target);
1408 len += resp_format_pg(ap + len, pcontrol, target);
1409 len += resp_caching_pg(ap + len, pcontrol, target);
1410 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1411 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1412 if (0xff == subpcode) {
1413 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1414 target, target_dev_id);
1415 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1417 len += resp_iec_m_pg(ap + len, pcontrol, target);
1419 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1420 INVALID_FIELD_IN_CDB, 0);
1421 return check_condition_result;
1426 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1428 return check_condition_result;
/* mode data length: excludes itself (1 byte for 6, 2 bytes for 10) */
1431 arr[0] = offset - 1;
1433 arr[0] = ((offset - 2) >> 8) & 0xff;
1434 arr[1] = (offset - 2) & 0xff;
1436 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1439 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * MODE SELECT (6/10) handler: validates the CDB (PF must be set, SP
 * must be clear), fetches the parameter list, then applies changes to
 * the Control (0x0a) or Informational Exceptions (0x1c) mode page.
 * NOTE(review): extraction gaps — several statements are missing here.
 */
1441 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1442 struct sdebug_dev_info * devip)
1444 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1445 int param_len, res, errsts, mpage;
1446 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1447 unsigned char *cmd = (unsigned char *)scp->cmnd;
1449 if ((errsts = check_readiness(scp, 1, devip)))
1451 memset(arr, 0, sizeof(arr));
1454 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
/* reject: PF clear, SP set, or parameter list larger than our buffer */
1455 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1456 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1457 INVALID_FIELD_IN_CDB, 0);
1458 return check_condition_result;
1460 res = fetch_to_dev_buffer(scp, arr, param_len);
1462 return (DID_ERROR << 16);
1463 else if ((res < param_len) &&
1464 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1465 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1466 " IO sent=%d bytes\n", param_len, res);
1467 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1468 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1470 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1471 INVALID_FIELD_IN_PARAM_LIST, 0);
1472 return check_condition_result;
/* off = first mode page, after the header and block descriptor(s) */
1474 off = bd_len + (mselect6 ? 4 : 8);
1475 mpage = arr[off] & 0x3f;
1476 ps = !!(arr[off] & 0x80);
/* PS bit is reserved in MODE SELECT data */
1478 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1479 INVALID_FIELD_IN_PARAM_LIST, 0);
1480 return check_condition_result;
1482 spf = !!(arr[off] & 0x40);
1483 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1485 if ((pg_len + off) > param_len) {
1486 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1487 PARAMETER_LIST_LENGTH_ERR, 0);
1488 return check_condition_result;
1491 case 0xa: /* Control Mode page */
1492 if (ctrl_m_pg[1] == arr[off + 1]) {
1493 memcpy(ctrl_m_pg + 2, arr + off + 2,
1494 sizeof(ctrl_m_pg) - 2);
/* keep the module's descriptor-sense option in sync with D_SENSE */
1495 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1499 case 0x1c: /* Informational Exceptions Mode page */
1500 if (iec_m_pg[1] == arr[off + 1]) {
1501 memcpy(iec_m_pg + 2, arr + off + 2,
1502 sizeof(iec_m_pg) - 2);
1509 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1510 INVALID_FIELD_IN_PARAM_LIST, 0);
1511 return check_condition_result;
/*
 * Fill arr with the Temperature log page parameters: current
 * temperature (38 C) and reference temperature (65 C). Returns the
 * number of bytes written. (Reconstructed: the template array tail
 * was dropped by the extraction.)
 */
static int resp_temp_l_pg(unsigned char * arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
1524 static int resp_ie_l_pg(unsigned char * arr)
1526 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1529 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1530 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1531 arr[4] = THRESHOLD_EXCEEDED;
1534 return sizeof(ie_l_pg);
1537 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * LOG SENSE handler: supports the supported-pages list (0x0),
 * Temperature (0xd) and Informational Exceptions (0x2f) pages, plus
 * the 0xff "all subpages" variants of the list pages.
 * NOTE(review): extraction gaps — some statements are missing here.
 */
1539 static int resp_log_sense(struct scsi_cmnd * scp,
1540 struct sdebug_dev_info * devip)
1542 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1543 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1544 unsigned char *cmd = (unsigned char *)scp->cmnd;
1546 if ((errsts = check_readiness(scp, 1, devip)))
1548 memset(arr, 0, sizeof(arr));
/* PPC and SP bits are not supported */
1552 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1553 INVALID_FIELD_IN_CDB, 0);
1554 return check_condition_result;
1556 pcontrol = (cmd[2] & 0xc0) >> 6;
1557 pcode = cmd[2] & 0x3f;
1558 subpcode = cmd[3] & 0xff;
1559 alloc_len = (cmd[7] << 8) + cmd[8];
1561 if (0 == subpcode) {
1563 case 0x0: /* Supported log pages log page */
1565 arr[n++] = 0x0; /* this page */
1566 arr[n++] = 0xd; /* Temperature */
1567 arr[n++] = 0x2f; /* Informational exceptions */
1570 case 0xd: /* Temperature log page */
1571 arr[3] = resp_temp_l_pg(arr + 4);
1573 case 0x2f: /* Informational exceptions log page */
1574 arr[3] = resp_ie_l_pg(arr + 4);
1577 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1578 INVALID_FIELD_IN_CDB, 0);
1579 return check_condition_result;
1581 } else if (0xff == subpcode) {
1585 case 0x0: /* Supported log pages and subpages log page */
1588 arr[n++] = 0x0; /* 0,0 page */
1590 arr[n++] = 0xff; /* this page */
1592 arr[n++] = 0x0; /* Temperature */
1594 arr[n++] = 0x0; /* Informational exceptions */
1597 case 0xd: /* Temperature subpages */
1600 arr[n++] = 0x0; /* Temperature */
1603 case 0x2f: /* Informational exceptions subpages */
1606 arr[n++] = 0x0; /* Informational exceptions */
1610 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1611 INVALID_FIELD_IN_CDB, 0);
1612 return check_condition_result;
1615 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1616 INVALID_FIELD_IN_CDB, 0);
1617 return check_condition_result;
1619 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
/* NOTE(review): clamping with SDEBUG_MAX_INQ_ARR_SZ looks like a
 * copy/paste slip — presumably SDEBUG_MAX_LSENSE_SZ was intended;
 * confirm against upstream before changing. */
1620 return fill_from_dev_buffer(scp, arr,
1621 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1624 static int check_device_access_params(struct sdebug_dev_info *devi,
1625 unsigned long long lba, unsigned int num)
1627 if (lba + num > sdebug_capacity) {
1628 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1629 return check_condition_result;
1631 /* transfer length excessive (tie in to block limits VPD page) */
1632 if (num > sdebug_store_sectors) {
1633 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1634 return check_condition_result;
1639 static int do_device_access(struct scsi_cmnd *scmd,
1640 struct sdebug_dev_info *devi,
1641 unsigned long long lba, unsigned int num, int write)
1644 unsigned int block, rest = 0;
1645 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1647 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1649 block = do_div(lba, sdebug_store_sectors);
1650 if (block + num > sdebug_store_sectors)
1651 rest = block + num - sdebug_store_sectors;
1653 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1654 (num - rest) * scsi_debug_sector_size);
1656 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
/*
 * DIF read path: verify the protection tuples (guard/ref tags) held in
 * dif_storep against the data in fake_storep for each sector, then
 * copy the 8-byte tuples into the command's protection scatterlist.
 * NOTE(review): extraction gaps — braces, error returns and some
 * statements are missing from this view.
 */
1661 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1662 unsigned int sectors, u32 ei_lba)
1664 unsigned int i, resid;
1665 struct scatterlist *psgl;
1666 struct sd_dif_tuple *sdt;
1668 sector_t tmp_sec = start_sec;
/* wrap the starting sector into the physical store */
1671 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1673 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1675 for (i = 0 ; i < sectors ; i++) {
/* app tag 0xffff means "do not check this sector" */
1678 if (sdt[i].app_tag == 0xffff)
1681 sector = start_sec + i;
1683 switch (scsi_debug_guard) {
1685 csum = ip_compute_csum(fake_storep +
1686 sector * scsi_debug_sector_size,
1687 scsi_debug_sector_size);
1690 csum = crc_t10dif(fake_storep +
1691 sector * scsi_debug_sector_size,
1692 scsi_debug_sector_size);
1693 csum = cpu_to_be16(csum);
1699 if (sdt[i].guard_tag != csum) {
1700 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1701 " rcvd 0x%04x, data 0x%04x\n", __func__,
1702 (unsigned long)sector,
1703 be16_to_cpu(sdt[i].guard_tag),
/* Type 1: reference tag must equal the low 32 bits of the LBA */
1709 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1710 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1711 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1712 __func__, (unsigned long)sector);
/* Type 2: reference tag must equal the expected initial LBA */
1717 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1718 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1719 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1720 __func__, (unsigned long)sector);
1728 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1731 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1732 int len = min(psgl->length, resid);
1734 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1735 memcpy(paddr, dif_storep + dif_offset(sector), len);
/* wrap within the protection store as well */
1738 if (sector >= sdebug_store_sectors) {
1741 sector = do_div(tmp_sec, sdebug_store_sectors);
1744 kunmap_atomic(paddr, KM_IRQ0);
/*
 * READ handler: range-check, optionally inject an unrecoverable
 * medium error (OPT_MEDIUM_ERR), run DIF verification when DIX is
 * active, then copy data from the backing store under the rw lock.
 * NOTE(review): extraction gaps — some statements missing here.
 */
1752 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1753 unsigned int num, struct sdebug_dev_info *devip,
1756 unsigned long iflags;
1759 ret = check_device_access_params(devip, lba, num);
/* optional fault injection: fake a medium error over a fixed LBA */
1763 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1764 (lba <= OPT_MEDIUM_ERR_ADDR) &&
1765 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1766 /* claim unrecoverable read error */
1767 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
1769 /* set info field and valid bit for fixed descriptor */
1770 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1771 devip->sense_buff[0] |= 0x80; /* Valid bit */
1772 ret = OPT_MEDIUM_ERR_ADDR;
1773 devip->sense_buff[3] = (ret >> 24) & 0xff;
1774 devip->sense_buff[4] = (ret >> 16) & 0xff;
1775 devip->sense_buff[5] = (ret >> 8) & 0xff;
1776 devip->sense_buff[6] = ret & 0xff;
1778 return check_condition_result;
/* DIX: verify protection information before handing data up */
1782 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1783 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1786 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1787 return illegal_condition_result;
1791 read_lock_irqsave(&atomic_rw, iflags);
1792 ret = do_device_access(SCpnt, devip, lba, num, 0);
1793 read_unlock_irqrestore(&atomic_rw, iflags);
/*
 * Debug helper: hex/ASCII dump of a sector buffer to the kernel log,
 * 16 bytes per row; printable characters are shown as-is.
 * NOTE(review): extraction gaps — a preceding dropped line may carry
 * a `static` qualifier; loop-closing braces are also missing here.
 */
1797 void dump_sector(unsigned char *buf, int len)
1801 printk(KERN_ERR ">>> Sector Dump <<<\n");
1803 for (i = 0 ; i < len ; i += 16) {
1804 printk(KERN_ERR "%04d: ", i);
1806 for (j = 0 ; j < 16 ; j++) {
1807 unsigned char c = buf[i+j];
1808 if (c >= 0x20 && c < 0x7e)
1809 printk(" %c ", buf[i+j]);
1811 printk("%02x ", buf[i+j]);
/*
 * DIF write path: walk the data scatterlist sector by sector, verify
 * each sector's guard/ref tags from the protection scatterlist, and
 * on success store the 8-byte tuple into dif_storep. Uses nested
 * kmap_atomic slots (KM_IRQ0 for data, KM_IRQ1 for protection).
 * NOTE(review): extraction gaps — error-exit statements and several
 * braces are missing from this view.
 */
1818 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1819 unsigned int sectors, u32 ei_lba)
1822 struct sd_dif_tuple *sdt;
1823 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1824 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1825 void *daddr, *paddr;
1826 sector_t tmp_sec = start_sec;
1829 unsigned short csum;
/* wrap the starting sector into the physical store */
1831 sector = do_div(tmp_sec, sdebug_store_sectors);
1833 BUG_ON(scsi_sg_count(SCpnt) == 0);
1834 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1836 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1839 /* For each data page */
1840 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1841 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1843 /* For each sector-sized chunk in data page */
1844 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1846 /* If we're at the end of the current
1847 * protection page advance to the next one
1849 if (ppage_offset >= psgl->length) {
1850 kunmap_atomic(paddr, KM_IRQ1);
1851 psgl = sg_next(psgl);
1852 BUG_ON(psgl == NULL);
1853 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1858 sdt = paddr + ppage_offset;
/* recompute the guard over the incoming data chunk */
1860 switch (scsi_debug_guard) {
1862 csum = ip_compute_csum(daddr,
1863 scsi_debug_sector_size);
1866 csum = cpu_to_be16(crc_t10dif(daddr,
1867 scsi_debug_sector_size));
1875 if (sdt->guard_tag != csum) {
1877 "%s: GUARD check failed on sector %lu " \
1878 "rcvd 0x%04x, calculated 0x%04x\n",
1879 __func__, (unsigned long)sector,
1880 be16_to_cpu(sdt->guard_tag),
1883 dump_sector(daddr, scsi_debug_sector_size);
/* Type 1: ref tag must match the low 32 bits of the LBA */
1887 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1888 be32_to_cpu(sdt->ref_tag)
1889 != (start_sec & 0xffffffff)) {
1891 "%s: REF check failed on sector %lu\n",
1892 __func__, (unsigned long)sector);
1894 dump_sector(daddr, scsi_debug_sector_size);
/* Type 2: ref tag must match the expected initial LBA */
1898 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1899 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1901 "%s: REF check failed on sector %lu\n",
1902 __func__, (unsigned long)sector);
1904 dump_sector(daddr, scsi_debug_sector_size);
1908 /* Would be great to copy this in bigger
1909 * chunks. However, for the sake of
1910 * correctness we need to verify each sector
1911 * before writing it to "stable" storage
1913 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1917 if (sector == sdebug_store_sectors)
1918 sector = 0; /* Force wrap */
1922 daddr += scsi_debug_sector_size;
1923 ppage_offset += sizeof(struct sd_dif_tuple);
1926 kunmap_atomic(daddr, KM_IRQ0);
1929 kunmap_atomic(paddr, KM_IRQ1);
/* error-exit path: unmap both atomic mappings before returning */
1937 kunmap_atomic(daddr, KM_IRQ0);
1938 kunmap_atomic(paddr, KM_IRQ1);
/*
 * Thin-provisioning query: return whether the unmap granule holding
 * lba is mapped (bit set in map_storep) and, via *num, how many
 * consecutive sectors from lba share that state (up to the next
 * granule whose bit differs).
 * NOTE(review): extraction gaps — `*num` assignment/return missing.
 */
1942 static unsigned int map_state(sector_t lba, unsigned int *num)
1944 unsigned int granularity, alignment, mapped;
1945 sector_t block, next, end;
1947 granularity = scsi_debug_unmap_granularity;
1948 alignment = granularity - scsi_debug_unmap_alignment;
/* convert lba to a granule index, honouring the unmap alignment */
1949 block = lba + alignment;
1950 do_div(block, granularity);
1952 mapped = test_bit(block, map_storep);
/* find where the mapped/unmapped run ends */
1955 next = find_next_zero_bit(map_storep, map_size, block);
1957 next = find_next_bit(map_storep, map_size, block);
1959 end = next * granularity - scsi_debug_unmap_alignment;
/*
 * Thin-provisioning: mark every unmap granule touched by [lba,
 * lba+len) as mapped in the map_storep bitmap.
 * NOTE(review): extraction gaps — the loop header and the condition
 * guarding set_bit are missing from this view.
 */
1965 static void map_region(sector_t lba, unsigned int len)
1967 unsigned int granularity, alignment;
1968 sector_t end = lba + len;
1970 granularity = scsi_debug_unmap_granularity;
1971 alignment = granularity - scsi_debug_unmap_alignment;
1974 sector_t block, rem;
/* granule index of lba; rem = offset of lba within that granule */
1976 block = lba + alignment;
1977 rem = do_div(block, granularity);
1979 set_bit(block, map_storep);
/* advance to the start of the next granule */
1981 lba += granularity - rem;
/*
 * Thin-provisioning: clear the mapped bit for each unmap granule that
 * is *entirely* contained in [lba, lba+len); partial granules at the
 * edges stay mapped (rem == 0 and a full granule must fit).
 * NOTE(review): extraction gaps — loop header/braces missing here.
 */
1985 static void unmap_region(sector_t lba, unsigned int len)
1987 unsigned int granularity, alignment;
1988 sector_t end = lba + len;
1990 granularity = scsi_debug_unmap_granularity;
1991 alignment = granularity - scsi_debug_unmap_alignment;
1994 sector_t block, rem;
1996 block = lba + alignment;
1997 rem = do_div(block, granularity);
1999 if (rem == 0 && lba + granularity <= end)
2000 clear_bit(block, map_storep);
2002 lba += granularity - rem;
/*
 * WRITE handler: range-check, run DIF verification when DIX is
 * active, then copy data into the backing store under the write lock
 * and mark the written region mapped when unmap is configured.
 * NOTE(review): extraction gaps — some statements missing here.
 */
2006 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2007 unsigned int num, struct sdebug_dev_info *devip,
2010 unsigned long iflags;
2013 ret = check_device_access_params(devip, lba, num);
/* DIX: verify incoming protection information before committing */
2018 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2019 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2022 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2023 return illegal_condition_result;
2027 write_lock_irqsave(&atomic_rw, iflags);
2028 ret = do_device_access(SCpnt, devip, lba, num, 1);
2029 if (scsi_debug_unmap_granularity)
2030 map_region(lba, num);
2031 write_unlock_irqrestore(&atomic_rw, iflags);
2033 return (DID_ERROR << 16);
/* short transfer: log when the NOISE debug option is on */
2034 else if ((ret < (num * scsi_debug_sector_size)) &&
2035 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2036 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2037 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
/*
 * WRITE SAME handler: with the UNMAP bit set (and unmap configured)
 * just unmap the region; otherwise fetch one logical block from the
 * host and replicate it across the remaining blocks, then mark the
 * region mapped. All under the write lock.
 * NOTE(review): extraction gaps — some statements missing here.
 */
2042 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2043 unsigned int num, struct sdebug_dev_info *devip,
2044 u32 ei_lba, unsigned int unmap)
2046 unsigned long iflags;
2047 unsigned long long i;
2050 ret = check_device_access_params(devip, lba, num);
2054 write_lock_irqsave(&atomic_rw, iflags);
2056 if (unmap && scsi_debug_unmap_granularity) {
2057 unmap_region(lba, num);
2061 /* Else fetch one logical block */
2062 ret = fetch_to_dev_buffer(scmd,
2063 fake_storep + (lba * scsi_debug_sector_size),
2064 scsi_debug_sector_size);
2067 write_unlock_irqrestore(&atomic_rw, iflags);
2068 return (DID_ERROR << 16);
2069 } else if ((ret < (num * scsi_debug_sector_size)) &&
2070 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2071 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2072 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2074 /* Copy first sector to remaining blocks */
2075 for (i = 1 ; i < num ; i++)
2076 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2077 fake_storep + (lba * scsi_debug_sector_size),
2078 scsi_debug_sector_size);
2080 if (scsi_debug_unmap_granularity)
2081 map_region(lba, num);
2083 write_unlock_irqrestore(&atomic_rw, iflags);
2088 struct unmap_block_desc {
/*
 * UNMAP handler: copy the parameter list out of the scatterlist,
 * validate its header lengths, then unmap each described LBA range.
 * NOTE(review): extraction gaps — lock/cleanup statements missing.
 * NOTE(review): the BUG_ON()s validate lengths taken from the CDB /
 * parameter data; an initiator-supplied mismatch would panic —
 * presumably acceptable only for this debug driver.
 */
2094 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2097 struct unmap_block_desc *desc;
2098 unsigned int i, payload_len, descriptors;
2101 ret = check_readiness(scmd, 1, devip);
2105 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2106 BUG_ON(scsi_bufflen(scmd) != payload_len);
2108 descriptors = (payload_len - 8) / 16;
2110 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2112 return check_condition_result;
2114 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2116 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2117 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2119 desc = (void *)&buf[8];
2121 for (i = 0 ; i < descriptors ; i++) {
2122 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2123 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2125 ret = check_device_access_params(devip, lba, num);
2129 unmap_region(lba, num);
2140 #define SDEBUG_GET_LBA_STATUS_LEN 32
/*
 * GET LBA STATUS handler: report the mapped/unmapped state of the
 * region starting at the requested LBA as a single LBA status
 * descriptor (provisioning status 0 = mapped, 1 = deallocated).
 * NOTE(review): extraction gaps — a few statements missing here.
 */
2142 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2143 struct sdebug_dev_info * devip)
2145 unsigned long long lba;
2146 unsigned int alloc_len, mapped, num;
2147 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2150 ret = check_readiness(scmd, 1, devip);
2154 lba = get_unaligned_be64(&scmd->cmnd[2]);
2155 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2160 ret = check_device_access_params(devip, lba, 1);
/* num receives the length of the run sharing lba's mapped state */
2164 mapped = map_state(lba, &num);
2166 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2167 put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
2168 put_unaligned_be64(lba, &arr[8]); /* LBA */
2169 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2170 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2172 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2175 #define SDEBUG_RLUN_ARR_SZ 256
/*
 * REPORT LUNS handler: list scsi_debug_max_luns LUNs (optionally
 * skipping LUN 0) plus, for select_report > 0, the SAM-2 well-known
 * REPORT LUNS w-lun, packed as 8-byte scsi_lun entries.
 * NOTE(review): extraction gaps — loop tails/braces missing here.
 */
2177 static int resp_report_luns(struct scsi_cmnd * scp,
2178 struct sdebug_dev_info * devip)
2180 unsigned int alloc_len;
2181 int lun_cnt, i, upper, num, n, wlun, lun;
2182 unsigned char *cmd = (unsigned char *)scp->cmnd;
2183 int select_report = (int)cmd[2];
2184 struct scsi_lun *one_lun;
2185 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2186 unsigned char * max_addr;
2188 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2189 if ((alloc_len < 4) || (select_report > 2)) {
2190 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2192 return check_condition_result;
2194 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2195 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2196 lun_cnt = scsi_debug_max_luns;
/* select_report 1 = well-known luns only */
2197 if (1 == select_report)
2199 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2201 wlun = (select_report > 0) ? 1 : 0;
2202 num = lun_cnt + wlun;
/* LUN list length header (bytes, big-endian) */
2203 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2204 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2205 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2206 sizeof(struct scsi_lun)), num);
2211 one_lun = (struct scsi_lun *) &arr[8];
2212 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2213 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2214 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
/* SAM-2 flat addressing: address-method bits in byte 0 */
2216 upper = (lun >> 8) & 0x3f;
2218 one_lun[i].scsi_lun[0] =
2219 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2220 one_lun[i].scsi_lun[1] = lun & 0xff;
/* append the REPORT LUNS well-known lun when requested */
2223 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2224 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2227 alloc_len = (unsigned char *)(one_lun + i) - arr;
2228 return fill_from_dev_buffer(scp, arr,
2229 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/*
 * XDWRITEREAD handler: XOR the outgoing data (copied to a temporary
 * buffer) into the bidirectional command's incoming scatterlist.
 * NOTE(review): extraction gaps — the kmalloc failure check, offset
 * initialization and cleanup path are missing from this view.
 */
2232 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2233 unsigned int num, struct sdebug_dev_info *devip)
2236 unsigned char *kaddr, *buf;
2237 unsigned int offset;
2238 struct scatterlist *sg;
2239 struct scsi_data_buffer *sdb = scsi_in(scp);
2241 /* better not to use temporary buffer. */
2242 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2246 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* XOR each incoming page against the matching span of buf */
2249 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2250 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
2254 for (j = 0; j < sg->length; j++)
2255 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2257 offset += sg->length;
2258 kunmap_atomic(kaddr, KM_USER0);
2267 /* When timer goes off this function is called. */
2268 static void timer_intr_handler(unsigned long indx)
2270 struct sdebug_queued_cmd * sqcp;
2271 unsigned long iflags;
2273 if (indx >= scsi_debug_max_queue) {
2274 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2278 spin_lock_irqsave(&queued_arr_lock, iflags);
2279 sqcp = &queued_arr[(int)indx];
2280 if (! sqcp->in_use) {
2281 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2283 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2287 if (sqcp->done_funct) {
2288 sqcp->a_cmnd->result = sqcp->scsi_result;
2289 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2291 sqcp->done_funct = NULL;
2292 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2296 static struct sdebug_dev_info *
2297 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2299 struct sdebug_dev_info *devip;
2301 devip = kzalloc(sizeof(*devip), flags);
2303 devip->sdbg_host = sdbg_host;
2304 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * Find (or create) the sdebug_dev_info for a scsi_device: reuse the
 * hostdata pointer if set, else search the host's device list for a
 * matching used entry, else recycle an unused slot or allocate a new
 * one, then initialize its identity and sense buffer.
 * NOTE(review): extraction gaps — early returns and braces missing.
 */
2309 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2311 struct sdebug_host_info * sdbg_host;
2312 struct sdebug_dev_info * open_devip = NULL;
2313 struct sdebug_dev_info * devip =
2314 (struct sdebug_dev_info *)sdev->hostdata;
2318 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2320 printk(KERN_ERR "Host info NULL\n");
/* look for an existing match; remember the first free slot seen */
2323 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2324 if ((devip->used) && (devip->channel == sdev->channel) &&
2325 (devip->target == sdev->id) &&
2326 (devip->lun == sdev->lun))
2329 if ((!devip->used) && (!open_devip))
2333 if (!open_devip) { /* try and make a new one */
2334 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2336 printk(KERN_ERR "%s: out of memory at line %d\n",
2337 __func__, __LINE__);
/* claim the slot and (re)initialize it for this sdev */
2342 open_devip->channel = sdev->channel;
2343 open_devip->target = sdev->id;
2344 open_devip->lun = sdev->lun;
2345 open_devip->sdbg_host = sdbg_host;
2346 open_devip->reset = 1;
2347 open_devip->used = 1;
2348 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
/* 0x72 = descriptor sense format, 0x70 = fixed format */
2349 if (scsi_debug_dsense)
2350 open_devip->sense_buff[0] = 0x72;
2352 open_devip->sense_buff[0] = 0x70;
2353 open_devip->sense_buff[7] = 0xa;
2355 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2356 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2361 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2363 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2364 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2365 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2366 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
/* scsi_host_template.slave_configure hook: bind the scsi_device to its
 * simulated backing state (devInfoReg), cap the host's max CDB length,
 * set tagged queue depth, limit segment size, and optionally suppress
 * upper-level driver (e.g. sd) attachment.
 * Returns 1 on resource failure so the midlayer marks the LU offline. */
2370 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2372 struct sdebug_dev_info *devip;
2374 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2375 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2376 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2377 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2378 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2379 devip = devInfoReg(sdp);
2381 return 1; /* no resources, will be marked offline */
2382 sdp->hostdata = devip;
2383 if (sdp->host->cmd_per_lun)
2384 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2385 sdp->host->cmd_per_lun);
/* 256 KB max segment keeps scatter-gather entries bounded. */
2386 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2387 if (scsi_debug_no_uld)
2388 sdp->no_uld_attach = 1;
/* scsi_host_template.slave_destroy hook: detach the scsi_device from
 * its sdebug_dev_info so the slot can later be recycled by
 * devInfoReg(). The devip itself stays on the host list. */
2392 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2394 struct sdebug_dev_info *devip =
2395 (struct sdebug_dev_info *)sdp->hostdata;
2397 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2398 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2399 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2401 /* make this slot available for re-use */
2403 sdp->hostdata = NULL;
2407 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
2408 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2410 unsigned long iflags;
2412 struct sdebug_queued_cmd *sqcp;
/* Walk the in-use slots under queued_arr_lock; on a match, cancel the
 * pending response timer and clear the slot's command pointer. */
2414 spin_lock_irqsave(&queued_arr_lock, iflags);
2415 for (k = 0; k < scsi_debug_max_queue; ++k) {
2416 sqcp = &queued_arr[k];
2417 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2418 del_timer_sync(&sqcp->cmnd_timer);
2420 sqcp->a_cmnd = NULL;
2424 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* k < max means the loop broke early on a match. */
2425 return (k < scsi_debug_max_queue) ? 1 : 0;
2428 /* Deletes (stops) timers of all queued commands */
2429 static void stop_all_queued(void)
2431 unsigned long iflags;
2433 struct sdebug_queued_cmd *sqcp;
/* Same pattern as stop_queued_cmnd() but unconditionally for every
 * in-use slot — used by the bus/host reset paths. */
2435 spin_lock_irqsave(&queued_arr_lock, iflags);
2436 for (k = 0; k < scsi_debug_max_queue; ++k) {
2437 sqcp = &queued_arr[k];
2438 if (sqcp->in_use && sqcp->a_cmnd) {
2439 del_timer_sync(&sqcp->cmnd_timer);
2441 sqcp->a_cmnd = NULL;
2444 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* eh_abort_handler: "abort" simply cancels the command's pending
 * delayed-response timer, if any. */
2447 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2449 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2450 printk(KERN_INFO "scsi_debug: abort\n");
2452 stop_queued_cmnd(SCpnt);
/* BIOS geometry hook: prefer the geometry recorded in an on-"disk"
 * partition table (scsi_bios_ptable/scsi_partsize); fall back to the
 * fabricated heads/sectors/cylinders computed at init time.
 * info[] layout: [0]=heads, [1]=sectors per track, [2]=cylinders. */
2456 static int scsi_debug_biosparam(struct scsi_device *sdev,
2457 struct block_device * bdev, sector_t capacity, int *info)
2462 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2463 printk(KERN_INFO "scsi_debug: biosparam\n");
2464 buf = scsi_bios_ptable(bdev);
2466 res = scsi_partsize(buf, capacity,
2467 &info[2], &info[0], &info[1]);
/* Fallback: synthetic geometry from module init. */
2472 info[0] = sdebug_heads;
2473 info[1] = sdebug_sectors_per;
2474 info[2] = sdebug_cylinders_per;
/* eh_device_reset_handler: look up the device's sdebug state.
 * NOTE(review): the elided lines presumably set devip->reset = 1 and
 * return SUCCESS — confirm against the full file. */
2478 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2480 struct sdebug_dev_info * devip;
2482 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2483 printk(KERN_INFO "scsi_debug: device_reset\n")
2486 devip = devInfoReg(SCpnt->device);
/* eh_bus_reset_handler: flag every simulated device on the command's
 * host as reset (so the next command can report a unit attention). */
2493 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2495 struct sdebug_host_info *sdbg_host;
2496 struct sdebug_dev_info * dev_info;
2497 struct scsi_device * sdp;
2498 struct Scsi_Host * hp;
2500 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2501 printk(KERN_INFO "scsi_debug: bus_reset\n");
/* Guard against a NULL command/device/host before dereferencing. */
2503 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2504 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2506 list_for_each_entry(dev_info,
2507 &sdbg_host->dev_info_list,
2509 dev_info->reset = 1;
/* eh_host_reset_handler: flag every device on EVERY simulated host as
 * reset, walking the global host list under sdebug_host_list_lock. */
2515 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2517 struct sdebug_host_info * sdbg_host;
2518 struct sdebug_dev_info * dev_info;
2520 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2521 printk(KERN_INFO "scsi_debug: host_reset\n");
2523 spin_lock(&sdebug_host_list_lock);
2524 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2525 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2527 dev_info->reset = 1;
2529 spin_unlock(&sdebug_host_list_lock);
2534 /* Initializes timers in queued array */
2535 static void __init init_all_queued(void)
2537 unsigned long iflags;
2539 struct sdebug_queued_cmd * sqcp;
/* One-time (module init) setup of each slot's response timer and
 * command pointer; lock taken for consistency with later users. */
2541 spin_lock_irqsave(&queued_arr_lock, iflags);
2542 for (k = 0; k < scsi_debug_max_queue; ++k) {
2543 sqcp = &queued_arr[k];
2544 init_timer(&sqcp->cmnd_timer);
2546 sqcp->a_cmnd = NULL;
2548 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* Write a classic DOS (MBR) partition table into the ramdisk image so
 * the simulated disk appears pre-partitioned into scsi_debug_num_parts
 * equal pieces. Requires at least 1 MB of store; caps the partition
 * count at SDEBUG_MAX_PARTS. Partitions are aligned to whole
 * cylinders (heads * sectors-per-track). */
2551 static void __init sdebug_build_parts(unsigned char *ramp,
2552 unsigned long store_size)
2554 struct partition * pp;
2555 int starts[SDEBUG_MAX_PARTS + 2];
2556 int sectors_per_part, num_sectors, k;
2557 int heads_by_sects, start_sec, end_sec;
2559 /* assume partition table already zeroed */
2560 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2562 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2563 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2564 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2565 "partitions to %d\n", SDEBUG_MAX_PARTS);
/* Reserve track 0 for the MBR itself; split the rest evenly. */
2567 num_sectors = (int)sdebug_store_sectors;
2568 sectors_per_part = (num_sectors - sdebug_sectors_per)
2569 / scsi_debug_num_parts;
2570 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2571 starts[0] = sdebug_sectors_per;
/* Cylinder-align each partition start; the sentinel pair
 * (num_sectors, 0) terminates the table-building loop below. */
2572 for (k = 1; k < scsi_debug_num_parts; ++k)
2573 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2575 starts[scsi_debug_num_parts] = num_sectors;
2576 starts[scsi_debug_num_parts + 1] = 0;
2578 ramp[510] = 0x55; /* magic partition markings (0x55AA at 510/511) */
2580 pp = (struct partition *)(ramp + 0x1be);
2581 for (k = 0; starts[k + 1]; ++k, ++pp) {
2582 start_sec = starts[k];
2583 end_sec = starts[k + 1] - 1;
/* Convert linear LBAs to legacy CHS for the MBR entry;
 * sector numbers are 1-based. */
2586 pp->cyl = start_sec / heads_by_sects;
2587 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2588 / sdebug_sectors_per;
2589 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2591 pp->end_cyl = end_sec / heads_by_sects;
2592 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2593 / sdebug_sectors_per;
2594 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2596 pp->start_sect = start_sec;
2597 pp->nr_sects = end_sec - start_sec + 1;
2598 pp->sys_ind = 0x83; /* plain Linux partition */
/* Complete a command either immediately (delta_jiff <= 0) or after a
 * simulated latency by parking it in queued_arr[] with a timer that
 * fires timer_intr_handler(). Also fakes autosense by copying the
 * device's pre-built sense buffer on CHECK CONDITION.
 * Returns 0 normally; 1 ("busy") when all queue slots are taken. */
2602 static int schedule_resp(struct scsi_cmnd * cmnd,
2603 struct sdebug_dev_info * devip,
2604 done_funct_t done, int scsi_result, int delta_jiff)
2606 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2608 struct scsi_device * sdp = cmnd->device;
2610 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2611 "non-zero result=0x%x\n", sdp->host->host_no,
2612 sdp->channel, sdp->id, sdp->lun, scsi_result);
2615 if (cmnd && devip) {
2616 /* simulate autosense by this driver */
2617 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2618 memcpy(cmnd->sense_buffer, devip->sense_buff,
2619 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2620 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
/* Immediate path: complete synchronously (elided lines presumably
 * invoke done(cmnd) — confirm). */
2622 if (delta_jiff <= 0) {
2624 cmnd->result = scsi_result;
/* Delayed path: grab a free slot under queued_arr_lock. */
2629 unsigned long iflags;
2631 struct sdebug_queued_cmd * sqcp = NULL;
2633 spin_lock_irqsave(&queued_arr_lock, iflags);
2634 for (k = 0; k < scsi_debug_max_queue; ++k) {
2635 sqcp = &queued_arr[k];
2639 if (k >= scsi_debug_max_queue) {
2640 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2641 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2642 return 1; /* report busy to mid level */
/* Arm the per-slot timer; .data carries the slot index so the
 * handler can find this entry again. */
2645 sqcp->a_cmnd = cmnd;
2646 sqcp->scsi_result = scsi_result;
2647 sqcp->done_funct = done;
2648 sqcp->cmnd_timer.function = timer_intr_handler;
2649 sqcp->cmnd_timer.data = k;
2650 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2651 add_timer(&sqcp->cmnd_timer);
2652 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2658 /* Note: The following macros create attribute files in the
2659 /sys/module/scsi_debug/parameters directory. Unfortunately this
2660 driver is unaware of a change via that path and cannot trigger
2661 auxiliary actions, as it can when the corresponding attribute in the
2662 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
/* Module parameters: each maps a "name" boot/modprobe option onto a
 * scsi_debug_* global. S_IWUSR entries are runtime-writable but, per
 * the note above, writes via /sys/module take effect passively. */
2664 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2665 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2666 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2667 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2668 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2669 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2670 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2671 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2672 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2673 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2674 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2675 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2676 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2677 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2678 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2679 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2680 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2682 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2683 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2684 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2685 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2686 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2687 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2688 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2689 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2690 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2691 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2692 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2694 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2695 MODULE_DESCRIPTION("SCSI debug adapter driver");
2696 MODULE_LICENSE("GPL");
2697 MODULE_VERSION(SCSI_DEBUG_VERSION);
/* One-line modinfo help string per parameter, shown by `modinfo`. */
2699 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2700 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2701 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2702 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2703 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2704 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2705 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2706 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2707 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2708 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
2709 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2710 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2711 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2712 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2713 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2714 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2715 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2716 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2717 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2718 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2719 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2720 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2721 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2722 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2723 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)");
2724 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
2725 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
2726 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
/* scsi_host_template.info hook: format a static one-line description
 * of this pseudo adapter. Uses a file-scope buffer, so the returned
 * string is overwritten on each call (not re-entrant). */
2728 static char sdebug_info[256];
2730 static const char * scsi_debug_info(struct Scsi_Host * shp)
2732 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2733 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2734 scsi_debug_version_date, scsi_debug_dev_size_mb,
2739 /* scsi_debug_proc_info
2740 * Used if the driver currently has no own support for /proc/scsi.
 * Read: dumps driver version, geometry and counters into 'buffer'.
 * Write (inout != 0, privileged): parses an integer and stores it in
 * scsi_debug_opts, resetting the every_nth command counter. */
2742 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2743 int length, int inout)
2745 int len, pos, begin;
2748 orig_length = length;
/* Write path: copy at most 15 bytes of user text into a local
 * buffer before sscanf'ing it. */
2752 int minLen = length > 15 ? 15 : length;
/* Only fully privileged users may tweak the debug options. */
2754 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2756 memcpy(arr, buffer, minLen);
2758 if (1 != sscanf(arr, "%d", &pos))
2760 scsi_debug_opts = pos;
2761 if (scsi_debug_every_nth != 0)
2762 scsi_debug_cmnd_count = 0;
/* Read path: one big formatted status dump. */
2766 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2768 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2769 "every_nth=%d(curr:%d)\n"
2770 "delay=%d, max_luns=%d, scsi_level=%d\n"
2771 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2772 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2773 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2774 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2775 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2776 scsi_debug_cmnd_count, scsi_debug_delay,
2777 scsi_debug_max_luns, scsi_debug_scsi_level,
2778 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2779 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2780 num_host_resets, dix_reads, dix_writes, dif_errors);
/* Classic proc_info windowing: report where the requested
 * offset falls within the generated text. */
2785 *start = buffer + (offset - begin); /* Start of wanted data */
2786 len -= (offset - begin);
/* --- Driver sysfs attributes (see /sys/bus/pseudo/drivers/scsi_debug)
 * Each attribute is a show/store pair plus a DRIVER_ATTR() definition.
 * Stores parse the user string and update the matching module global;
 * unlike the /sys/module parameters, these can run side effects. --- */

/* "delay": jiffies to delay each response; store accepts ints >= 0. */
2792 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2794 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2797 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2798 const char * buf, size_t count)
2803 if (1 == sscanf(buf, "%10s", work)) {
2804 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2805 scsi_debug_delay = delay;
2811 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2812 sdebug_delay_store);
/* "opts": debug option bitmask; accepts "0x..." hex or decimal and
 * resets the every_nth command counter on change. */
2814 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2816 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2819 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2820 const char * buf, size_t count)
2825 if (1 == sscanf(buf, "%10s", work)) {
2826 if (0 == strnicmp(work,"0x", 2)) {
2827 if (1 == sscanf(&work[2], "%x", &opts))
2830 if (1 == sscanf(work, "%d", &opts))
2836 scsi_debug_opts = opts;
2837 scsi_debug_cmnd_count = 0;
2840 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
/* "ptype": simulated SCSI peripheral device type. */
2843 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2845 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2847 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2848 const char * buf, size_t count)
2852 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2853 scsi_debug_ptype = n;
2858 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
/* "dsense": 1 -> descriptor sense format, 0 -> fixed format. */
2860 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2862 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2864 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2865 const char * buf, size_t count)
2869 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2870 scsi_debug_dsense = n;
2875 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2876 sdebug_dsense_store);
/* "fake_rw": skip the ramdisk memcpy on READ/WRITE when non-zero. */
2878 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2880 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2882 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2883 const char * buf, size_t count)
2887 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2888 scsi_debug_fake_rw = n;
2893 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2894 sdebug_fake_rw_store);
/* "no_lun_0": when set, do not present LU number 0. */
2896 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2898 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2900 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2901 const char * buf, size_t count)
2905 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2906 scsi_debug_no_lun_0 = n;
2911 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2912 sdebug_no_lun_0_store);
/* "num_tgts": targets per host; store also re-applies the
 * target/LUN topology via sdebug_max_tgts_luns(). */
2914 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2916 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2918 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2919 const char * buf, size_t count)
2923 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2924 scsi_debug_num_tgts = n;
2925 sdebug_max_tgts_luns();
2930 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2931 sdebug_num_tgts_store);
/* Read-only attributes fixed at module load time. */
2933 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
2935 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
2937 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
2939 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
2941 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
2943 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
/* "every_nth": inject an event every nth command; resets counter. */
2945 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
2947 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
2949 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
2950 const char * buf, size_t count)
2954 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
2955 scsi_debug_every_nth = nth;
2956 scsi_debug_cmnd_count = 0;
2961 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
2962 sdebug_every_nth_store);
/* "max_luns": LUNs per target; re-applies topology on store. */
2964 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
2966 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
2968 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
2969 const char * buf, size_t count)
2973 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2974 scsi_debug_max_luns = n;
2975 sdebug_max_tgts_luns();
2980 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
2981 sdebug_max_luns_store);
/* "max_queue": queued-command slots in use; bounded by the static
 * queued_arr size (SCSI_DEBUG_CANQUEUE). */
2983 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
2985 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
2987 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
2988 const char * buf, size_t count)
2992 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
2993 (n <= SCSI_DEBUG_CANQUEUE)) {
2994 scsi_debug_max_queue = n;
2999 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3000 sdebug_max_queue_store);
/* Remaining read-only attributes mirror load-time parameters. */
3002 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3004 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3006 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3008 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3010 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3012 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
/* "virtual_gb": changing it recomputes the advertised capacity. */
3014 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3016 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3018 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3019 const char * buf, size_t count)
3023 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3024 scsi_debug_virtual_gb = n;
3026 sdebug_capacity = get_sdebug_capacity();
3032 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3033 sdebug_virtual_gb_store);
/* "add_host": writing +N adds N pseudo adapters, -N removes N. */
3035 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3037 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3040 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3041 const char * buf, size_t count)
3045 if (sscanf(buf, "%d", &delta_hosts) != 1)
3047 if (delta_hosts > 0) {
3049 sdebug_add_adapter();
3050 } while (--delta_hosts);
3051 } else if (delta_hosts < 0) {
3053 sdebug_remove_adapter();
3054 } while (++delta_hosts);
3058 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3059 sdebug_add_host_store);
/* "vpd_use_hostno": include host number in VPD device identifiers. */
3061 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3064 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3066 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3067 const char * buf, size_t count)
3071 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3072 scsi_debug_vpd_use_hostno = n;
3077 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3078 sdebug_vpd_use_hostno_store);
3080 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3082 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3084 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3086 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3088 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3090 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3092 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3094 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3096 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3098 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3100 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3102 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3104 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3106 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3108 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
/* "map": thin-provisioning map. Without a granularity everything is
 * mapped ("0-<last>"); otherwise render the provisioning bitmap as a
 * range list, appending a newline. */
3110 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3114 if (scsi_debug_unmap_granularity == 0)
3115 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3116 sdebug_store_sectors);
3118 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3120 buf[count++] = '\n';
3125 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3128 /* Note: The following function creates attribute files in the
3129 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3130 files (over those found in the /sys/module/scsi_debug/parameters
3131 directory) is that auxiliary actions can be triggered when an attribute
3132 is changed. For example see: sdebug_add_host_store() above.
/* Create every driver sysfs attribute; errors are OR-ed together so a
 * single non-zero return means at least one creation failed. */
3134 static int do_create_driverfs_files(void)
3138 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3139 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3140 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3141 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3142 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3143 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3144 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3145 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3146 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3147 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3148 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3149 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3150 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3151 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3152 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3153 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3154 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3155 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3156 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3157 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3158 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3159 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3160 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
/* Remove every driver sysfs attribute, in reverse creation order. */
3164 static void do_remove_driverfs_files(void)
3166 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3167 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3168 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3169 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3170 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3171 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3172 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3173 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3174 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3175 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3176 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3177 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3178 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3179 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3180 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3181 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3182 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3183 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3184 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3185 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3186 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3187 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3188 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
/* Release callback for the root pseudo device; nothing to free, just
 * optional tracing. Required because the device model insists every
 * registered device have a release method. */
3191 static void pseudo_0_release(struct device *dev)
3193 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3194 printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
/* Root of the pseudo device tree; all simulated adapters hang off it. */
3197 static struct device pseudo_primary = {
3198 .init_name = "pseudo_0",
3199 .release = pseudo_0_release,
/* Module init: validate parameters, size and allocate the shared
 * ramdisk (plus optional DIF and thin-provisioning stores), register
 * the pseudo bus/driver/device-model objects and their sysfs files,
 * then bring up the requested number of simulated adapters.
 * Error paths unwind registrations in reverse order (labels elided). */
3202 static int __init scsi_debug_init(void)
/* Only the standard logical block sizes are accepted. */
3209 switch (scsi_debug_sector_size) {
3216 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3217 scsi_debug_sector_size);
/* DIF protection type must be one of the defined T10 types. */
3221 switch (scsi_debug_dif) {
3223 case SD_DIF_TYPE0_PROTECTION:
3224 case SD_DIF_TYPE1_PROTECTION:
3225 case SD_DIF_TYPE2_PROTECTION:
3226 case SD_DIF_TYPE3_PROTECTION:
3230 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3234 if (scsi_debug_guard > 1) {
3235 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3239 if (scsi_debug_ato > 1) {
3240 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3244 if (scsi_debug_physblk_exp > 15) {
3245 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3246 scsi_debug_physblk_exp);
3250 if (scsi_debug_lowest_aligned > 0x3fff) {
3251 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3252 scsi_debug_lowest_aligned);
3256 if (scsi_debug_dev_size_mb < 1)
3257 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3258 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3259 sdebug_store_sectors = sz / scsi_debug_sector_size;
3260 sdebug_capacity = get_sdebug_capacity();
3262 /* play around with geometry, don't waste too much on track 0 */
3264 sdebug_sectors_per = 32;
3265 if (scsi_debug_dev_size_mb >= 16)
3267 else if (scsi_debug_dev_size_mb >= 256)
3269 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3270 (sdebug_sectors_per * sdebug_heads);
3271 if (sdebug_cylinders_per >= 1024) {
3272 /* other LLDs do this; implies >= 1GB ram disk ... */
3274 sdebug_sectors_per = 63;
3275 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3276 (sdebug_sectors_per * sdebug_heads);
/* The shared backing store for all simulated disks. */
3279 fake_storep = vmalloc(sz);
3280 if (NULL == fake_storep) {
3281 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3284 memset(fake_storep, 0, sz);
3285 if (scsi_debug_num_parts > 0)
3286 sdebug_build_parts(fake_storep, sz);
/* Optional DIF store: one 8-byte tuple per sector, initialised to
 * all-ones (the "no protection information" escape value). */
3288 if (scsi_debug_dif) {
3291 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3292 dif_storep = vmalloc(dif_size);
3294 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3295 dif_size, dif_storep);
3297 if (dif_storep == NULL) {
3298 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3303 memset(dif_storep, 0xff, dif_size);
/* Optional thin-provisioning bitmap: one bit per unmap granule. */
3306 if (scsi_debug_unmap_granularity) {
3307 unsigned int map_bytes;
3309 if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3311 "%s: ERR: unmap_granularity < unmap_alignment\n",
3316 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3317 map_bytes = map_size >> 3;
3318 map_storep = vmalloc(map_bytes);
3320 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3323 if (map_storep == NULL) {
3324 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3329 memset(map_storep, 0x0, map_bytes);
3331 /* Map first 1KB for partition table */
3332 if (scsi_debug_num_parts)
/* Device-model registration: root device, pseudo bus, driver,
 * then the driver's sysfs attribute files. */
3336 ret = device_register(&pseudo_primary);
3338 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
3342 ret = bus_register(&pseudo_lld_bus);
3344 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3348 ret = driver_register(&sdebug_driverfs_driver);
3350 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3354 ret = do_create_driverfs_files();
3356 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
/* Bring up the requested adapters; scsi_debug_add_host is zeroed
 * first and re-counted by each successful sdebug_add_adapter(). */
3363 host_to_add = scsi_debug_add_host;
3364 scsi_debug_add_host = 0;
3366 for (k = 0; k < host_to_add; k++) {
3367 if (sdebug_add_adapter()) {
3368 printk(KERN_ERR "scsi_debug_init: "
3369 "sdebug_add_adapter failed k=%d\n", k);
3374 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3375 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3376 scsi_debug_add_host);
/* Error unwinding: undo registrations in reverse order. */
3381 do_remove_driverfs_files();
3382 driver_unregister(&sdebug_driverfs_driver);
3384 bus_unregister(&pseudo_lld_bus);
3386 device_unregister(&pseudo_primary);
/* Module exit: tear down all remaining adapters, then unregister the
 * sysfs files, driver, bus and root device — exact reverse of init.
 * (Freeing of the vmalloc'ed stores is in lines elided here.) */
3397 static void __exit scsi_debug_exit(void)
3399 int k = scsi_debug_add_host;
3403 sdebug_remove_adapter();
3404 do_remove_driverfs_files();
3405 driver_unregister(&sdebug_driverfs_driver);
3406 bus_unregister(&pseudo_lld_bus);
3407 device_unregister(&pseudo_primary);
/* device_initcall: run init at boot when built in, or at load time. */
3415 device_initcall(scsi_debug_init);
3416 module_exit(scsi_debug_exit);
/* Device-model release callback for a simulated adapter; recovers the
 * containing sdebug_host_info from the struct device (the kfree of it
 * is in a line elided from this excerpt — confirm). */
3418 static void sdebug_release_adapter(struct device * dev)
3420 struct sdebug_host_info *sdbg_host;
3422 sdbg_host = to_sdebug_host(dev);
/* Create one simulated adapter: allocate its host info, pre-create
 * num_tgts * max_luns device entries, add it to the global host list,
 * and register it with the driver model (which triggers the pseudo
 * bus probe). On failure, free any devices already created.
 * Returns 0 on success (return statements elided from excerpt). */
3426 static int sdebug_add_adapter(void)
3428 int k, devs_per_host;
3430 struct sdebug_host_info *sdbg_host;
3431 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3433 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3434 if (NULL == sdbg_host) {
3435 printk(KERN_ERR "%s: out of memory at line %d\n",
3436 __func__, __LINE__);
3440 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
/* Pre-populate one dev_info per simulated <target, lun>. */
3442 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3443 for (k = 0; k < devs_per_host; k++) {
3444 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3445 if (!sdbg_devinfo) {
3446 printk(KERN_ERR "%s: out of memory at line %d\n",
3447 __func__, __LINE__);
3453 spin_lock(&sdebug_host_list_lock);
3454 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3455 spin_unlock(&sdebug_host_list_lock);
3457 sdbg_host->dev.bus = &pseudo_lld_bus;
3458 sdbg_host->dev.parent = &pseudo_primary;
3459 sdbg_host->dev.release = &sdebug_release_adapter;
3460 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3462 error = device_register(&sdbg_host->dev);
3467 ++scsi_debug_add_host;
/* Error path: release every device entry created above. */
3471 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3473 list_del(&sdbg_devinfo->dev_list);
3474 kfree(sdbg_devinfo);
/*
 * sdebug_remove_adapter() - unregister the most recently added emulated
 * adapter (LIFO: takes the tail of sdebug_host_list via list.prev).
 *
 * The list manipulation is done under sdebug_host_list_lock; the
 * device_unregister() drops the device-model reference, which ultimately
 * invokes sdebug_release_adapter(). A no-op when the list is empty
 * (sdbg_host stays NULL — the NULL check itself is not visible in this
 * fragment; confirm against the full file).
 */
3481 static void sdebug_remove_adapter(void)
3483 struct sdebug_host_info * sdbg_host = NULL;
3485 spin_lock(&sdebug_host_list_lock);
3486 if (!list_empty(&sdebug_host_list)) {
3487 sdbg_host = list_entry(sdebug_host_list.prev,
3488 struct sdebug_host_info, host_list);
3489 list_del(&sdbg_host->host_list);
3491 spin_unlock(&sdebug_host_list_lock);
3496 device_unregister(&sdbg_host->dev);
3497 --scsi_debug_add_host; /* keep the live-adapter count in sync */
/*
 * scsi_debug_queuecommand() - SCSI mid-layer entry point for every command
 * sent to an emulated device.
 *
 * Flow (as visible in this fragment): validate target/LUN, look up the
 * per-device info (devip), optionally arm error injection every Nth
 * command per scsi_debug_opts, filter commands addressed to the well-known
 * report-luns LUN, then dispatch on the CDB opcode to the resp_*()
 * emulation helpers. Every exit goes through schedule_resp(), which
 * completes the command via done() either immediately or after
 * scsi_debug_delay.
 *
 * NOTE(review): partial extraction — many case labels, braces, and
 * fall-through lines between the numbered lines are missing; the stray
 * leading numerals are original-file line numbers.
 */
3501 int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3503 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3506 unsigned long long lba;
3509 int target = SCpnt->device->id;
3510 struct sdebug_dev_info *devip = NULL;
3511 int inj_recovered = 0; /* error-injection flags, set below */
3512 int inj_transport = 0;
3515 int delay_override = 0; /* set to respond without delay (not visible here) */
3518 scsi_set_resid(SCpnt, 0);
/* optional CDB hex dump when the NOISE option is set */
3519 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3520 printk(KERN_INFO "scsi_debug: cmd ");
3521 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3522 printk("%02x ", (int)cmd[k]);
/* a command addressed to the initiator's own id is rejected outright */
3526 if (target == SCpnt->device->host->hostt->this_id) {
3527 printk(KERN_INFO "scsi_debug: initiator's id used as "
3529 return schedule_resp(SCpnt, NULL, done,
3530 DID_NO_CONNECT << 16, 0);
/* only LUNs below max_luns exist, plus the SAM-2 well-known REPORT LUNS LUN */
3533 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3534 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3535 return schedule_resp(SCpnt, NULL, done,
3536 DID_NO_CONNECT << 16, 0);
3537 devip = devInfoReg(SCpnt->device);
3539 return schedule_resp(SCpnt, NULL, done,
3540 DID_NO_CONNECT << 16, 0);
/* every_nth != 0: arm one injection flag each time the counter wraps;
 * negative every_nth collapses to -1 (inject once then effectively stop) */
3542 if ((scsi_debug_every_nth != 0) &&
3543 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3544 scsi_debug_cmnd_count = 0;
3545 if (scsi_debug_every_nth < -1)
3546 scsi_debug_every_nth = -1;
3547 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3548 return 0; /* ignore command causing timeout */
3549 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3550 inj_recovered = 1; /* to reads and writes below */
3551 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3552 inj_transport = 1; /* to reads and writes below */
3553 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3554 inj_dif = 1; /* to reads and writes below */
3555 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3556 inj_dix = 1; /* to reads and writes below */
/* well-known LUN accepts only a small command set; everything else gets
 * ILLEGAL REQUEST (fragment: surrounding switch/case labels missing) */
3563 case TEST_UNIT_READY:
3565 break; /* only allowable wlun commands */
3567 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3568 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3569 "not supported for wlun\n", *cmd);
3570 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3572 errsts = check_condition_result;
3573 return schedule_resp(SCpnt, devip, done, errsts,
/* main opcode dispatch (switch header not visible in this fragment) */
3579 case INQUIRY: /* mandatory, ignore unit attention */
3581 errsts = resp_inquiry(SCpnt, target, devip);
3583 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3585 errsts = resp_requests(SCpnt, devip);
3587 case REZERO_UNIT: /* actually this is REWIND for SSC */
3589 errsts = resp_start_stop(SCpnt, devip);
3591 case ALLOW_MEDIUM_REMOVAL:
3592 errsts = check_readiness(SCpnt, 1, devip);
3595 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3596 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3597 cmd[4] ? "inhibited" : "enabled");
3599 case SEND_DIAGNOSTIC: /* mandatory */
3600 errsts = check_readiness(SCpnt, 1, devip);
3602 case TEST_UNIT_READY: /* mandatory */
3604 errsts = check_readiness(SCpnt, 0, devip);
3607 errsts = check_readiness(SCpnt, 1, devip);
3610 errsts = check_readiness(SCpnt, 1, devip);
3613 errsts = check_readiness(SCpnt, 1, devip);
3616 errsts = check_readiness(SCpnt, 1, devip);
3619 errsts = resp_readcap(SCpnt, devip);
3621 case SERVICE_ACTION_IN:
/* service action selects 16-byte READ CAPACITY or GET LBA STATUS */
3622 if (cmd[1] == SAI_READ_CAPACITY_16)
3623 errsts = resp_readcap16(SCpnt, devip);
3624 else if (cmd[1] == SAI_GET_LBA_STATUS) {
/* GET LBA STATUS only meaningful when thin-provisioning emulation is on */
3626 if (scsi_debug_unmap_max_desc == 0) {
3627 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3628 INVALID_COMMAND_OPCODE, 0);
3629 errsts = check_condition_result;
3631 errsts = resp_get_lba_status(SCpnt, devip);
3633 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3635 errsts = check_condition_result;
3638 case MAINTENANCE_IN:
3639 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3640 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3642 errsts = check_condition_result;
3645 errsts = resp_report_tgtpgs(SCpnt, devip);
3650 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3651 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3653 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3654 INVALID_COMMAND_OPCODE, 0);
3655 errsts = check_condition_result;
/* warn (but allow) unprotected reads against a Type 1/3 DIF device */
3659 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3660 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3661 (cmd[1] & 0xe0) == 0)
3662 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3667 errsts = check_readiness(SCpnt, 0, devip);
3670 if (scsi_debug_fake_rw)
3672 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3673 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
/* apply any armed injection to an otherwise-successful read */
3674 if (inj_recovered && (0 == errsts)) {
3675 mk_sense_buffer(devip, RECOVERED_ERROR,
3676 THRESHOLD_EXCEEDED, 0);
3677 errsts = check_condition_result;
3678 } else if (inj_transport && (0 == errsts)) {
3679 mk_sense_buffer(devip, ABORTED_COMMAND,
3680 TRANSPORT_PROBLEM, ACK_NAK_TO);
3681 errsts = check_condition_result;
3682 } else if (inj_dif && (0 == errsts)) {
3683 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3684 errsts = illegal_condition_result;
3685 } else if (inj_dix && (0 == errsts)) {
3686 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3687 errsts = illegal_condition_result;
3690 case REPORT_LUNS: /* mandatory, ignore unit attention */
3692 errsts = resp_report_luns(SCpnt, devip);
3694 case VERIFY: /* 10 byte SBC-2 command */
3695 errsts = check_readiness(SCpnt, 0, devip);
3700 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3701 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3703 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3704 INVALID_COMMAND_OPCODE, 0);
3705 errsts = check_condition_result;
3709 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3710 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3711 (cmd[1] & 0xe0) == 0)
3712 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3717 errsts = check_readiness(SCpnt, 0, devip);
3720 if (scsi_debug_fake_rw)
3722 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3723 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
/* same injection treatment for writes (no transport injection here) */
3724 if (inj_recovered && (0 == errsts)) {
3725 mk_sense_buffer(devip, RECOVERED_ERROR,
3726 THRESHOLD_EXCEEDED, 0);
3727 errsts = check_condition_result;
3728 } else if (inj_dif && (0 == errsts)) {
3729 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3730 errsts = illegal_condition_result;
3731 } else if (inj_dix && (0 == errsts)) {
3732 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3733 errsts = illegal_condition_result;
3741 errsts = check_readiness(SCpnt, 0, devip);
3744 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3745 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3748 errsts = check_readiness(SCpnt, 0, devip);
/* UNMAP requires the thin-provisioning emulation to be configured */
3752 if (scsi_debug_unmap_max_desc == 0) {
3753 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3754 INVALID_COMMAND_OPCODE, 0);
3755 errsts = check_condition_result;
3757 errsts = resp_unmap(SCpnt, devip);
3761 errsts = resp_mode_sense(SCpnt, target, devip);
3764 errsts = resp_mode_select(SCpnt, 1, devip);
3766 case MODE_SELECT_10:
3767 errsts = resp_mode_select(SCpnt, 0, devip);
3770 errsts = resp_log_sense(SCpnt, devip);
3772 case SYNCHRONIZE_CACHE:
3774 errsts = check_readiness(SCpnt, 0, devip);
3777 errsts = check_readiness(SCpnt, 1, devip);
/* XDWRITEREAD(10) is only valid as a bidirectional command */
3779 case XDWRITEREAD_10:
3780 if (!scsi_bidi_cmnd(SCpnt)) {
3781 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3782 INVALID_FIELD_IN_CDB, 0);
3783 errsts = check_condition_result;
3787 errsts = check_readiness(SCpnt, 0, devip);
3790 if (scsi_debug_fake_rw)
3792 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3793 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3796 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3799 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3801 case VARIABLE_LENGTH_CMD:
3802 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3804 if ((cmd[10] & 0xe0) == 0)
3806 "Unprotected RD/WR to DIF device\n");
/* 32-byte READ/WRITE service actions carried in the variable-length CDB */
3808 if (cmd[9] == READ_32) {
3809 BUG_ON(SCpnt->cmd_len < 32);
3813 if (cmd[9] == WRITE_32) {
3814 BUG_ON(SCpnt->cmd_len < 32);
3819 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3820 INVALID_FIELD_IN_CDB, 0);
3821 errsts = check_condition_result;
/* default: unknown opcode -> INVALID OPCODE, unless a pending unit
 * attention must be reported first */
3825 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3826 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3827 "supported\n", *cmd);
3828 errsts = check_readiness(SCpnt, 1, devip);
3830 break; /* Unit attention takes precedence */
3831 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3832 errsts = check_condition_result;
/* single completion point: delay unless a fast-path opcode overrode it */
3835 return schedule_resp(SCpnt, devip, done, errsts,
3836 (delay_override ? 0 : scsi_debug_delay));
/*
 * Host template for the emulated adapter: wires the scsi_debug_* entry
 * points (queuecommand, error handlers, slave lifecycle, ioctl, proc)
 * into the SCSI mid-layer. can_queue is later overridden with
 * scsi_debug_max_queue in sdebug_driver_probe().
 * NOTE(review): partial extraction — a few initializer lines between the
 * numbered lines (e.g. this_id, cmd_per_lun) are not visible here.
 */
3839 static struct scsi_host_template sdebug_driver_template = {
3840 .proc_info = scsi_debug_proc_info,
3841 .proc_name = sdebug_proc_name,
3842 .name = "SCSI DEBUG",
3843 .info = scsi_debug_info,
3844 .slave_alloc = scsi_debug_slave_alloc,
3845 .slave_configure = scsi_debug_slave_configure,
3846 .slave_destroy = scsi_debug_slave_destroy,
3847 .ioctl = scsi_debug_ioctl,
3848 .queuecommand = scsi_debug_queuecommand,
3849 .eh_abort_handler = scsi_debug_abort,
3850 .eh_bus_reset_handler = scsi_debug_bus_reset,
3851 .eh_device_reset_handler = scsi_debug_device_reset,
3852 .eh_host_reset_handler = scsi_debug_host_reset,
3853 .bios_param = scsi_debug_biosparam,
3854 .can_queue = SCSI_DEBUG_CANQUEUE,
3856 .sg_tablesize = 256,
3858 .max_sectors = 0xffff,
3859 .use_clustering = DISABLE_CLUSTERING,
3860 .module = THIS_MODULE,
/*
 * sdebug_driver_probe() - pseudo_lld_bus probe hook, called for each
 * adapter device registered by sdebug_add_adapter().
 *
 * Allocates a Scsi_Host from sdebug_driver_template, links it to the
 * sdebug_host_info, sizes max_id/max_lun from the module parameters,
 * configures DIF/DIX protection and the guard type from scsi_debug_dif /
 * scsi_debug_dix / scsi_debug_guard, then adds and scans the host.
 *
 * NOTE(review): partial extraction — error handling (returns/gotos) and
 * some switch bodies between the numbered lines are not visible.
 */
3863 static int sdebug_driver_probe(struct device * dev)
3866 struct sdebug_host_info *sdbg_host;
3867 struct Scsi_Host *hpnt;
3870 sdbg_host = to_sdebug_host(dev);
/* honor the runtime-configurable queue depth over the template default */
3872 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3873 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3875 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
/* cross-link host <-> sdebug bookkeeping via hostdata */
3880 sdbg_host->shost = hpnt;
3881 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* leave room above this_id so the initiator id is never a target id */
3882 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3883 hpnt->max_id = scsi_debug_num_tgts + 1;
3885 hpnt->max_id = scsi_debug_num_tgts;
3886 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
/* map the configured DIF type to the host protection mask; DIX of the
 * matching type is OR-ed in when scsi_debug_dix is set (conditions for
 * the |= lines are not visible in this fragment) */
3890 switch (scsi_debug_dif) {
3892 case SD_DIF_TYPE1_PROTECTION:
3893 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3895 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3898 case SD_DIF_TYPE2_PROTECTION:
3899 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3901 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3904 case SD_DIF_TYPE3_PROTECTION:
3905 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3907 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3912 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3916 scsi_host_set_prot(hpnt, host_prot);
3918 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3919 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3920 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3921 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3922 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3923 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3924 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3925 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard == 1 selects IP-checksum guard tags, otherwise T10 CRC */
3927 if (scsi_debug_guard == 1)
3928 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3930 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3932 error = scsi_add_host(hpnt, &sdbg_host->dev);
3934 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3936 scsi_host_put(hpnt);
3938 scsi_scan_host(hpnt);
/*
 * sdebug_driver_remove() - pseudo_lld_bus remove hook; inverse of
 * sdebug_driver_probe(). Removes the Scsi_Host from the mid-layer, frees
 * every per-device sdebug_dev_info hanging off the host, then drops the
 * host reference taken at allocation.
 * NOTE(review): partial extraction — the NULL-check branch around the
 * "Unable to locate host info" message is not fully visible.
 */
3944 static int sdebug_driver_remove(struct device * dev)
3946 struct sdebug_host_info *sdbg_host;
3947 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3949 sdbg_host = to_sdebug_host(dev);
3952 printk(KERN_ERR "%s: Unable to locate host info\n",
3957 scsi_remove_host(sdbg_host->shost);
/* _safe iteration: entries are deleted and freed as we walk the list */
3959 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3961 list_del(&sdbg_devinfo->dev_list);
3962 kfree(sdbg_devinfo);
3965 scsi_host_put(sdbg_host->shost);
/*
 * pseudo_lld_bus_match() - bus match callback for pseudo_lld_bus.
 * NOTE(review): the body is not visible in this extraction; on a
 * single-driver pseudo bus this presumably matches unconditionally
 * (returns nonzero) — confirm against the full file.
 */
3969 static int pseudo_lld_bus_match(struct device *dev,
3970 struct device_driver *dev_driver)
3975 static struct bus_type pseudo_lld_bus = {
3977 .match = pseudo_lld_bus_match,
3978 .probe = sdebug_driver_probe,
3979 .remove = sdebug_driver_remove,