ub: Fix timeouts
[safe/jmp/linux-2.6] / drivers / block / ub.c
1 /*
2  * The low performance USB storage driver (ub).
3  *
4  * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
5  * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
6  *
7  * This work is a part of Linux kernel, is derived from it,
8  * and is not licensed separately. See file COPYING for details.
9  *
10  * TODO (sorted by decreasing priority)
11  *  -- Return sense now that rq allows it (we always auto-sense anyway).
12  *  -- set readonly flag for CDs, set removable flag for CF readers
13  *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
14  *  -- verify the 13 conditions and do bulk resets
15  *  -- highmem
16  *  -- move top_sense and work_bcs into separate allocations (if they survive)
17  *     for cache purists and esoteric architectures.
18  *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
19  *  -- prune comments, they are too volumnous
20  *  -- Resove XXX's
21  *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
22  */
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/usb.h>
26 #include <linux/usb_usual.h>
27 #include <linux/blkdev.h>
28 #include <linux/timer.h>
29 #include <linux/scatterlist.h>
30 #include <scsi/scsi.h>
31
32 #define DRV_NAME "ub"
33
34 #define UB_MAJOR 180
35
36 /*
37  * The command state machine is the key model for understanding of this driver.
38  *
39  * The general rule is that all transitions are done towards the bottom
40  * of the diagram, thus preventing any loops.
41  *
42  * An exception to that is how the STAT state is handled. A counter allows it
43  * to be re-entered along the path marked with [C].
44  *
45  *       +--------+
46  *       ! INIT   !
47  *       +--------+
48  *           !
49  *        ub_scsi_cmd_start fails ->--------------------------------------\
50  *           !                                                            !
51  *           V                                                            !
52  *       +--------+                                                       !
53  *       ! CMD    !                                                       !
54  *       +--------+                                                       !
55  *           !                                            +--------+      !
56  *         was -EPIPE -->-------------------------------->! CLEAR  !      !
57  *           !                                            +--------+      !
58  *           !                                                !           !
59  *         was error -->------------------------------------- ! --------->\
60  *           !                                                !           !
61  *  /--<-- cmd->dir == NONE ?                                 !           !
62  *  !        !                                                !           !
63  *  !        V                                                !           !
64  *  !    +--------+                                           !           !
65  *  !    ! DATA   !                                           !           !
66  *  !    +--------+                                           !           !
67  *  !        !                           +---------+          !           !
68  *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
69  *  !        !                           +---------+          !           !
70  *  !        !                                !               !           !
71  *  !        !                              was error -->---- ! --------->\
72  *  !      was error -->--------------------- ! ------------- ! --------->\
73  *  !        !                                !               !           !
74  *  !        V                                !               !           !
75  *  \--->+--------+                           !               !           !
76  *       ! STAT   !<--------------------------/               !           !
77  *  /--->+--------+                                           !           !
78  *  !        !                                                !           !
79  * [C]     was -EPIPE -->-----------\                         !           !
80  *  !        !                      !                         !           !
81  *  +<---- len == 0                 !                         !           !
82  *  !        !                      !                         !           !
83  *  !      was error -->--------------------------------------!---------->\
84  *  !        !                      !                         !           !
85  *  +<---- bad CSW                  !                         !           !
86  *  +<---- bad tag                  !                         !           !
87  *  !        !                      V                         !           !
88  *  !        !                 +--------+                     !           !
89  *  !        !                 ! CLRRS  !                     !           !
90  *  !        !                 +--------+                     !           !
91  *  !        !                      !                         !           !
92  *  \------- ! --------------------[C]--------\               !           !
93  *           !                                !               !           !
94  *         cmd->error---\                +--------+           !           !
95  *           !          +--------------->! SENSE  !<----------/           !
96  *         STAT_FAIL----/                +--------+                       !
97  *           !                                !                           V
98  *           !                                V                      +--------+
99  *           \--------------------------------\--------------------->! DONE   !
100  *                                                                   +--------+
101  */
102
103 /*
104  * This many LUNs per USB device.
105  * Every one of them takes a host, see UB_MAX_HOSTS.
106  */
107 #define UB_MAX_LUNS   9
108
109 /*
110  */
111
112 #define UB_PARTS_PER_LUN      8
113
114 #define UB_MAX_CDB_SIZE      16         /* Corresponds to Bulk */
115
116 #define UB_SENSE_SIZE  18
117
118 /*
119  */
120
121 /* command block wrapper */
122 struct bulk_cb_wrap {
123         __le32  Signature;              /* contains 'USBC' */
124         u32     Tag;                    /* unique per command id */
125         __le32  DataTransferLength;     /* size of data */
126         u8      Flags;                  /* direction in bit 0 */
127         u8      Lun;                    /* LUN */
128         u8      Length;                 /* of of the CDB */
129         u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
130 };
131
132 #define US_BULK_CB_WRAP_LEN     31
133 #define US_BULK_CB_SIGN         0x43425355      /*spells out USBC */
134 #define US_BULK_FLAG_IN         1
135 #define US_BULK_FLAG_OUT        0
136
137 /* command status wrapper */
138 struct bulk_cs_wrap {
139         __le32  Signature;              /* should = 'USBS' */
140         u32     Tag;                    /* same as original command */
141         __le32  Residue;                /* amount not transferred */
142         u8      Status;                 /* see below */
143 };
144
145 #define US_BULK_CS_WRAP_LEN     13
146 #define US_BULK_CS_SIGN         0x53425355      /* spells out 'USBS' */
147 #define US_BULK_STAT_OK         0
148 #define US_BULK_STAT_FAIL       1
149 #define US_BULK_STAT_PHASE      2
150
151 /* bulk-only class specific requests */
152 #define US_BULK_RESET_REQUEST   0xff
153 #define US_BULK_GET_MAX_LUN     0xfe
154
155 /*
156  */
157 struct ub_dev;
158
159 #define UB_MAX_REQ_SG   9       /* cdrecord requires 32KB and maybe a header */
160 #define UB_MAX_SECTORS 64
161
162 /*
163  * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
164  * even if a webcam hogs the bus, but some devices need time to spin up.
165  */
166 #define UB_URB_TIMEOUT  (HZ*2)
167 #define UB_DATA_TIMEOUT (HZ*5)  /* ZIP does spin-ups in the data phase */
168 #define UB_STAT_TIMEOUT (HZ*5)  /* Same spinups and eject for a dataless cmd. */
169 #define UB_CTRL_TIMEOUT (HZ/2)  /* 500ms ought to be enough to clear a stall */
170
171 /*
172  * An instance of a SCSI command in transit.
173  */
174 #define UB_DIR_NONE     0
175 #define UB_DIR_READ     1
176 #define UB_DIR_ILLEGAL2 2
177 #define UB_DIR_WRITE    3
178
179 #define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
180                          (((c)==UB_DIR_READ)? 'r': 'n'))
181
182 enum ub_scsi_cmd_state {
183         UB_CMDST_INIT,                  /* Initial state */
184         UB_CMDST_CMD,                   /* Command submitted */
185         UB_CMDST_DATA,                  /* Data phase */
186         UB_CMDST_CLR2STS,               /* Clearing before requesting status */
187         UB_CMDST_STAT,                  /* Status phase */
188         UB_CMDST_CLEAR,                 /* Clearing a stall (halt, actually) */
189         UB_CMDST_CLRRS,                 /* Clearing before retrying status */
190         UB_CMDST_SENSE,                 /* Sending Request Sense */
191         UB_CMDST_DONE                   /* Final state */
192 };
193
194 struct ub_scsi_cmd {
195         unsigned char cdb[UB_MAX_CDB_SIZE];
196         unsigned char cdb_len;
197
198         unsigned char dir;              /* 0 - none, 1 - read, 3 - write. */
199         enum ub_scsi_cmd_state state;
200         unsigned int tag;
201         struct ub_scsi_cmd *next;
202
203         int error;                      /* Return code - valid upon done */
204         unsigned int act_len;           /* Return size */
205         unsigned char key, asc, ascq;   /* May be valid if error==-EIO */
206
207         int stat_count;                 /* Retries getting status. */
208         unsigned int timeo;             /* jiffies until rq->timeout changes */
209
210         unsigned int len;               /* Requested length */
211         unsigned int current_sg;
212         unsigned int nsg;               /* sgv[nsg] */
213         struct scatterlist sgv[UB_MAX_REQ_SG];
214
215         struct ub_lun *lun;
216         void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
217         void *back;
218 };
219
220 struct ub_request {
221         struct request *rq;
222         unsigned int current_try;
223         unsigned int nsg;               /* sgv[nsg] */
224         struct scatterlist sgv[UB_MAX_REQ_SG];
225 };
226
227 /*
228  */
229 struct ub_capacity {
230         unsigned long nsec;             /* Linux size - 512 byte sectors */
231         unsigned int bsize;             /* Linux hardsect_size */
232         unsigned int bshift;            /* Shift between 512 and hard sects */
233 };
234
235 /*
236  * This is a direct take-off from linux/include/completion.h
237  * The difference is that I do not wait on this thing, just poll.
238  * When I want to wait (ub_probe), I just use the stock completion.
239  *
240  * Note that INIT_COMPLETION takes no lock. It is correct. But why
241  * in the bloody hell that thing takes struct instead of pointer to struct
242  * is quite beyond me. I just copied it from the stock completion.
243  */
244 struct ub_completion {
245         unsigned int done;
246         spinlock_t lock;
247 };
248
249 static inline void ub_init_completion(struct ub_completion *x)
250 {
251         x->done = 0;
252         spin_lock_init(&x->lock);
253 }
254
255 #define UB_INIT_COMPLETION(x)   ((x).done = 0)
256
257 static void ub_complete(struct ub_completion *x)
258 {
259         unsigned long flags;
260
261         spin_lock_irqsave(&x->lock, flags);
262         x->done++;
263         spin_unlock_irqrestore(&x->lock, flags);
264 }
265
266 static int ub_is_completed(struct ub_completion *x)
267 {
268         unsigned long flags;
269         int ret;
270
271         spin_lock_irqsave(&x->lock, flags);
272         ret = x->done;
273         spin_unlock_irqrestore(&x->lock, flags);
274         return ret;
275 }
276
277 /*
278  */
279 struct ub_scsi_cmd_queue {
280         int qlen, qmax;
281         struct ub_scsi_cmd *head, *tail;
282 };
283
284 /*
285  * The block device instance (one per LUN).
286  */
287 struct ub_lun {
288         struct ub_dev *udev;
289         struct list_head link;
290         struct gendisk *disk;
291         int id;                         /* Host index */
292         int num;                        /* LUN number */
293         char name[16];
294
295         int changed;                    /* Media was changed */
296         int removable;
297         int readonly;
298
299         struct ub_request urq;
300
301         /* Use Ingo's mempool if or when we have more than one command. */
302         /*
303          * Currently we never need more than one command for the whole device.
304          * However, giving every LUN a command is a cheap and automatic way
305          * to enforce fairness between them.
306          */
307         int cmda[1];
308         struct ub_scsi_cmd cmdv[1];
309
310         struct ub_capacity capacity; 
311 };
312
313 /*
314  * The USB device instance.
315  */
316 struct ub_dev {
317         spinlock_t *lock;
318         atomic_t poison;                /* The USB device is disconnected */
319         int openc;                      /* protected by ub_lock! */
320                                         /* kref is too implicit for our taste */
321         int reset;                      /* Reset is running */
322         unsigned int tagcnt;
323         char name[12];
324         struct usb_device *dev;
325         struct usb_interface *intf;
326
327         struct list_head luns;
328
329         unsigned int send_bulk_pipe;    /* cached pipe values */
330         unsigned int recv_bulk_pipe;
331         unsigned int send_ctrl_pipe;
332         unsigned int recv_ctrl_pipe;
333
334         struct tasklet_struct tasklet;
335
336         struct ub_scsi_cmd_queue cmd_queue;
337         struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
338         unsigned char top_sense[UB_SENSE_SIZE];
339
340         struct ub_completion work_done;
341         struct urb work_urb;
342         struct timer_list work_timer;
343         int last_pipe;                  /* What might need clearing */
344         __le32 signature;               /* Learned signature */
345         struct bulk_cb_wrap work_bcb;
346         struct bulk_cs_wrap work_bcs;
347         struct usb_ctrlrequest work_cr;
348
349         struct work_struct reset_work;
350         wait_queue_head_t reset_wait;
351
352         int sg_stat[6];
353 };
354
355 /*
356  */
357 static void ub_cleanup(struct ub_dev *sc);
358 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
359 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
360     struct ub_scsi_cmd *cmd, struct ub_request *urq);
361 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
362     struct ub_scsi_cmd *cmd, struct ub_request *urq);
363 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
364 static void ub_end_rq(struct request *rq, unsigned int status,
365     unsigned int cmd_len);
366 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
367     struct ub_request *urq, struct ub_scsi_cmd *cmd);
368 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
369 static void ub_urb_complete(struct urb *urb);
370 static void ub_scsi_action(unsigned long _dev);
371 static void ub_scsi_dispatch(struct ub_dev *sc);
372 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
373 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
374 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
375 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
376 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
377 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
378 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
379 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
380     int stalled_pipe);
381 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
382 static void ub_reset_enter(struct ub_dev *sc, int try);
383 static void ub_reset_task(struct work_struct *work);
384 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
385 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
386     struct ub_capacity *ret);
387 static int ub_sync_reset(struct ub_dev *sc);
388 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
389 static int ub_probe_lun(struct ub_dev *sc, int lnum);
390
391 /*
392  */
393 #ifdef CONFIG_USB_LIBUSUAL
394
395 #define ub_usb_ids  storage_usb_ids
396 #else
397
398 static struct usb_device_id ub_usb_ids[] = {
399         { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
400         { }
401 };
402
403 MODULE_DEVICE_TABLE(usb, ub_usb_ids);
404 #endif /* CONFIG_USB_LIBUSUAL */
405
406 /*
407  * Find me a way to identify "next free minor" for add_disk(),
408  * and the array disappears the next day. However, the number of
409  * hosts has something to do with the naming and /proc/partitions.
410  * This has to be thought out in detail before changing.
411  * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure.
412  */
413 #define UB_MAX_HOSTS  26
414 static char ub_hostv[UB_MAX_HOSTS];
415
416 #define UB_QLOCK_NUM 5
417 static spinlock_t ub_qlockv[UB_QLOCK_NUM];
418 static int ub_qlock_next = 0;
419
420 static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */
421
422 /*
423  * The id allocator.
424  *
425  * This also stores the host for indexing by minor, which is somewhat dirty.
426  */
427 static int ub_id_get(void)
428 {
429         unsigned long flags;
430         int i;
431
432         spin_lock_irqsave(&ub_lock, flags);
433         for (i = 0; i < UB_MAX_HOSTS; i++) {
434                 if (ub_hostv[i] == 0) {
435                         ub_hostv[i] = 1;
436                         spin_unlock_irqrestore(&ub_lock, flags);
437                         return i;
438                 }
439         }
440         spin_unlock_irqrestore(&ub_lock, flags);
441         return -1;
442 }
443
444 static void ub_id_put(int id)
445 {
446         unsigned long flags;
447
448         if (id < 0 || id >= UB_MAX_HOSTS) {
449                 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
450                 return;
451         }
452
453         spin_lock_irqsave(&ub_lock, flags);
454         if (ub_hostv[id] == 0) {
455                 spin_unlock_irqrestore(&ub_lock, flags);
456                 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
457                 return;
458         }
459         ub_hostv[id] = 0;
460         spin_unlock_irqrestore(&ub_lock, flags);
461 }
462
463 /*
464  * This is necessitated by the fact that blk_cleanup_queue does not
465  * necesserily destroy the queue. Instead, it may merely decrease q->refcnt.
466  * Since our blk_init_queue() passes a spinlock common with ub_dev,
467  * we have life time issues when ub_cleanup frees ub_dev.
468  */
469 static spinlock_t *ub_next_lock(void)
470 {
471         unsigned long flags;
472         spinlock_t *ret;
473
474         spin_lock_irqsave(&ub_lock, flags);
475         ret = &ub_qlockv[ub_qlock_next];
476         ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
477         spin_unlock_irqrestore(&ub_lock, flags);
478         return ret;
479 }
480
481 /*
482  * Downcount for deallocation. This rides on two assumptions:
483  *  - once something is poisoned, its refcount cannot grow
484  *  - opens cannot happen at this time (del_gendisk was done)
485  * If the above is true, we can drop the lock, which we need for
486  * blk_cleanup_queue(): the silly thing may attempt to sleep.
487  * [Actually, it never needs to sleep for us, but it calls might_sleep()]
488  */
489 static void ub_put(struct ub_dev *sc)
490 {
491         unsigned long flags;
492
493         spin_lock_irqsave(&ub_lock, flags);
494         --sc->openc;
495         if (sc->openc == 0 && atomic_read(&sc->poison)) {
496                 spin_unlock_irqrestore(&ub_lock, flags);
497                 ub_cleanup(sc);
498         } else {
499                 spin_unlock_irqrestore(&ub_lock, flags);
500         }
501 }
502
503 /*
504  * Final cleanup and deallocation.
505  */
506 static void ub_cleanup(struct ub_dev *sc)
507 {
508         struct list_head *p;
509         struct ub_lun *lun;
510         struct request_queue *q;
511
512         while (!list_empty(&sc->luns)) {
513                 p = sc->luns.next;
514                 lun = list_entry(p, struct ub_lun, link);
515                 list_del(p);
516
517                 /* I don't think queue can be NULL. But... Stolen from sx8.c */
518                 if ((q = lun->disk->queue) != NULL)
519                         blk_cleanup_queue(q);
520                 /*
521                  * If we zero disk->private_data BEFORE put_disk, we have
522                  * to check for NULL all over the place in open, release,
523                  * check_media and revalidate, because the block level
524                  * semaphore is well inside the put_disk.
525                  * But we cannot zero after the call, because *disk is gone.
526                  * The sd.c is blatantly racy in this area.
527                  */
528                 /* disk->private_data = NULL; */
529                 put_disk(lun->disk);
530                 lun->disk = NULL;
531
532                 ub_id_put(lun->id);
533                 kfree(lun);
534         }
535
536         usb_set_intfdata(sc->intf, NULL);
537         usb_put_intf(sc->intf);
538         usb_put_dev(sc->dev);
539         kfree(sc);
540 }
541
542 /*
543  * The "command allocator".
544  */
545 static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
546 {
547         struct ub_scsi_cmd *ret;
548
549         if (lun->cmda[0])
550                 return NULL;
551         ret = &lun->cmdv[0];
552         lun->cmda[0] = 1;
553         return ret;
554 }
555
556 static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
557 {
558         if (cmd != &lun->cmdv[0]) {
559                 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
560                     lun->name, cmd);
561                 return;
562         }
563         if (!lun->cmda[0]) {
564                 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
565                 return;
566         }
567         lun->cmda[0] = 0;
568 }
569
570 /*
571  * The command queue.
572  */
573 static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
574 {
575         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
576
577         if (t->qlen++ == 0) {
578                 t->head = cmd;
579                 t->tail = cmd;
580         } else {
581                 t->tail->next = cmd;
582                 t->tail = cmd;
583         }
584
585         if (t->qlen > t->qmax)
586                 t->qmax = t->qlen;
587 }
588
589 static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
590 {
591         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
592
593         if (t->qlen++ == 0) {
594                 t->head = cmd;
595                 t->tail = cmd;
596         } else {
597                 cmd->next = t->head;
598                 t->head = cmd;
599         }
600
601         if (t->qlen > t->qmax)
602                 t->qmax = t->qlen;
603 }
604
605 static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
606 {
607         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
608         struct ub_scsi_cmd *cmd;
609
610         if (t->qlen == 0)
611                 return NULL;
612         if (--t->qlen == 0)
613                 t->tail = NULL;
614         cmd = t->head;
615         t->head = cmd->next;
616         cmd->next = NULL;
617         return cmd;
618 }
619
620 #define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
621
622 /*
623  * The request function is our main entry point
624  */
625
626 static void ub_request_fn(struct request_queue *q)
627 {
628         struct ub_lun *lun = q->queuedata;
629         struct request *rq;
630
631         while ((rq = elv_next_request(q)) != NULL) {
632                 if (ub_request_fn_1(lun, rq) != 0) {
633                         blk_stop_queue(q);
634                         break;
635                 }
636         }
637 }
638
639 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
640 {
641         struct ub_dev *sc = lun->udev;
642         struct ub_scsi_cmd *cmd;
643         struct ub_request *urq;
644         int n_elem;
645
646         if (atomic_read(&sc->poison)) {
647                 blkdev_dequeue_request(rq);
648                 ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
649                 return 0;
650         }
651
652         if (lun->changed && !blk_pc_request(rq)) {
653                 blkdev_dequeue_request(rq);
654                 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
655                 return 0;
656         }
657
658         if (lun->urq.rq != NULL)
659                 return -1;
660         if ((cmd = ub_get_cmd(lun)) == NULL)
661                 return -1;
662         memset(cmd, 0, sizeof(struct ub_scsi_cmd));
663
664         blkdev_dequeue_request(rq);
665
666         urq = &lun->urq;
667         memset(urq, 0, sizeof(struct ub_request));
668         urq->rq = rq;
669
670         /*
671          * get scatterlist from block layer
672          */
673         sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
674         n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
675         if (n_elem < 0) {
676                 /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
677                 printk(KERN_INFO "%s: failed request map (%d)\n",
678                     lun->name, n_elem);
679                 goto drop;
680         }
681         if (n_elem > UB_MAX_REQ_SG) {   /* Paranoia */
682                 printk(KERN_WARNING "%s: request with %d segments\n",
683                     lun->name, n_elem);
684                 goto drop;
685         }
686         urq->nsg = n_elem;
687         sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
688
689         if (blk_pc_request(rq)) {
690                 ub_cmd_build_packet(sc, lun, cmd, urq);
691         } else {
692                 ub_cmd_build_block(sc, lun, cmd, urq);
693         }
694         cmd->state = UB_CMDST_INIT;
695         cmd->lun = lun;
696         cmd->done = ub_rw_cmd_done;
697         cmd->back = urq;
698
699         cmd->tag = sc->tagcnt++;
700         if (ub_submit_scsi(sc, cmd) != 0)
701                 goto drop;
702
703         return 0;
704
705 drop:
706         ub_put_cmd(lun, cmd);
707         ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
708         return 0;
709 }
710
711 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
712     struct ub_scsi_cmd *cmd, struct ub_request *urq)
713 {
714         struct request *rq = urq->rq;
715         unsigned int block, nblks;
716
717         if (rq_data_dir(rq) == WRITE)
718                 cmd->dir = UB_DIR_WRITE;
719         else
720                 cmd->dir = UB_DIR_READ;
721
722         cmd->nsg = urq->nsg;
723         memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
724
725         /*
726          * build the command
727          *
728          * The call to blk_queue_hardsect_size() guarantees that request
729          * is aligned, but it is given in terms of 512 byte units, always.
730          */
731         block = rq->sector >> lun->capacity.bshift;
732         nblks = rq->nr_sectors >> lun->capacity.bshift;
733
734         cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
735         /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
736         cmd->cdb[2] = block >> 24;
737         cmd->cdb[3] = block >> 16;
738         cmd->cdb[4] = block >> 8;
739         cmd->cdb[5] = block;
740         cmd->cdb[7] = nblks >> 8;
741         cmd->cdb[8] = nblks;
742         cmd->cdb_len = 10;
743
744         cmd->len = rq->nr_sectors * 512;
745 }
746
747 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
748     struct ub_scsi_cmd *cmd, struct ub_request *urq)
749 {
750         struct request *rq = urq->rq;
751
752         if (rq->data_len == 0) {
753                 cmd->dir = UB_DIR_NONE;
754         } else {
755                 if (rq_data_dir(rq) == WRITE)
756                         cmd->dir = UB_DIR_WRITE;
757                 else
758                         cmd->dir = UB_DIR_READ;
759         }
760
761         cmd->nsg = urq->nsg;
762         memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
763
764         memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
765         cmd->cdb_len = rq->cmd_len;
766
767         cmd->len = rq->data_len;
768
769         /*
770          * To reapply this to every URB is not as incorrect as it looks.
771          * In return, we avoid any complicated tracking calculations.
772          */
773         cmd->timeo = rq->timeout;
774 }
775
776 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
777 {
778         struct ub_lun *lun = cmd->lun;
779         struct ub_request *urq = cmd->back;
780         struct request *rq;
781         unsigned int scsi_status;
782         unsigned int cmd_len;
783
784         rq = urq->rq;
785
786         if (cmd->error == 0) {
787                 if (blk_pc_request(rq)) {
788                         if (cmd->act_len >= rq->data_len)
789                                 rq->data_len = 0;
790                         else
791                                 rq->data_len -= cmd->act_len;
792                         scsi_status = 0;
793                 } else {
794                         if (cmd->act_len != cmd->len) {
795                                 if ((cmd->key == MEDIUM_ERROR ||
796                                      cmd->key == UNIT_ATTENTION) &&
797                                     ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
798                                         return;
799                                 scsi_status = SAM_STAT_CHECK_CONDITION;
800                         } else {
801                                 scsi_status = 0;
802                         }
803                 }
804         } else {
805                 if (blk_pc_request(rq)) {
806                         /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
807                         memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
808                         rq->sense_len = UB_SENSE_SIZE;
809                         if (sc->top_sense[0] != 0)
810                                 scsi_status = SAM_STAT_CHECK_CONDITION;
811                         else
812                                 scsi_status = DID_ERROR << 16;
813                 } else {
814                         if (cmd->error == -EIO) {
815                                 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
816                                         return;
817                         }
818                         scsi_status = SAM_STAT_CHECK_CONDITION;
819                 }
820         }
821
822         urq->rq = NULL;
823
824         cmd_len = cmd->len;
825         ub_put_cmd(lun, cmd);
826         ub_end_rq(rq, scsi_status, cmd_len);
827         blk_start_queue(lun->disk->queue);
828 }
829
830 static void ub_end_rq(struct request *rq, unsigned int scsi_status,
831     unsigned int cmd_len)
832 {
833         int error;
834         long rqlen;
835
836         if (scsi_status == 0) {
837                 error = 0;
838         } else {
839                 error = -EIO;
840                 rq->errors = scsi_status;
841         }
842         rqlen = blk_rq_bytes(rq);    /* Oddly enough, this is the residue. */
843         if (__blk_end_request(rq, error, cmd_len)) {
844                 printk(KERN_WARNING DRV_NAME
845                     ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
846                     blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
847         }
848 }
849
850 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
851     struct ub_request *urq, struct ub_scsi_cmd *cmd)
852 {
853
854         if (atomic_read(&sc->poison))
855                 return -ENXIO;
856
857         ub_reset_enter(sc, urq->current_try);
858
859         if (urq->current_try >= 3)
860                 return -EIO;
861         urq->current_try++;
862
863         /* Remove this if anyone complains of flooding. */
864         printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
865             "[sense %x %02x %02x] retry %d\n",
866             sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
867             cmd->key, cmd->asc, cmd->ascq, urq->current_try);
868
869         memset(cmd, 0, sizeof(struct ub_scsi_cmd));
870         ub_cmd_build_block(sc, lun, cmd, urq);
871
872         cmd->state = UB_CMDST_INIT;
873         cmd->lun = lun;
874         cmd->done = ub_rw_cmd_done;
875         cmd->back = urq;
876
877         cmd->tag = sc->tagcnt++;
878
879 #if 0 /* Wasteful */
880         return ub_submit_scsi(sc, cmd);
881 #else
882         ub_cmdq_add(sc, cmd);
883         return 0;
884 #endif
885 }
886
887 /*
888  * Submit a regular SCSI operation (not an auto-sense).
889  *
890  * The Iron Law of Good Submit Routine is:
891  * Zero return - callback is done, Nonzero return - callback is not done.
892  * No exceptions.
893  *
894  * Host is assumed locked.
895  */
896 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
897 {
898
899         if (cmd->state != UB_CMDST_INIT ||
900             (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
901                 return -EINVAL;
902         }
903
904         ub_cmdq_add(sc, cmd);
905         /*
906          * We can call ub_scsi_dispatch(sc) right away here, but it's a little
907          * safer to jump to a tasklet, in case upper layers do something silly.
908          */
909         tasklet_schedule(&sc->tasklet);
910         return 0;
911 }
912
913 /*
914  * Submit the first URB for the queued command.
915  * This function does not deal with queueing in any way.
916  */
917 static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
918 {
919         struct bulk_cb_wrap *bcb;
920         int rc;
921
922         bcb = &sc->work_bcb;
923
924         /*
925          * ``If the allocation length is eighteen or greater, and a device
926          * server returns less than eithteen bytes of data, the application
927          * client should assume that the bytes not transferred would have been
928          * zeroes had the device server returned those bytes.''
929          *
930          * We zero sense for all commands so that when a packet request
931          * fails it does not return a stale sense.
932          */
933         memset(&sc->top_sense, 0, UB_SENSE_SIZE);
934
935         /* set up the command wrapper */
936         bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
937         bcb->Tag = cmd->tag;            /* Endianness is not important */
938         bcb->DataTransferLength = cpu_to_le32(cmd->len);
939         bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
940         bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
941         bcb->Length = cmd->cdb_len;
942
943         /* copy the command payload */
944         memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
945
946         UB_INIT_COMPLETION(sc->work_done);
947
948         sc->last_pipe = sc->send_bulk_pipe;
949         usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
950             bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
951
952         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
953                 /* XXX Clear stalls */
954                 ub_complete(&sc->work_done);
955                 return rc;
956         }
957
958         sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
959         add_timer(&sc->work_timer);
960
961         cmd->state = UB_CMDST_CMD;
962         return 0;
963 }
964
965 /*
966  * Timeout handler.
967  */
968 static void ub_urb_timeout(unsigned long arg)
969 {
970         struct ub_dev *sc = (struct ub_dev *) arg;
971         unsigned long flags;
972
973         spin_lock_irqsave(sc->lock, flags);
974         if (!ub_is_completed(&sc->work_done))
975                 usb_unlink_urb(&sc->work_urb);
976         spin_unlock_irqrestore(sc->lock, flags);
977 }
978
979 /*
980  * Completion routine for the work URB.
981  *
982  * This can be called directly from usb_submit_urb (while we have
983  * the sc->lock taken) and from an interrupt (while we do NOT have
984  * the sc->lock taken). Therefore, bounce this off to a tasklet.
985  */
986 static void ub_urb_complete(struct urb *urb)
987 {
988         struct ub_dev *sc = urb->context;
989
990         ub_complete(&sc->work_done);
991         tasklet_schedule(&sc->tasklet);
992 }
993
994 static void ub_scsi_action(unsigned long _dev)
995 {
996         struct ub_dev *sc = (struct ub_dev *) _dev;
997         unsigned long flags;
998
999         spin_lock_irqsave(sc->lock, flags);
1000         ub_scsi_dispatch(sc);
1001         spin_unlock_irqrestore(sc->lock, flags);
1002 }
1003
1004 static void ub_scsi_dispatch(struct ub_dev *sc)
1005 {
1006         struct ub_scsi_cmd *cmd;
1007         int rc;
1008
1009         while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
1010                 if (cmd->state == UB_CMDST_DONE) {
1011                         ub_cmdq_pop(sc);
1012                         (*cmd->done)(sc, cmd);
1013                 } else if (cmd->state == UB_CMDST_INIT) {
1014                         if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
1015                                 break;
1016                         cmd->error = rc;
1017                         cmd->state = UB_CMDST_DONE;
1018                 } else {
1019                         if (!ub_is_completed(&sc->work_done))
1020                                 break;
1021                         del_timer(&sc->work_timer);
1022                         ub_scsi_urb_compl(sc, cmd);
1023                 }
1024         }
1025 }
1026
1027 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1028 {
1029         struct urb *urb = &sc->work_urb;
1030         struct bulk_cs_wrap *bcs;
1031         int len;
1032         int rc;
1033
1034         if (atomic_read(&sc->poison)) {
1035                 ub_state_done(sc, cmd, -ENODEV);
1036                 return;
1037         }
1038
1039         if (cmd->state == UB_CMDST_CLEAR) {
1040                 if (urb->status == -EPIPE) {
1041                         /*
1042                          * STALL while clearning STALL.
1043                          * The control pipe clears itself - nothing to do.
1044                          */
1045                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1046                             sc->name);
1047                         goto Bad_End;
1048                 }
1049
1050                 /*
1051                  * We ignore the result for the halt clear.
1052                  */
1053
1054                 /* reset the endpoint toggle */
1055                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1056                         usb_pipeout(sc->last_pipe), 0);
1057
1058                 ub_state_sense(sc, cmd);
1059
1060         } else if (cmd->state == UB_CMDST_CLR2STS) {
1061                 if (urb->status == -EPIPE) {
1062                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1063                             sc->name);
1064                         goto Bad_End;
1065                 }
1066
1067                 /*
1068                  * We ignore the result for the halt clear.
1069                  */
1070
1071                 /* reset the endpoint toggle */
1072                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1073                         usb_pipeout(sc->last_pipe), 0);
1074
1075                 ub_state_stat(sc, cmd);
1076
1077         } else if (cmd->state == UB_CMDST_CLRRS) {
1078                 if (urb->status == -EPIPE) {
1079                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1080                             sc->name);
1081                         goto Bad_End;
1082                 }
1083
1084                 /*
1085                  * We ignore the result for the halt clear.
1086                  */
1087
1088                 /* reset the endpoint toggle */
1089                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1090                         usb_pipeout(sc->last_pipe), 0);
1091
1092                 ub_state_stat_counted(sc, cmd);
1093
1094         } else if (cmd->state == UB_CMDST_CMD) {
1095                 switch (urb->status) {
1096                 case 0:
1097                         break;
1098                 case -EOVERFLOW:
1099                         goto Bad_End;
1100                 case -EPIPE:
1101                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1102                         if (rc != 0) {
1103                                 printk(KERN_NOTICE "%s: "
1104                                     "unable to submit clear (%d)\n",
1105                                     sc->name, rc);
1106                                 /*
1107                                  * This is typically ENOMEM or some other such shit.
1108                                  * Retrying is pointless. Just do Bad End on it...
1109                                  */
1110                                 ub_state_done(sc, cmd, rc);
1111                                 return;
1112                         }
1113                         cmd->state = UB_CMDST_CLEAR;
1114                         return;
1115                 case -ESHUTDOWN:        /* unplug */
1116                 case -EILSEQ:           /* unplug timeout on uhci */
1117                         ub_state_done(sc, cmd, -ENODEV);
1118                         return;
1119                 default:
1120                         goto Bad_End;
1121                 }
1122                 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1123                         goto Bad_End;
1124                 }
1125
1126                 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
1127                         ub_state_stat(sc, cmd);
1128                         return;
1129                 }
1130
1131                 // udelay(125);         // usb-storage has this
1132                 ub_data_start(sc, cmd);
1133
1134         } else if (cmd->state == UB_CMDST_DATA) {
1135                 if (urb->status == -EPIPE) {
1136                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1137                         if (rc != 0) {
1138                                 printk(KERN_NOTICE "%s: "
1139                                     "unable to submit clear (%d)\n",
1140                                     sc->name, rc);
1141                                 ub_state_done(sc, cmd, rc);
1142                                 return;
1143                         }
1144                         cmd->state = UB_CMDST_CLR2STS;
1145                         return;
1146                 }
1147                 if (urb->status == -EOVERFLOW) {
1148                         /*
1149                          * A babble? Failure, but we must transfer CSW now.
1150                          */
1151                         cmd->error = -EOVERFLOW;        /* A cheap trick... */
1152                         ub_state_stat(sc, cmd);
1153                         return;
1154                 }
1155
1156                 if (cmd->dir == UB_DIR_WRITE) {
1157                         /*
1158                          * Do not continue writes in case of a failure.
1159                          * Doing so would cause sectors to be mixed up,
1160                          * which is worse than sectors lost.
1161                          *
1162                          * We must try to read the CSW, or many devices
1163                          * get confused.
1164                          */
1165                         len = urb->actual_length;
1166                         if (urb->status != 0 ||
1167                             len != cmd->sgv[cmd->current_sg].length) {
1168                                 cmd->act_len += len;
1169
1170                                 cmd->error = -EIO;
1171                                 ub_state_stat(sc, cmd);
1172                                 return;
1173                         }
1174
1175                 } else {
1176                         /*
1177                          * If an error occurs on read, we record it, and
1178                          * continue to fetch data in order to avoid bubble.
1179                          *
1180                          * As a small shortcut, we stop if we detect that
1181                          * a CSW mixed into data.
1182                          */
1183                         if (urb->status != 0)
1184                                 cmd->error = -EIO;
1185
1186                         len = urb->actual_length;
1187                         if (urb->status != 0 ||
1188                             len != cmd->sgv[cmd->current_sg].length) {
1189                                 if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
1190                                         goto Bad_End;
1191                         }
1192                 }
1193
1194                 cmd->act_len += urb->actual_length;
1195
1196                 if (++cmd->current_sg < cmd->nsg) {
1197                         ub_data_start(sc, cmd);
1198                         return;
1199                 }
1200                 ub_state_stat(sc, cmd);
1201
1202         } else if (cmd->state == UB_CMDST_STAT) {
1203                 if (urb->status == -EPIPE) {
1204                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1205                         if (rc != 0) {
1206                                 printk(KERN_NOTICE "%s: "
1207                                     "unable to submit clear (%d)\n",
1208                                     sc->name, rc);
1209                                 ub_state_done(sc, cmd, rc);
1210                                 return;
1211                         }
1212
1213                         /*
1214                          * Having a stall when getting CSW is an error, so
1215                          * make sure uppper levels are not oblivious to it.
1216                          */
1217                         cmd->error = -EIO;              /* A cheap trick... */
1218
1219                         cmd->state = UB_CMDST_CLRRS;
1220                         return;
1221                 }
1222
1223                 /* Catch everything, including -EOVERFLOW and other nasties. */
1224                 if (urb->status != 0)
1225                         goto Bad_End;
1226
1227                 if (urb->actual_length == 0) {
1228                         ub_state_stat_counted(sc, cmd);
1229                         return;
1230                 }
1231
1232                 /*
1233                  * Check the returned Bulk protocol status.
1234                  * The status block has to be validated first.
1235                  */
1236
1237                 bcs = &sc->work_bcs;
1238
1239                 if (sc->signature == cpu_to_le32(0)) {
1240                         /*
1241                          * This is the first reply, so do not perform the check.
1242                          * Instead, remember the signature the device uses
1243                          * for future checks. But do not allow a nul.
1244                          */
1245                         sc->signature = bcs->Signature;
1246                         if (sc->signature == cpu_to_le32(0)) {
1247                                 ub_state_stat_counted(sc, cmd);
1248                                 return;
1249                         }
1250                 } else {
1251                         if (bcs->Signature != sc->signature) {
1252                                 ub_state_stat_counted(sc, cmd);
1253                                 return;
1254                         }
1255                 }
1256
1257                 if (bcs->Tag != cmd->tag) {
1258                         /*
1259                          * This usually happens when we disagree with the
1260                          * device's microcode about something. For instance,
1261                          * a few of them throw this after timeouts. They buffer
1262                          * commands and reply at commands we timed out before.
1263                          * Without flushing these replies we loop forever.
1264                          */
1265                         ub_state_stat_counted(sc, cmd);
1266                         return;
1267                 }
1268
1269                 len = le32_to_cpu(bcs->Residue);
1270                 if (len != cmd->len - cmd->act_len) {
1271                         /*
1272                          * It is all right to transfer less, the caller has
1273                          * to check. But it's not all right if the device
1274                          * counts disagree with our counts.
1275                          */
1276                         goto Bad_End;
1277                 }
1278
1279                 switch (bcs->Status) {
1280                 case US_BULK_STAT_OK:
1281                         break;
1282                 case US_BULK_STAT_FAIL:
1283                         ub_state_sense(sc, cmd);
1284                         return;
1285                 case US_BULK_STAT_PHASE:
1286                         goto Bad_End;
1287                 default:
1288                         printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
1289                             sc->name, bcs->Status);
1290                         ub_state_done(sc, cmd, -EINVAL);
1291                         return;
1292                 }
1293
1294                 /* Not zeroing error to preserve a babble indicator */
1295                 if (cmd->error != 0) {
1296                         ub_state_sense(sc, cmd);
1297                         return;
1298                 }
1299                 cmd->state = UB_CMDST_DONE;
1300                 ub_cmdq_pop(sc);
1301                 (*cmd->done)(sc, cmd);
1302
1303         } else if (cmd->state == UB_CMDST_SENSE) {
1304                 ub_state_done(sc, cmd, -EIO);
1305
1306         } else {
1307                 printk(KERN_WARNING "%s: "
1308                     "wrong command state %d\n",
1309                     sc->name, cmd->state);
1310                 ub_state_done(sc, cmd, -EINVAL);
1311                 return;
1312         }
1313         return;
1314
1315 Bad_End: /* Little Excel is dead */
1316         ub_state_done(sc, cmd, -EIO);
1317 }
1318
1319 /*
1320  * Factorization helper for the command state machine:
1321  * Initiate a data segment transfer.
1322  */
1323 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1324 {
1325         struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
1326         int pipe;
1327         int rc;
1328
1329         UB_INIT_COMPLETION(sc->work_done);
1330
1331         if (cmd->dir == UB_DIR_READ)
1332                 pipe = sc->recv_bulk_pipe;
1333         else
1334                 pipe = sc->send_bulk_pipe;
1335         sc->last_pipe = pipe;
1336         usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
1337             sg->length, ub_urb_complete, sc);
1338
1339         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1340                 /* XXX Clear stalls */
1341                 ub_complete(&sc->work_done);
1342                 ub_state_done(sc, cmd, rc);
1343                 return;
1344         }
1345
1346         if (cmd->timeo)
1347                 sc->work_timer.expires = jiffies + cmd->timeo;
1348         else
1349                 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
1350         add_timer(&sc->work_timer);
1351
1352         cmd->state = UB_CMDST_DATA;
1353 }
1354
1355 /*
1356  * Factorization helper for the command state machine:
1357  * Finish the command.
1358  */
1359 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
1360 {
1361
1362         cmd->error = rc;
1363         cmd->state = UB_CMDST_DONE;
1364         ub_cmdq_pop(sc);
1365         (*cmd->done)(sc, cmd);
1366 }
1367
1368 /*
1369  * Factorization helper for the command state machine:
1370  * Submit a CSW read.
1371  */
1372 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1373 {
1374         int rc;
1375
1376         UB_INIT_COMPLETION(sc->work_done);
1377
1378         sc->last_pipe = sc->recv_bulk_pipe;
1379         usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1380             &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1381
1382         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1383                 /* XXX Clear stalls */
1384                 ub_complete(&sc->work_done);
1385                 ub_state_done(sc, cmd, rc);
1386                 return -1;
1387         }
1388
1389         if (cmd->timeo)
1390                 sc->work_timer.expires = jiffies + cmd->timeo;
1391         else
1392                 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1393         add_timer(&sc->work_timer);
1394         return 0;
1395 }
1396
1397 /*
1398  * Factorization helper for the command state machine:
1399  * Submit a CSW read and go to STAT state.
1400  */
1401 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1402 {
1403
1404         if (__ub_state_stat(sc, cmd) != 0)
1405                 return;
1406
1407         cmd->stat_count = 0;
1408         cmd->state = UB_CMDST_STAT;
1409 }
1410
1411 /*
1412  * Factorization helper for the command state machine:
1413  * Submit a CSW read and go to STAT state with counter (along [C] path).
1414  */
1415 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1416 {
1417
1418         if (++cmd->stat_count >= 4) {
1419                 ub_state_sense(sc, cmd);
1420                 return;
1421         }
1422
1423         if (__ub_state_stat(sc, cmd) != 0)
1424                 return;
1425
1426         cmd->state = UB_CMDST_STAT;
1427 }
1428
1429 /*
1430  * Factorization helper for the command state machine:
1431  * Submit a REQUEST SENSE and go to SENSE state.
1432  */
1433 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1434 {
1435         struct ub_scsi_cmd *scmd;
1436         struct scatterlist *sg;
1437         int rc;
1438
1439         if (cmd->cdb[0] == REQUEST_SENSE) {
1440                 rc = -EPIPE;
1441                 goto error;
1442         }
1443
1444         scmd = &sc->top_rqs_cmd;
1445         memset(scmd, 0, sizeof(struct ub_scsi_cmd));
1446         scmd->cdb[0] = REQUEST_SENSE;
1447         scmd->cdb[4] = UB_SENSE_SIZE;
1448         scmd->cdb_len = 6;
1449         scmd->dir = UB_DIR_READ;
1450         scmd->state = UB_CMDST_INIT;
1451         scmd->nsg = 1;
1452         sg = &scmd->sgv[0];
1453         sg_init_table(sg, UB_MAX_REQ_SG);
1454         sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
1455                         (unsigned long)sc->top_sense & (PAGE_SIZE-1));
1456         scmd->len = UB_SENSE_SIZE;
1457         scmd->lun = cmd->lun;
1458         scmd->done = ub_top_sense_done;
1459         scmd->back = cmd;
1460
1461         scmd->tag = sc->tagcnt++;
1462
1463         cmd->state = UB_CMDST_SENSE;
1464
1465         ub_cmdq_insert(sc, scmd);
1466         return;
1467
1468 error:
1469         ub_state_done(sc, cmd, rc);
1470 }
1471
1472 /*
1473  * A helper for the command's state machine:
1474  * Submit a stall clear.
1475  */
1476 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1477     int stalled_pipe)
1478 {
1479         int endp;
1480         struct usb_ctrlrequest *cr;
1481         int rc;
1482
1483         endp = usb_pipeendpoint(stalled_pipe);
1484         if (usb_pipein (stalled_pipe))
1485                 endp |= USB_DIR_IN;
1486
1487         cr = &sc->work_cr;
1488         cr->bRequestType = USB_RECIP_ENDPOINT;
1489         cr->bRequest = USB_REQ_CLEAR_FEATURE;
1490         cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1491         cr->wIndex = cpu_to_le16(endp);
1492         cr->wLength = cpu_to_le16(0);
1493
1494         UB_INIT_COMPLETION(sc->work_done);
1495
1496         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1497             (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1498
1499         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1500                 ub_complete(&sc->work_done);
1501                 return rc;
1502         }
1503
1504         sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1505         add_timer(&sc->work_timer);
1506         return 0;
1507 }
1508
1509 /*
1510  */
1511 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1512 {
1513         unsigned char *sense = sc->top_sense;
1514         struct ub_scsi_cmd *cmd;
1515
1516         /*
1517          * Find the command which triggered the unit attention or a check,
1518          * save the sense into it, and advance its state machine.
1519          */
1520         if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1521                 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1522                 return;
1523         }
1524         if (cmd != scmd->back) {
1525                 printk(KERN_WARNING "%s: "
1526                     "sense done for wrong command 0x%x\n",
1527                     sc->name, cmd->tag);
1528                 return;
1529         }
1530         if (cmd->state != UB_CMDST_SENSE) {
1531                 printk(KERN_WARNING "%s: "
1532                     "sense done with bad cmd state %d\n",
1533                     sc->name, cmd->state);
1534                 return;
1535         }
1536
1537         /*
1538          * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1539          */
1540         cmd->key = sense[2] & 0x0F;
1541         cmd->asc = sense[12];
1542         cmd->ascq = sense[13];
1543
1544         ub_scsi_urb_compl(sc, cmd);
1545 }
1546
1547 /*
1548  * Reset management
1549  * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
1550  * XXX Make usb_sync_reset asynchronous.
1551  */
1552
1553 static void ub_reset_enter(struct ub_dev *sc, int try)
1554 {
1555
1556         if (sc->reset) {
1557                 /* This happens often on multi-LUN devices. */
1558                 return;
1559         }
1560         sc->reset = try + 1;
1561
1562 #if 0 /* Not needed because the disconnect waits for us. */
1563         unsigned long flags;
1564         spin_lock_irqsave(&ub_lock, flags);
1565         sc->openc++;
1566         spin_unlock_irqrestore(&ub_lock, flags);
1567 #endif
1568
1569 #if 0 /* We let them stop themselves. */
1570         struct ub_lun *lun;
1571         list_for_each_entry(lun, &sc->luns, link) {
1572                 blk_stop_queue(lun->disk->queue);
1573         }
1574 #endif
1575
1576         schedule_work(&sc->reset_work);
1577 }
1578
1579 static void ub_reset_task(struct work_struct *work)
1580 {
1581         struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
1582         unsigned long flags;
1583         struct ub_lun *lun;
1584         int lkr, rc;
1585
1586         if (!sc->reset) {
1587                 printk(KERN_WARNING "%s: Running reset unrequested\n",
1588                     sc->name);
1589                 return;
1590         }
1591
1592         if (atomic_read(&sc->poison)) {
1593                 ;
1594         } else if ((sc->reset & 1) == 0) {
1595                 ub_sync_reset(sc);
1596                 msleep(700);    /* usb-storage sleeps 6s (!) */
1597                 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1598                 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1599         } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1600                 ;
1601         } else {
1602                 if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
1603                         printk(KERN_NOTICE
1604                             "%s: usb_lock_device_for_reset failed (%d)\n",
1605                             sc->name, lkr);
1606                 } else {
1607                         rc = usb_reset_device(sc->dev);
1608                         if (rc < 0) {
1609                                 printk(KERN_NOTICE "%s: "
1610                                     "usb_lock_device_for_reset failed (%d)\n",
1611                                     sc->name, rc);
1612                         }
1613
1614                         if (lkr)
1615                                 usb_unlock_device(sc->dev);
1616                 }
1617         }
1618
1619         /*
1620          * In theory, no commands can be running while reset is active,
1621          * so nobody can ask for another reset, and so we do not need any
1622          * queues of resets or anything. We do need a spinlock though,
1623          * to interact with block layer.
1624          */
1625         spin_lock_irqsave(sc->lock, flags);
1626         sc->reset = 0;
1627         tasklet_schedule(&sc->tasklet);
1628         list_for_each_entry(lun, &sc->luns, link) {
1629                 blk_start_queue(lun->disk->queue);
1630         }
1631         wake_up(&sc->reset_wait);
1632         spin_unlock_irqrestore(sc->lock, flags);
1633 }
1634
1635 /*
1636  * This is called from a process context.
1637  */
1638 static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1639 {
1640
1641         lun->readonly = 0;      /* XXX Query this from the device */
1642
1643         lun->capacity.nsec = 0;
1644         lun->capacity.bsize = 512;
1645         lun->capacity.bshift = 0;
1646
1647         if (ub_sync_tur(sc, lun) != 0)
1648                 return;                 /* Not ready */
1649         lun->changed = 0;
1650
1651         if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1652                 /*
1653                  * The retry here means something is wrong, either with the
1654                  * device, with the transport, or with our code.
1655                  * We keep this because sd.c has retries for capacity.
1656                  */
1657                 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1658                         lun->capacity.nsec = 0;
1659                         lun->capacity.bsize = 512;
1660                         lun->capacity.bshift = 0;
1661                 }
1662         }
1663 }
1664
1665 /*
1666  * The open funcion.
1667  * This is mostly needed to keep refcounting, but also to support
1668  * media checks on removable media drives.
1669  */
1670 static int ub_bd_open(struct inode *inode, struct file *filp)
1671 {
1672         struct gendisk *disk = inode->i_bdev->bd_disk;
1673         struct ub_lun *lun = disk->private_data;
1674         struct ub_dev *sc = lun->udev;
1675         unsigned long flags;
1676         int rc;
1677
1678         spin_lock_irqsave(&ub_lock, flags);
1679         if (atomic_read(&sc->poison)) {
1680                 spin_unlock_irqrestore(&ub_lock, flags);
1681                 return -ENXIO;
1682         }
1683         sc->openc++;
1684         spin_unlock_irqrestore(&ub_lock, flags);
1685
1686         if (lun->removable || lun->readonly)
1687                 check_disk_change(inode->i_bdev);
1688
1689         /*
1690          * The sd.c considers ->media_present and ->changed not equivalent,
1691          * under some pretty murky conditions (a failure of READ CAPACITY).
1692          * We may need it one day.
1693          */
1694         if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
1695                 rc = -ENOMEDIUM;
1696                 goto err_open;
1697         }
1698
1699         if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
1700                 rc = -EROFS;
1701                 goto err_open;
1702         }
1703
1704         return 0;
1705
1706 err_open:
1707         ub_put(sc);
1708         return rc;
1709 }
1710
1711 /*
1712  */
1713 static int ub_bd_release(struct inode *inode, struct file *filp)
1714 {
1715         struct gendisk *disk = inode->i_bdev->bd_disk;
1716         struct ub_lun *lun = disk->private_data;
1717         struct ub_dev *sc = lun->udev;
1718
1719         ub_put(sc);
1720         return 0;
1721 }
1722
1723 /*
1724  * The ioctl interface.
1725  */
1726 static int ub_bd_ioctl(struct inode *inode, struct file *filp,
1727     unsigned int cmd, unsigned long arg)
1728 {
1729         struct gendisk *disk = inode->i_bdev->bd_disk;
1730         void __user *usermem = (void __user *) arg;
1731
1732         return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem);
1733 }
1734
1735 /*
1736  * This is called once a new disk was seen by the block layer or by ub_probe().
1737  * The main onjective here is to discover the features of the media such as
1738  * the capacity, read-only status, etc. USB storage generally does not
1739  * need to be spun up, but if we needed it, this would be the place.
1740  *
1741  * This call can sleep.
1742  *
1743  * The return code is not used.
1744  */
1745 static int ub_bd_revalidate(struct gendisk *disk)
1746 {
1747         struct ub_lun *lun = disk->private_data;
1748
1749         ub_revalidate(lun->udev, lun);
1750
1751         /* XXX Support sector size switching like in sr.c */
1752         blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
1753         set_capacity(disk, lun->capacity.nsec);
1754         // set_disk_ro(sdkp->disk, lun->readonly);
1755
1756         return 0;
1757 }
1758
1759 /*
1760  * The check is called by the block layer to verify if the media
1761  * is still available. It is supposed to be harmless, lightweight and
1762  * non-intrusive in case the media was not changed.
1763  *
1764  * This call can sleep.
1765  *
1766  * The return code is bool!
1767  */
1768 static int ub_bd_media_changed(struct gendisk *disk)
1769 {
1770         struct ub_lun *lun = disk->private_data;
1771
1772         if (!lun->removable)
1773                 return 0;
1774
1775         /*
1776          * We clean checks always after every command, so this is not
1777          * as dangerous as it looks. If the TEST_UNIT_READY fails here,
1778          * the device is actually not ready with operator or software
1779          * intervention required. One dangerous item might be a drive which
1780          * spins itself down, and come the time to write dirty pages, this
1781          * will fail, then block layer discards the data. Since we never
1782          * spin drives up, such devices simply cannot be used with ub anyway.
1783          */
1784         if (ub_sync_tur(lun->udev, lun) != 0) {
1785                 lun->changed = 1;
1786                 return 1;
1787         }
1788
1789         return lun->changed;
1790 }
1791
1792 static struct block_device_operations ub_bd_fops = {
1793         .owner          = THIS_MODULE,
1794         .open           = ub_bd_open,
1795         .release        = ub_bd_release,
1796         .ioctl          = ub_bd_ioctl,
1797         .media_changed  = ub_bd_media_changed,
1798         .revalidate_disk = ub_bd_revalidate,
1799 };
1800
1801 /*
1802  * Common ->done routine for commands executed synchronously.
1803  */
1804 static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1805 {
1806         struct completion *cop = cmd->back;
1807         complete(cop);
1808 }
1809
1810 /*
1811  * Test if the device has a check condition on it, synchronously.
1812  */
1813 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1814 {
1815         struct ub_scsi_cmd *cmd;
1816         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1817         unsigned long flags;
1818         struct completion compl;
1819         int rc;
1820
1821         init_completion(&compl);
1822
1823         rc = -ENOMEM;
1824         if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1825                 goto err_alloc;
1826
1827         cmd->cdb[0] = TEST_UNIT_READY;
1828         cmd->cdb_len = 6;
1829         cmd->dir = UB_DIR_NONE;
1830         cmd->state = UB_CMDST_INIT;
1831         cmd->lun = lun;                 /* This may be NULL, but that's ok */
1832         cmd->done = ub_probe_done;
1833         cmd->back = &compl;
1834
1835         spin_lock_irqsave(sc->lock, flags);
1836         cmd->tag = sc->tagcnt++;
1837
1838         rc = ub_submit_scsi(sc, cmd);
1839         spin_unlock_irqrestore(sc->lock, flags);
1840
1841         if (rc != 0)
1842                 goto err_submit;
1843
1844         wait_for_completion(&compl);
1845
1846         rc = cmd->error;
1847
1848         if (rc == -EIO && cmd->key != 0)        /* Retries for benh's key */
1849                 rc = cmd->key;
1850
1851 err_submit:
1852         kfree(cmd);
1853 err_alloc:
1854         return rc;
1855 }
1856
1857 /*
1858  * Read the SCSI capacity synchronously (for probing).
1859  */
1860 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1861     struct ub_capacity *ret)
1862 {
1863         struct ub_scsi_cmd *cmd;
1864         struct scatterlist *sg;
1865         char *p;
1866         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1867         unsigned long flags;
1868         unsigned int bsize, shift;
1869         unsigned long nsec;
1870         struct completion compl;
1871         int rc;
1872
1873         init_completion(&compl);
1874
1875         rc = -ENOMEM;
1876         if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1877                 goto err_alloc;
1878         p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1879
1880         cmd->cdb[0] = 0x25;
1881         cmd->cdb_len = 10;
1882         cmd->dir = UB_DIR_READ;
1883         cmd->state = UB_CMDST_INIT;
1884         cmd->nsg = 1;
1885         sg = &cmd->sgv[0];
1886         sg_init_table(sg, UB_MAX_REQ_SG);
1887         sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
1888         cmd->len = 8;
1889         cmd->lun = lun;
1890         cmd->done = ub_probe_done;
1891         cmd->back = &compl;
1892
1893         spin_lock_irqsave(sc->lock, flags);
1894         cmd->tag = sc->tagcnt++;
1895
1896         rc = ub_submit_scsi(sc, cmd);
1897         spin_unlock_irqrestore(sc->lock, flags);
1898
1899         if (rc != 0)
1900                 goto err_submit;
1901
1902         wait_for_completion(&compl);
1903
1904         if (cmd->error != 0) {
1905                 rc = -EIO;
1906                 goto err_read;
1907         }
1908         if (cmd->act_len != 8) {
1909                 rc = -EIO;
1910                 goto err_read;
1911         }
1912
1913         /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
1914         nsec = be32_to_cpu(*(__be32 *)p) + 1;
1915         bsize = be32_to_cpu(*(__be32 *)(p + 4));
1916         switch (bsize) {
1917         case 512:       shift = 0;      break;
1918         case 1024:      shift = 1;      break;
1919         case 2048:      shift = 2;      break;
1920         case 4096:      shift = 3;      break;
1921         default:
1922                 rc = -EDOM;
1923                 goto err_inv_bsize;
1924         }
1925
1926         ret->bsize = bsize;
1927         ret->bshift = shift;
1928         ret->nsec = nsec << shift;
1929         rc = 0;
1930
1931 err_inv_bsize:
1932 err_read:
1933 err_submit:
1934         kfree(cmd);
1935 err_alloc:
1936         return rc;
1937 }
1938
1939 /*
1940  */
1941 static void ub_probe_urb_complete(struct urb *urb)
1942 {
1943         struct completion *cop = urb->context;
1944         complete(cop);
1945 }
1946
1947 static void ub_probe_timeout(unsigned long arg)
1948 {
1949         struct completion *cop = (struct completion *) arg;
1950         complete(cop);
1951 }
1952
1953 /*
1954  * Reset with a Bulk reset.
1955  */
1956 static int ub_sync_reset(struct ub_dev *sc)
1957 {
1958         int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1959         struct usb_ctrlrequest *cr;
1960         struct completion compl;
1961         struct timer_list timer;
1962         int rc;
1963
1964         init_completion(&compl);
1965
1966         cr = &sc->work_cr;
1967         cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1968         cr->bRequest = US_BULK_RESET_REQUEST;
1969         cr->wValue = cpu_to_le16(0);
1970         cr->wIndex = cpu_to_le16(ifnum);
1971         cr->wLength = cpu_to_le16(0);
1972
1973         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1974             (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1975
1976         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1977                 printk(KERN_WARNING
1978                      "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
1979                 return rc;
1980         }
1981
1982         init_timer(&timer);
1983         timer.function = ub_probe_timeout;
1984         timer.data = (unsigned long) &compl;
1985         timer.expires = jiffies + UB_CTRL_TIMEOUT;
1986         add_timer(&timer);
1987
1988         wait_for_completion(&compl);
1989
1990         del_timer_sync(&timer);
1991         usb_kill_urb(&sc->work_urb);
1992
1993         return sc->work_urb.status;
1994 }
1995
1996 /*
1997  * Get number of LUNs by the way of Bulk GetMaxLUN command.
1998  */
1999 static int ub_sync_getmaxlun(struct ub_dev *sc)
2000 {
2001         int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2002         unsigned char *p;
2003         enum { ALLOC_SIZE = 1 };
2004         struct usb_ctrlrequest *cr;
2005         struct completion compl;
2006         struct timer_list timer;
2007         int nluns;
2008         int rc;
2009
2010         init_completion(&compl);
2011
2012         rc = -ENOMEM;
2013         if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2014                 goto err_alloc;
2015         *p = 55;
2016
2017         cr = &sc->work_cr;
2018         cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2019         cr->bRequest = US_BULK_GET_MAX_LUN;
2020         cr->wValue = cpu_to_le16(0);
2021         cr->wIndex = cpu_to_le16(ifnum);
2022         cr->wLength = cpu_to_le16(1);
2023
2024         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2025             (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2026
2027         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
2028                 goto err_submit;
2029
2030         init_timer(&timer);
2031         timer.function = ub_probe_timeout;
2032         timer.data = (unsigned long) &compl;
2033         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2034         add_timer(&timer);
2035
2036         wait_for_completion(&compl);
2037
2038         del_timer_sync(&timer);
2039         usb_kill_urb(&sc->work_urb);
2040
2041         if ((rc = sc->work_urb.status) < 0)
2042                 goto err_io;
2043
2044         if (sc->work_urb.actual_length != 1) {
2045                 nluns = 0;
2046         } else {
2047                 if ((nluns = *p) == 55) {
2048                         nluns = 0;
2049                 } else {
2050                         /* GetMaxLUN returns the maximum LUN number */
2051                         nluns += 1;
2052                         if (nluns > UB_MAX_LUNS)
2053                                 nluns = UB_MAX_LUNS;
2054                 }
2055         }
2056
2057         kfree(p);
2058         return nluns;
2059
2060 err_io:
2061 err_submit:
2062         kfree(p);
2063 err_alloc:
2064         return rc;
2065 }
2066
2067 /*
2068  * Clear initial stalls.
2069  */
2070 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2071 {
2072         int endp;
2073         struct usb_ctrlrequest *cr;
2074         struct completion compl;
2075         struct timer_list timer;
2076         int rc;
2077
2078         init_completion(&compl);
2079
2080         endp = usb_pipeendpoint(stalled_pipe);
2081         if (usb_pipein (stalled_pipe))
2082                 endp |= USB_DIR_IN;
2083
2084         cr = &sc->work_cr;
2085         cr->bRequestType = USB_RECIP_ENDPOINT;
2086         cr->bRequest = USB_REQ_CLEAR_FEATURE;
2087         cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2088         cr->wIndex = cpu_to_le16(endp);
2089         cr->wLength = cpu_to_le16(0);
2090
2091         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2092             (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2093
2094         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2095                 printk(KERN_WARNING
2096                      "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2097                 return rc;
2098         }
2099
2100         init_timer(&timer);
2101         timer.function = ub_probe_timeout;
2102         timer.data = (unsigned long) &compl;
2103         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2104         add_timer(&timer);
2105
2106         wait_for_completion(&compl);
2107
2108         del_timer_sync(&timer);
2109         usb_kill_urb(&sc->work_urb);
2110
2111         /* reset the endpoint toggle */
2112         usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0);
2113
2114         return 0;
2115 }
2116
2117 /*
2118  * Get the pipe settings.
2119  */
2120 static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2121     struct usb_interface *intf)
2122 {
2123         struct usb_host_interface *altsetting = intf->cur_altsetting;
2124         struct usb_endpoint_descriptor *ep_in = NULL;
2125         struct usb_endpoint_descriptor *ep_out = NULL;
2126         struct usb_endpoint_descriptor *ep;
2127         int i;
2128
2129         /*
2130          * Find the endpoints we need.
2131          * We are expecting a minimum of 2 endpoints - in and out (bulk).
2132          * We will ignore any others.
2133          */
2134         for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2135                 ep = &altsetting->endpoint[i].desc;
2136
2137                 /* Is it a BULK endpoint? */
2138                 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
2139                                 == USB_ENDPOINT_XFER_BULK) {
2140                         /* BULK in or out? */
2141                         if (ep->bEndpointAddress & USB_DIR_IN) {
2142                                 if (ep_in == NULL)
2143                                         ep_in = ep;
2144                         } else {
2145                                 if (ep_out == NULL)
2146                                         ep_out = ep;
2147                         }
2148                 }
2149         }
2150
2151         if (ep_in == NULL || ep_out == NULL) {
2152                 printk(KERN_NOTICE "%s: failed endpoint check\n",
2153                     sc->name);
2154                 return -ENODEV;
2155         }
2156
2157         /* Calculate and store the pipe values */
2158         sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2159         sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2160         sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2161                 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2162         sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, 
2163                 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2164
2165         return 0;
2166 }
2167
2168 /*
2169  * Probing is done in the process context, which allows us to cheat
2170  * and not to build a state machine for the discovery.
2171  */
2172 static int ub_probe(struct usb_interface *intf,
2173     const struct usb_device_id *dev_id)
2174 {
2175         struct ub_dev *sc;
2176         int nluns;
2177         int rc;
2178         int i;
2179
2180         if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2181                 return -ENXIO;
2182
2183         rc = -ENOMEM;
2184         if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2185                 goto err_core;
2186         sc->lock = ub_next_lock();
2187         INIT_LIST_HEAD(&sc->luns);
2188         usb_init_urb(&sc->work_urb);
2189         tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2190         atomic_set(&sc->poison, 0);
2191         INIT_WORK(&sc->reset_work, ub_reset_task);
2192         init_waitqueue_head(&sc->reset_wait);
2193
2194         init_timer(&sc->work_timer);
2195         sc->work_timer.data = (unsigned long) sc;
2196         sc->work_timer.function = ub_urb_timeout;
2197
2198         ub_init_completion(&sc->work_done);
2199         sc->work_done.done = 1;         /* A little yuk, but oh well... */
2200
2201         sc->dev = interface_to_usbdev(intf);
2202         sc->intf = intf;
2203         // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2204         usb_set_intfdata(intf, sc);
2205         usb_get_dev(sc->dev);
2206         /*
2207          * Since we give the interface struct to the block level through
2208          * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2209          * oopses on close after a disconnect (kernels 2.6.16 and up).
2210          */
2211         usb_get_intf(sc->intf);
2212
2213         snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2214             sc->dev->bus->busnum, sc->dev->devnum);
2215
2216         /* XXX Verify that we can handle the device (from descriptors) */
2217
2218         if (ub_get_pipes(sc, sc->dev, intf) != 0)
2219                 goto err_dev_desc;
2220
2221         /*
2222          * At this point, all USB initialization is done, do upper layer.
2223          * We really hate halfway initialized structures, so from the
2224          * invariants perspective, this ub_dev is fully constructed at
2225          * this point.
2226          */
2227
2228         /*
2229          * This is needed to clear toggles. It is a problem only if we do
2230          * `rmmod ub && modprobe ub` without disconnects, but we like that.
2231          */
2232 #if 0 /* iPod Mini fails if we do this (big white iPod works) */
2233         ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2234         ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2235 #endif
2236
2237         /*
2238          * The way this is used by the startup code is a little specific.
2239          * A SCSI check causes a USB stall. Our common case code sees it
2240          * and clears the check, after which the device is ready for use.
2241          * But if a check was not present, any command other than
2242          * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2243          *
2244          * If we neglect to clear the SCSI check, the first real command fails
2245          * (which is the capacity readout). We clear that and retry, but why
2246          * causing spurious retries for no reason.
2247          *
2248          * Revalidation may start with its own TEST_UNIT_READY, but that one
2249          * has to succeed, so we clear checks with an additional one here.
2250          * In any case it's not our business how revaliadation is implemented.
2251          */
2252         for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
2253                 if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2254                 if (rc != 0x6) break;
2255                 msleep(10);
2256         }
2257
2258         nluns = 1;
2259         for (i = 0; i < 3; i++) {
2260                 if ((rc = ub_sync_getmaxlun(sc)) < 0)
2261                         break;
2262                 if (rc != 0) {
2263                         nluns = rc;
2264                         break;
2265                 }
2266                 msleep(100);
2267         }
2268
2269         for (i = 0; i < nluns; i++) {
2270                 ub_probe_lun(sc, i);
2271         }
2272         return 0;
2273
2274 err_dev_desc:
2275         usb_set_intfdata(intf, NULL);
2276         usb_put_intf(sc->intf);
2277         usb_put_dev(sc->dev);
2278         kfree(sc);
2279 err_core:
2280         return rc;
2281 }
2282
2283 static int ub_probe_lun(struct ub_dev *sc, int lnum)
2284 {
2285         struct ub_lun *lun;
2286         struct request_queue *q;
2287         struct gendisk *disk;
2288         int rc;
2289
2290         rc = -ENOMEM;
2291         if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2292                 goto err_alloc;
2293         lun->num = lnum;
2294
2295         rc = -ENOSR;
2296         if ((lun->id = ub_id_get()) == -1)
2297                 goto err_id;
2298
2299         lun->udev = sc;
2300
2301         snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2302             lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2303
2304         lun->removable = 1;             /* XXX Query this from the device */
2305         lun->changed = 1;               /* ub_revalidate clears only */
2306         ub_revalidate(sc, lun);
2307
2308         rc = -ENOMEM;
2309         if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2310                 goto err_diskalloc;
2311
2312         sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2313         disk->major = UB_MAJOR;
2314         disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2315         disk->fops = &ub_bd_fops;
2316         disk->private_data = lun;
2317         disk->driverfs_dev = &sc->intf->dev;
2318
2319         rc = -ENOMEM;
2320         if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2321                 goto err_blkqinit;
2322
2323         disk->queue = q;
2324
2325         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2326         blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2327         blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2328         blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
2329         blk_queue_max_sectors(q, UB_MAX_SECTORS);
2330         blk_queue_hardsect_size(q, lun->capacity.bsize);
2331
2332         lun->disk = disk;
2333         q->queuedata = lun;
2334         list_add(&lun->link, &sc->luns);
2335
2336         set_capacity(disk, lun->capacity.nsec);
2337         if (lun->removable)
2338                 disk->flags |= GENHD_FL_REMOVABLE;
2339
2340         add_disk(disk);
2341
2342         return 0;
2343
2344 err_blkqinit:
2345         put_disk(disk);
2346 err_diskalloc:
2347         ub_id_put(lun->id);
2348 err_id:
2349         kfree(lun);
2350 err_alloc:
2351         return rc;
2352 }
2353
2354 static void ub_disconnect(struct usb_interface *intf)
2355 {
2356         struct ub_dev *sc = usb_get_intfdata(intf);
2357         struct ub_lun *lun;
2358         unsigned long flags;
2359
2360         /*
2361          * Prevent ub_bd_release from pulling the rug from under us.
2362          * XXX This is starting to look like a kref.
2363          * XXX Why not to take this ref at probe time?
2364          */
2365         spin_lock_irqsave(&ub_lock, flags);
2366         sc->openc++;
2367         spin_unlock_irqrestore(&ub_lock, flags);
2368
2369         /*
2370          * Fence stall clearnings, operations triggered by unlinkings and so on.
2371          * We do not attempt to unlink any URBs, because we do not trust the
2372          * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2373          */
2374         atomic_set(&sc->poison, 1);
2375
2376         /*
2377          * Wait for reset to end, if any.
2378          */
2379         wait_event(sc->reset_wait, !sc->reset);
2380
2381         /*
2382          * Blow away queued commands.
2383          *
2384          * Actually, this never works, because before we get here
2385          * the HCD terminates outstanding URB(s). It causes our
2386          * SCSI command queue to advance, commands fail to submit,
2387          * and the whole queue drains. So, we just use this code to
2388          * print warnings.
2389          */
2390         spin_lock_irqsave(sc->lock, flags);
2391         {
2392                 struct ub_scsi_cmd *cmd;
2393                 int cnt = 0;
2394                 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2395                         cmd->error = -ENOTCONN;
2396                         cmd->state = UB_CMDST_DONE;
2397                         ub_cmdq_pop(sc);
2398                         (*cmd->done)(sc, cmd);
2399                         cnt++;
2400                 }
2401                 if (cnt != 0) {
2402                         printk(KERN_WARNING "%s: "
2403                             "%d was queued after shutdown\n", sc->name, cnt);
2404                 }
2405         }
2406         spin_unlock_irqrestore(sc->lock, flags);
2407
2408         /*
2409          * Unregister the upper layer.
2410          */
2411         list_for_each_entry(lun, &sc->luns, link) {
2412                 del_gendisk(lun->disk);
2413                 /*
2414                  * I wish I could do:
2415                  *    queue_flag_set(QUEUE_FLAG_DEAD, q);
2416                  * As it is, we rely on our internal poisoning and let
2417                  * the upper levels to spin furiously failing all the I/O.
2418                  */
2419         }
2420
2421         /*
2422          * Testing for -EINPROGRESS is always a bug, so we are bending
2423          * the rules a little.
2424          */
2425         spin_lock_irqsave(sc->lock, flags);
2426         if (sc->work_urb.status == -EINPROGRESS) {      /* janitors: ignore */
2427                 printk(KERN_WARNING "%s: "
2428                     "URB is active after disconnect\n", sc->name);
2429         }
2430         spin_unlock_irqrestore(sc->lock, flags);
2431
2432         /*
2433          * There is virtually no chance that other CPU runs times so long
2434          * after ub_urb_complete should have called del_timer, but only if HCD
2435          * didn't forget to deliver a callback on unlink.
2436          */
2437         del_timer_sync(&sc->work_timer);
2438
2439         /*
2440          * At this point there must be no commands coming from anyone
2441          * and no URBs left in transit.
2442          */
2443
2444         ub_put(sc);
2445 }
2446
2447 static struct usb_driver ub_driver = {
2448         .name =         "ub",
2449         .probe =        ub_probe,
2450         .disconnect =   ub_disconnect,
2451         .id_table =     ub_usb_ids,
2452 };
2453
2454 static int __init ub_init(void)
2455 {
2456         int rc;
2457         int i;
2458
2459         for (i = 0; i < UB_QLOCK_NUM; i++)
2460                 spin_lock_init(&ub_qlockv[i]);
2461
2462         if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2463                 goto err_regblkdev;
2464
2465         if ((rc = usb_register(&ub_driver)) != 0)
2466                 goto err_register;
2467
2468         usb_usual_set_present(USB_US_TYPE_UB);
2469         return 0;
2470
2471 err_register:
2472         unregister_blkdev(UB_MAJOR, DRV_NAME);
2473 err_regblkdev:
2474         return rc;
2475 }
2476
2477 static void __exit ub_exit(void)
2478 {
2479         usb_deregister(&ub_driver);
2480
2481         unregister_blkdev(UB_MAJOR, DRV_NAME);
2482         usb_usual_clear_present(USB_US_TYPE_UB);
2483 }
2484
2485 module_init(ub_init);
2486 module_exit(ub_exit);
2487
2488 MODULE_LICENSE("GPL");