* and is not licensed separately. See file COPYING for details.
*
* TODO (sorted by decreasing priority)
+ * -- Return sense now that rq allows it (we always auto-sense anyway).
* -- set readonly flag for CDs, set removable flag for CF readers
* -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
* -- verify the 13 conditions and do bulk resets
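 *
 * For the first item, a sketch only (it assumes the pc-request path
 * keeps auto-sensing into sc->top_sense, as it does later in this
 * patch; the rq->sense check is illustrative):
 *
 *	if (blk_pc_request(rq) && rq->sense != NULL) {
 *		memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
 *		rq->sense_len = UB_SENSE_SIZE;
 *	}
 */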
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#define DRV_NAME "ub"
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
-static void ub_end_rq(struct request *rq, unsigned int status);
+static void ub_end_rq(struct request *rq, unsigned int status,
+ unsigned int cmd_len);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_cleanup(struct ub_dev *sc)
{
struct list_head *p;
struct ub_lun *lun;
- request_queue_t *q;
+ struct request_queue *q;
while (!list_empty(&sc->luns)) {
p = sc->luns.next;
* The request function is our main entry point
*/
-static void ub_request_fn(request_queue_t *q)
+static void ub_request_fn(struct request_queue *q)
{
struct ub_lun *lun = q->queuedata;
struct request *rq;
if (atomic_read(&sc->poison)) {
blkdev_dequeue_request(rq);
- ub_end_rq(rq, DID_NO_CONNECT << 16);
+ ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
return 0;
}
if (lun->changed && !blk_pc_request(rq)) {
blkdev_dequeue_request(rq);
- ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
+ ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
return 0;
}
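/*
 * For reference (not part of the patch): the status word handed to
 * ub_end_rq() follows the SCSI midlayer packing -- SAM status in the
 * low byte, host byte at bit 16 -- so DID_NO_CONNECT << 16 means "the
 * host gave up on a vanished device", while SAM_STAT_CHECK_CONDITION
 * is a device-level status. The <scsi/scsi.h> decoders look like:
 *
 *	host_byte(result)	(((result) >> 16) & 0xff)
 *	status_byte(result)	(((result) >>  1) & 0x7f)
 */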
/*
* get scatterlist from block layer
*/
+ sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
if (n_elem < 0) {
/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
drop:
ub_put_cmd(lun, cmd);
- ub_end_rq(rq, DID_ERROR << 16);
+ ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
return 0;
}
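/*
 * Why the sg_init_table() above is now mandatory: since the sg chaining
 * work, the low bits of scatterlist.page_link carry the chain and
 * end-of-table markers (and CONFIG_DEBUG_SG adds a magic check), so the
 * table must be initialized before blk_rq_map_sg() fills it. A minimal
 * sketch of the pattern:
 *
 *	struct scatterlist sgv[UB_MAX_REQ_SG];
 *
 *	sg_init_table(sgv, UB_MAX_REQ_SG);
 *	n_elem = blk_rq_map_sg(lun->disk->queue, rq, sgv);
 */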
struct ub_request *urq = cmd->back;
struct request *rq;
unsigned int scsi_status;
+ unsigned int cmd_len;
rq = urq->rq;
	if (cmd->error == 0) {
		if (blk_pc_request(rq)) {
			if (cmd->act_len >= rq->data_len)
				rq->data_len = 0;
			else
				rq->data_len -= cmd->act_len;
+ scsi_status = 0;
+ } else {
+ if (cmd->act_len != cmd->len) {
+ if ((cmd->key == MEDIUM_ERROR ||
+ cmd->key == UNIT_ATTENTION) &&
+ ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
+ return;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ } else {
+ scsi_status = 0;
+ }
}
- scsi_status = 0;
} else {
		if (blk_pc_request(rq)) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
urq->rq = NULL;
+ cmd_len = cmd->len;
ub_put_cmd(lun, cmd);
- ub_end_rq(rq, scsi_status);
+ ub_end_rq(rq, scsi_status, cmd_len);
blk_start_queue(lun->disk->queue);
}
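/*
 * The short-transfer branch above retries only on MEDIUM_ERROR and
 * UNIT_ATTENTION. As a hypothetical helper (not in the patch), the
 * decision would read:
 *
 *	static int ub_is_retriable_key(unsigned char key)
 *	{
 *		return key == MEDIUM_ERROR || key == UNIT_ATTENTION;
 *	}
 */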
-static void ub_end_rq(struct request *rq, unsigned int scsi_status)
+static void ub_end_rq(struct request *rq, unsigned int scsi_status,
+ unsigned int cmd_len)
{
- int uptodate;
+ int error;
+ long rqlen;
if (scsi_status == 0) {
- uptodate = 1;
+ error = 0;
} else {
- uptodate = 0;
+ error = -EIO;
rq->errors = scsi_status;
}
- end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
- end_that_request_last(rq, uptodate);
+ rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */
+ if (__blk_end_request(rq, error, cmd_len)) {
+ printk(KERN_WARNING DRV_NAME
+ ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
+ blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
+ }
}
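/*
 * For reference (not part of the patch): __blk_end_request(rq, error,
 * nr_bytes) folds the old end_that_request_first()/_last() pair into
 * one call. It completes nr_bytes of the request and returns nonzero
 * when the request is not yet fully finished, which is exactly the
 * "blew" case the printk above reports. The __-prefixed variant
 * assumes the queue lock is already held, which fits here since ub
 * passes its own sc->lock to blk_init_queue(). In outline:
 *
 *	if (__blk_end_request(rq, error, bytes) != 0) {
 *		(partial completion, rq still owned by the driver)
 *	} else {
 *		(rq fully completed and released to the block layer)
 *	}
 */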
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
- /* Fill what we shouldn't be filling, because usb-storage did so. */
- sc->work_urb.actual_length = 0;
- sc->work_urb.error_count = 0;
- sc->work_urb.status = 0;
-
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
ub_complete(&sc->work_done);
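/*
 * The zeroing of actual_length, error_count and status that the patch
 * deletes here (and at every other submit site below) was copied from
 * usb-storage, as the removed comment admitted. It is redundant:
 * usb_submit_urb() re-arms urb->status (-EINPROGRESS) and zeroes
 * urb->actual_length itself before queuing, and error_count only
 * matters for isochronous URBs, so the fill plus the submit suffices:
 *
 *	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, buf, len,
 *			ub_urb_complete, sc);
 *	rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC);
 *
 * (buf/len stand in for the transfer buffer and its length.)
 */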
else
pipe = sc->send_bulk_pipe;
sc->last_pipe = pipe;
- usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
- page_address(sg->page) + sg->offset, sg->length,
- ub_urb_complete, sc);
- sc->work_urb.actual_length = 0;
- sc->work_urb.error_count = 0;
- sc->work_urb.status = 0;
+ usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
+ sg->length, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
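/*
 * sg_virt() is the accessor form of the open-coded expression the old
 * line used; in <linux/scatterlist.h> it amounts to:
 *
 *	static inline void *sg_virt(struct scatterlist *sg)
 *	{
 *		return page_address(sg_page(sg)) + sg->offset;
 *	}
 *
 * sg_page() strips the chain/end marker bits from sg->page_link, which
 * a raw sg->page dereference would not have done.
 */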
sc->last_pipe = sc->recv_bulk_pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
&sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
- sc->work_urb.actual_length = 0;
- sc->work_urb.error_count = 0;
- sc->work_urb.status = 0;
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
scmd->state = UB_CMDST_INIT;
scmd->nsg = 1;
sg = &scmd->sgv[0];
- sg->page = virt_to_page(sc->top_sense);
- sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
- sg->length = UB_SENSE_SIZE;
+ sg_init_table(sg, UB_MAX_REQ_SG);
+ sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
+ (unsigned long)sc->top_sense & (PAGE_SIZE-1));
scmd->len = UB_SENSE_SIZE;
scmd->lun = cmd->lun;
scmd->done = ub_top_sense_done;
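/*
 * Mind the argument order of the new accessor -- length comes before
 * offset:
 *
 *	sg_set_page(sg, page, len, offset);
 *
 * It encodes the page pointer into sg->page_link while preserving the
 * marker bits sg_init_table() established, which is why poking
 * sg->page, sg->offset and sg->length directly is no longer safe.
 */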
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
- sc->work_urb.actual_length = 0;
- sc->work_urb.error_count = 0;
- sc->work_urb.status = 0;
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
ub_complete(&sc->work_done);
#endif
#if 0 /* We let them stop themselves. */
- struct list_head *p;
struct ub_lun *lun;
- list_for_each(p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
blk_stop_queue(lun->disk->queue);
}
#endif
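/*
 * The list-iteration conversions in this patch all follow one shape:
 * the bare list_head cursor plus list_entry() collapses into
 * list_for_each_entry(), naming the list_head member explicitly:
 *
 *	struct ub_lun *lun;
 *
 *	list_for_each_entry(lun, &sc->luns, link)
 *		blk_stop_queue(lun->disk->queue);
 */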
{
struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
unsigned long flags;
- struct list_head *p;
struct ub_lun *lun;
int lkr, rc;
spin_lock_irqsave(sc->lock, flags);
sc->reset = 0;
tasklet_schedule(&sc->tasklet);
- list_for_each(p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
blk_start_queue(lun->disk->queue);
}
wake_up(&sc->reset_wait);
struct gendisk *disk = inode->i_bdev->bd_disk;
void __user *usermem = (void __user *) arg;
- return scsi_cmd_ioctl(filp, disk, cmd, usermem);
+ return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem);
}
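/*
 * scsi_cmd_ioctl() grew an explicit request_queue argument, which the
 * caller now supplies instead of the helper digging it out of the
 * gendisk; to the best of my reading the new prototype is:
 *
 *	int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
 *			struct gendisk *bd_disk, unsigned int cmd,
 *			void __user *arg);
 */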
/*
cmd->state = UB_CMDST_INIT;
cmd->nsg = 1;
sg = &cmd->sgv[0];
- sg->page = virt_to_page(p);
- sg->offset = (unsigned long)p & (PAGE_SIZE-1);
- sg->length = 8;
+ sg_init_table(sg, UB_MAX_REQ_SG);
+ sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
cmd->len = 8;
cmd->lun = lun;
cmd->done = ub_probe_done;
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
- sc->work_urb.actual_length = 0;
- sc->work_urb.error_count = 0;
- sc->work_urb.status = 0;
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
printk(KERN_WARNING
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
(unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
- sc->work_urb.actual_length = 0;
- sc->work_urb.error_count = 0;
- sc->work_urb.status = 0;
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
goto err_submit;
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
- sc->work_urb.actual_length = 0;
- sc->work_urb.error_count = 0;
- sc->work_urb.status = 0;
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
printk(KERN_WARNING
static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
struct ub_lun *lun;
- request_queue_t *q;
+ struct request_queue *q;
struct gendisk *disk;
int rc;
static void ub_disconnect(struct usb_interface *intf)
{
struct ub_dev *sc = usb_get_intfdata(intf);
- struct list_head *p;
struct ub_lun *lun;
unsigned long flags;
/*
* Unregister the upper layer.
*/
- list_for_each (p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
del_gendisk(lun->disk);
/*
* I wish I could do:
- * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+ * queue_flag_set(QUEUE_FLAG_DEAD, q);
 * As it is, we rely on our internal poisoning and let
 * the upper levels spin furiously, failing all the I/O.
*/
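/*
 * The "internal poisoning" relied upon above is the pattern already
 * visible in the request path earlier in this patch: disconnect raises
 * an atomic flag, and the request function fails everything after it.
 * In outline:
 *
 *	atomic_set(&sc->poison, 1);		(in ub_disconnect())
 *
 *	if (atomic_read(&sc->poison)) {		(in ub_request_fn_1())
 *		blkdev_dequeue_request(rq);
 *		ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
 *	}
 */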