X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fs390%2Fblock%2Fdasd.c;h=e64f62d5e0fc503f2fe7533c741778be1a5092d1;hb=9934c8c04561413609d2bc38c6b9f268cba774a4;hp=403957af5d711920c2bda9201cbd818a97aeb3aa;hpb=aaff0f644a182015622d7686a66986319a1085d1;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 403957a..e64f62d 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -9,6 +9,9 @@ * */ +#define KMSG_COMPONENT "dasd" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include #include #include @@ -17,11 +20,13 @@ #include #include #include +#include #include #include #include #include +#include /* This is ugly... */ #define PRINTK_HEADER "dasd:" @@ -48,43 +53,45 @@ MODULE_LICENSE("GPL"); /* * SECTION: prototypes for static functions of dasd.c */ -static int dasd_alloc_queue(struct dasd_device * device); -static void dasd_setup_queue(struct dasd_device * device); -static void dasd_free_queue(struct dasd_device * device); -static void dasd_flush_request_queue(struct dasd_device *); -static int dasd_flush_ccw_queue(struct dasd_device *, int); -static void dasd_tasklet(struct dasd_device *); +static int dasd_alloc_queue(struct dasd_block *); +static void dasd_setup_queue(struct dasd_block *); +static void dasd_free_queue(struct dasd_block *); +static void dasd_flush_request_queue(struct dasd_block *); +static int dasd_flush_block_queue(struct dasd_block *); +static void dasd_device_tasklet(struct dasd_device *); +static void dasd_block_tasklet(struct dasd_block *); static void do_kick_device(struct work_struct *); +static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); +static void dasd_device_timeout(unsigned long); +static void dasd_block_timeout(unsigned long); /* * SECTION: Operations on the device structure. */ static wait_queue_head_t dasd_init_waitq; static wait_queue_head_t dasd_flush_wq; +static wait_queue_head_t generic_waitq; /* * Allocate memory for a new device structure. */ -struct dasd_device * -dasd_alloc_device(void) +struct dasd_device *dasd_alloc_device(void) { struct dasd_device *device; - device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC); - if (device == NULL) + device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); + if (!device) return ERR_PTR(-ENOMEM); - /* open_count = 0 means device online but not in use */ - atomic_set(&device->open_count, -1); /* Get two pages for normal block device operations. */ device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); - if (device->ccw_mem == NULL) { + if (!device->ccw_mem) { kfree(device); return ERR_PTR(-ENOMEM); } /* Get one page for error recovery. 
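 * (As with the two ccw pages above: GFP_DMA keeps the page below 2 GB,
 * where s390 channel programs must reside, and GFP_ATOMIC allows the
 * allocation to be made from a context that must not sleep.)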
*/ device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); - if (device->erp_mem == NULL) { + if (!device->erp_mem) { free_pages((unsigned long) device->ccw_mem, 1); kfree(device); return ERR_PTR(-ENOMEM); @@ -93,13 +100,14 @@ dasd_alloc_device(void) dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); spin_lock_init(&device->mem_lock); - spin_lock_init(&device->request_queue_lock); - atomic_set (&device->tasklet_scheduled, 0); + atomic_set(&device->tasklet_scheduled, 0); tasklet_init(&device->tasklet, - (void (*)(unsigned long)) dasd_tasklet, + (void (*)(unsigned long)) dasd_device_tasklet, (unsigned long) device); INIT_LIST_HEAD(&device->ccw_queue); init_timer(&device->timer); + device->timer.function = dasd_device_timeout; + device->timer.data = (unsigned long) device; INIT_WORK(&device->kick_work, do_kick_device); device->state = DASD_STATE_NEW; device->target = DASD_STATE_NEW; @@ -110,8 +118,7 @@ dasd_alloc_device(void) /* * Free memory of a device structure. */ -void -dasd_free_device(struct dasd_device *device) +void dasd_free_device(struct dasd_device *device) { kfree(device->private); free_page((unsigned long) device->erp_mem); @@ -120,10 +127,44 @@ dasd_free_device(struct dasd_device *device) } /* + * Allocate memory for a new device structure. + */ +struct dasd_block *dasd_alloc_block(void) +{ + struct dasd_block *block; + + block = kzalloc(sizeof(*block), GFP_ATOMIC); + if (!block) + return ERR_PTR(-ENOMEM); + /* open_count = 0 means device online but not in use */ + atomic_set(&block->open_count, -1); + + spin_lock_init(&block->request_queue_lock); + atomic_set(&block->tasklet_scheduled, 0); + tasklet_init(&block->tasklet, + (void (*)(unsigned long)) dasd_block_tasklet, + (unsigned long) block); + INIT_LIST_HEAD(&block->ccw_queue); + spin_lock_init(&block->queue_lock); + init_timer(&block->timer); + block->timer.function = dasd_block_timeout; + block->timer.data = (unsigned long) block; + + return block; +} + +/* + * Free memory of a device structure. + */ +void dasd_free_block(struct dasd_block *block) +{ + kfree(block); +} + +/* * Make a new device known to the system. */ -static int -dasd_state_new_to_known(struct dasd_device *device) +static int dasd_state_new_to_known(struct dasd_device *device) { int rc; @@ -133,12 +174,13 @@ dasd_state_new_to_known(struct dasd_device *device) */ dasd_get_device(device); - rc = dasd_alloc_queue(device); - if (rc) { - dasd_put_device(device); - return rc; + if (device->block) { + rc = dasd_alloc_queue(device->block); + if (rc) { + dasd_put_device(device); + return rc; + } } - device->state = DASD_STATE_KNOWN; return 0; } @@ -146,21 +188,24 @@ dasd_state_new_to_known(struct dasd_device *device) /* * Let the system forget about a device. */ -static int -dasd_state_known_to_new(struct dasd_device * device) +static int dasd_state_known_to_new(struct dasd_device *device) { /* Disable extended error reporting for this device. */ dasd_eer_disable(device); /* Forget the discipline information. 
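 * (The discipline supplies the device-type specific methods, e.g. ECKD
 * or FBA; the module references taken when the discipline was attached
 * are dropped below.)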
*/ - if (device->discipline) + if (device->discipline) { + if (device->discipline->uncheck_device) + device->discipline->uncheck_device(device); module_put(device->discipline->owner); + } device->discipline = NULL; if (device->base_discipline) module_put(device->base_discipline->owner); device->base_discipline = NULL; device->state = DASD_STATE_NEW; - dasd_free_queue(device); + if (device->block) + dasd_free_queue(device->block); /* Give up reference we took in dasd_state_new_to_known. */ dasd_put_device(device); @@ -170,19 +215,19 @@ dasd_state_known_to_new(struct dasd_device * device) /* * Request the irq line for the device. */ -static int -dasd_state_known_to_basic(struct dasd_device * device) +static int dasd_state_known_to_basic(struct dasd_device *device) { int rc; /* Allocate and register gendisk structure. */ - rc = dasd_gendisk_alloc(device); - if (rc) - return rc; - + if (device->block) { + rc = dasd_gendisk_alloc(device->block); + if (rc) + return rc; + } /* register 'device' debug area, used for all DBF_DEV_XXX calls */ - device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, - 8 * sizeof (long)); + device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, + 8 * sizeof(long)); debug_register_view(device->debug_area, &debug_sprintf_view); debug_set_level(device->debug_area, DBF_WARNING); DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); @@ -194,16 +239,17 @@ dasd_state_known_to_basic(struct dasd_device * device) /* * Release the irq line for the device. Terminate any running i/o. */ -static int -dasd_state_basic_to_known(struct dasd_device * device) +static int dasd_state_basic_to_known(struct dasd_device *device) { int rc; - - dasd_gendisk_free(device); - rc = dasd_flush_ccw_queue(device, 1); + if (device->block) { + dasd_gendisk_free(device->block); + dasd_block_clear_timer(device->block); + } + rc = dasd_flush_device_queue(device); if (rc) return rc; - dasd_clear_timer(device); + dasd_device_clear_timer(device); DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); if (device->debug_area != NULL) { @@ -228,26 +274,32 @@ dasd_state_basic_to_known(struct dasd_device * device) * In case the analysis returns an error, the device setup is stopped * (a fake disk was already added to allow formatting). */ -static int -dasd_state_basic_to_ready(struct dasd_device * device) +static int dasd_state_basic_to_ready(struct dasd_device *device) { int rc; + struct dasd_block *block; rc = 0; - if (device->discipline->do_analysis != NULL) - rc = device->discipline->do_analysis(device); - if (rc) { - if (rc != -EAGAIN) - device->state = DASD_STATE_UNFMT; - return rc; - } + block = device->block; /* make disk known with correct capacity */ - dasd_setup_queue(device); - set_capacity(device->gdp, device->blocks << device->s2b_shift); - device->state = DASD_STATE_READY; - rc = dasd_scan_partitions(device); - if (rc) - device->state = DASD_STATE_BASIC; + if (block) { + if (block->base->discipline->do_analysis != NULL) + rc = block->base->discipline->do_analysis(block); + if (rc) { + if (rc != -EAGAIN) + device->state = DASD_STATE_UNFMT; + return rc; + } + dasd_setup_queue(block); + set_capacity(block->gdp, + block->blocks << block->s2b_shift); + device->state = DASD_STATE_READY; + rc = dasd_scan_partitions(block); + if (rc) + device->state = DASD_STATE_BASIC; + } else { + device->state = DASD_STATE_READY; + } return rc; } @@ -256,28 +308,31 @@ dasd_state_basic_to_ready(struct dasd_device * device) * Forget format information. 
Check if the target level is basic * and if it is create fake disk for formatting. */ -static int -dasd_state_ready_to_basic(struct dasd_device * device) +static int dasd_state_ready_to_basic(struct dasd_device *device) { int rc; - rc = dasd_flush_ccw_queue(device, 0); - if (rc) - return rc; - dasd_destroy_partitions(device); - dasd_flush_request_queue(device); - device->blocks = 0; - device->bp_block = 0; - device->s2b_shift = 0; device->state = DASD_STATE_BASIC; + if (device->block) { + struct dasd_block *block = device->block; + rc = dasd_flush_block_queue(block); + if (rc) { + device->state = DASD_STATE_READY; + return rc; + } + dasd_destroy_partitions(block); + dasd_flush_request_queue(block); + block->blocks = 0; + block->bp_block = 0; + block->s2b_shift = 0; + } return 0; } /* * Back to basic. */ -static int -dasd_state_unfmt_to_basic(struct dasd_device * device) +static int dasd_state_unfmt_to_basic(struct dasd_device *device) { device->state = DASD_STATE_BASIC; return 0; @@ -291,26 +346,58 @@ dasd_state_unfmt_to_basic(struct dasd_device * device) static int dasd_state_ready_to_online(struct dasd_device * device) { + int rc; + struct gendisk *disk; + struct disk_part_iter piter; + struct hd_struct *part; + + if (device->discipline->ready_to_online) { + rc = device->discipline->ready_to_online(device); + if (rc) + return rc; + } device->state = DASD_STATE_ONLINE; - dasd_schedule_bh(device); + if (device->block) { + dasd_schedule_block_bh(device->block); + disk = device->block->bdev->bd_disk; + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); + while ((part = disk_part_iter_next(&piter))) + kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); + disk_part_iter_exit(&piter); + } return 0; } /* * Stop the requeueing of requests again. */ -static int -dasd_state_online_to_ready(struct dasd_device * device) +static int dasd_state_online_to_ready(struct dasd_device *device) { + int rc; + struct gendisk *disk; + struct disk_part_iter piter; + struct hd_struct *part; + + if (device->discipline->online_to_ready) { + rc = device->discipline->online_to_ready(device); + if (rc) + return rc; + } device->state = DASD_STATE_READY; + if (device->block) { + disk = device->block->bdev->bd_disk; + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); + while ((part = disk_part_iter_next(&piter))) + kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); + disk_part_iter_exit(&piter); + } return 0; } /* * Device startup state changes. */ -static int -dasd_increase_state(struct dasd_device *device) +static int dasd_increase_state(struct dasd_device *device) { int rc; @@ -345,8 +432,7 @@ dasd_increase_state(struct dasd_device *device) /* * Device shutdown state changes. */ -static int -dasd_decrease_state(struct dasd_device *device) +static int dasd_decrease_state(struct dasd_device *device) { int rc; @@ -381,8 +467,7 @@ dasd_decrease_state(struct dasd_device *device) /* * This is the main startup/shutdown routine. 
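 * It moves the device one step at a time along the chain
 *   NEW <-> KNOWN <-> BASIC <-> READY (or UNFMT) <-> ONLINE
 * via dasd_increase_state/dasd_decrease_state until device->state
 * reaches device->target.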
*/ -static void -dasd_change_state(struct dasd_device *device) +static void dasd_change_state(struct dasd_device *device) { int rc; @@ -396,8 +481,10 @@ dasd_change_state(struct dasd_device *device) if (rc && rc != -EAGAIN) device->target = device->state; - if (device->state == device->target) + if (device->state == device->target) { wake_up(&dasd_init_waitq); + dasd_put_device(device); + } /* let user-space know that the device status changed */ kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); @@ -409,17 +496,15 @@ dasd_change_state(struct dasd_device *device) * dasd_kick_device will schedule a call do do_kick_device to the kernel * event daemon. */ -static void -do_kick_device(struct work_struct *work) +static void do_kick_device(struct work_struct *work) { struct dasd_device *device = container_of(work, struct dasd_device, kick_work); dasd_change_state(device); - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); dasd_put_device(device); } -void -dasd_kick_device(struct dasd_device *device) +void dasd_kick_device(struct dasd_device *device) { dasd_get_device(device); /* queue call to dasd_kick_device to the kernel event daemon. */ @@ -429,15 +514,17 @@ dasd_kick_device(struct dasd_device *device) /* * Set the target state for a device and starts the state change. */ -void -dasd_set_target_state(struct dasd_device *device, int target) +void dasd_set_target_state(struct dasd_device *device, int target) { + dasd_get_device(device); /* If we are in probeonly mode stop at DASD_STATE_READY. */ if (dasd_probeonly && target > DASD_STATE_READY) target = DASD_STATE_READY; if (device->target != target) { - if (device->state == target) + if (device->state == target) { wake_up(&dasd_init_waitq); + dasd_put_device(device); + } device->target = target; } if (device->state != device->target) @@ -447,14 +534,12 @@ dasd_set_target_state(struct dasd_device *device, int target) /* * Enable devices with device numbers in [from..to]. */ -static inline int -_wait_for_device(struct dasd_device *device) +static inline int _wait_for_device(struct dasd_device *device) { return (device->state == device->target); } -void -dasd_enable_device(struct dasd_device *device) +void dasd_enable_device(struct dasd_device *device) { dasd_set_target_state(device, DASD_STATE_ONLINE); if (device->state <= DASD_STATE_KNOWN) @@ -475,20 +560,20 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF; /* * Increments counter in global and local profiling structures. */ -#define dasd_profile_counter(value, counter, device) \ +#define dasd_profile_counter(value, counter, block) \ { \ int index; \ for (index = 0; index < 31 && value >> (2+index); index++); \ dasd_global_profile.counter[index]++; \ - device->profile.counter[index]++; \ + block->profile.counter[index]++; \ } /* * Add profiling information for cqr before execution. 
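 * (At submit time only the current length of the channel queue is
 * sampled; the timing histograms are updated by dasd_profile_end once
 * the request has completed.)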
*/ -static void -dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, - struct request *req) +static void dasd_profile_start(struct dasd_block *block, + struct dasd_ccw_req *cqr, + struct request *req) { struct list_head *l; unsigned int counter; @@ -498,19 +583,19 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, /* count the length of the chanq for statistics */ counter = 0; - list_for_each(l, &device->ccw_queue) + list_for_each(l, &block->ccw_queue) if (++counter >= 31) break; dasd_global_profile.dasd_io_nr_req[counter]++; - device->profile.dasd_io_nr_req[counter]++; + block->profile.dasd_io_nr_req[counter]++; } /* * Add profiling information for cqr after execution. */ -static void -dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, - struct request *req) +static void dasd_profile_end(struct dasd_block *block, + struct dasd_ccw_req *cqr, + struct request *req) { long strtime, irqtime, endtime, tottime; /* in microseconds */ long tottimeps, sectors; @@ -518,7 +603,7 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, if (dasd_profile_level != DASD_PROFILE_ON) return; - sectors = req->nr_sectors; + sectors = blk_rq_sectors(req); if (!cqr->buildclk || !cqr->startclk || !cqr->stopclk || !cqr->endclk || !sectors) @@ -532,27 +617,27 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, if (!dasd_global_profile.dasd_io_reqs) memset(&dasd_global_profile, 0, - sizeof (struct dasd_profile_info_t)); + sizeof(struct dasd_profile_info_t)); dasd_global_profile.dasd_io_reqs++; dasd_global_profile.dasd_io_sects += sectors; - if (!device->profile.dasd_io_reqs) - memset(&device->profile, 0, - sizeof (struct dasd_profile_info_t)); - device->profile.dasd_io_reqs++; - device->profile.dasd_io_sects += sectors; + if (!block->profile.dasd_io_reqs) + memset(&block->profile, 0, + sizeof(struct dasd_profile_info_t)); + block->profile.dasd_io_reqs++; + block->profile.dasd_io_sects += sectors; - dasd_profile_counter(sectors, dasd_io_secs, device); - dasd_profile_counter(tottime, dasd_io_times, device); - dasd_profile_counter(tottimeps, dasd_io_timps, device); - dasd_profile_counter(strtime, dasd_io_time1, device); - dasd_profile_counter(irqtime, dasd_io_time2, device); - dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device); - dasd_profile_counter(endtime, dasd_io_time3, device); + dasd_profile_counter(sectors, dasd_io_secs, block); + dasd_profile_counter(tottime, dasd_io_times, block); + dasd_profile_counter(tottimeps, dasd_io_timps, block); + dasd_profile_counter(strtime, dasd_io_time1, block); + dasd_profile_counter(irqtime, dasd_io_time2, block); + dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block); + dasd_profile_counter(endtime, dasd_io_time3, block); } #else -#define dasd_profile_start(device, cqr, req) do {} while (0) -#define dasd_profile_end(device, cqr, req) do {} while (0) +#define dasd_profile_start(block, cqr, req) do {} while (0) +#define dasd_profile_end(block, cqr, req) do {} while (0) #endif /* CONFIG_DASD_PROFILE */ /* @@ -562,9 +647,9 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, * memory and 2) dasd_smalloc_request uses the static ccw memory * that gets allocated for each device. 
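 * A discipline typically builds its channel program like this (sketch;
 * error handling left to the caller):
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return cqr;
 * On success cqr->cpaddr points to cplength struct ccw1 entries and
 * cqr->data to datasize bytes for the discipline's payload.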
*/ -struct dasd_ccw_req * -dasd_kmalloc_request(char *magic, int cplength, int datasize, - struct dasd_device * device) +struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, + int datasize, + struct dasd_device *device) { struct dasd_ccw_req *cqr; @@ -600,9 +685,9 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize, return cqr; } -struct dasd_ccw_req * -dasd_smalloc_request(char *magic, int cplength, int datasize, - struct dasd_device * device) +struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, + int datasize, + struct dasd_device *device) { unsigned long flags; struct dasd_ccw_req *cqr; @@ -649,8 +734,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize, * idal lists that might have been created by dasd_set_cda and the * struct dasd_ccw_req itself. */ -void -dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) +void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) { #ifdef CONFIG_64BIT struct ccw1 *ccw; @@ -667,8 +751,7 @@ dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) dasd_put_device(device); } -void -dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) +void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) { unsigned long flags; @@ -681,16 +764,15 @@ dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) /* * Check discipline magic in cqr. */ -static inline int -dasd_check_cqr(struct dasd_ccw_req *cqr) +static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) { struct dasd_device *device; if (cqr == NULL) return -EINVAL; - device = cqr->device; + device = cqr->startdev; if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { - DEV_MESSAGE(KERN_WARNING, device, + DBF_DEV_EVENT(DBF_WARNING, device, " dasd_ccw_req 0x%08x magic doesn't match" " discipline 0x%08x", cqr->magic, @@ -706,24 +788,24 @@ dasd_check_cqr(struct dasd_ccw_req *cqr) * ccw_device_clear can fail if the i/o subsystem * is in a bad mood. */ -int -dasd_term_IO(struct dasd_ccw_req * cqr) +int dasd_term_IO(struct dasd_ccw_req *cqr) { struct dasd_device *device; int retries, rc; + char errorstring[ERRORLENGTH]; /* Check the cqr */ rc = dasd_check_cqr(cqr); if (rc) return rc; retries = 0; - device = (struct dasd_device *) cqr->device; + device = (struct dasd_device *) cqr->startdev; while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { rc = ccw_device_clear(device->cdev, (long) cqr); switch (rc) { case 0: /* termination successful */ cqr->retries--; - cqr->status = DASD_CQR_CLEAR; + cqr->status = DASD_CQR_CLEAR_PENDING; cqr->stopclk = get_clock(); cqr->starttime = 0; DBF_DEV_EVENT(DBF_DEBUG, device, @@ -744,16 +826,16 @@ dasd_term_IO(struct dasd_ccw_req * cqr) "device busy, retry later"); break; default: - DEV_MESSAGE(KERN_ERR, device, - "line %d unknown RC=%d, please " - "report to linux390@de.ibm.com", - __LINE__, rc); + /* internal error 10 - unknown rc*/ + snprintf(errorstring, ERRORLENGTH, "10 %d", rc); + dev_err(&device->cdev->dev, "An error occurred in the " + "DASD device driver, reason=%s\n", errorstring); BUG(); break; } retries++; } - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); return rc; } @@ -761,29 +843,35 @@ dasd_term_IO(struct dasd_ccw_req * cqr) * Start the i/o. This start_IO can fail if the channel is really busy. * In that case set up a timer to start the request later. 
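 * (On -EBUSY or -ETIMEDOUT the request simply stays DASD_CQR_QUEUED;
 * __dasd_device_start_head then rearms the device timer for a short
 * retry.)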
*/ -int -dasd_start_IO(struct dasd_ccw_req * cqr) +int dasd_start_IO(struct dasd_ccw_req *cqr) { struct dasd_device *device; int rc; + char errorstring[ERRORLENGTH]; /* Check the cqr */ rc = dasd_check_cqr(cqr); if (rc) return rc; - device = (struct dasd_device *) cqr->device; + device = (struct dasd_device *) cqr->startdev; if (cqr->retries < 0) { - DEV_MESSAGE(KERN_DEBUG, device, - "start_IO: request %p (%02x/%i) - no retry left.", - cqr, cqr->status, cqr->retries); - cqr->status = DASD_CQR_FAILED; + /* internal error 14 - start_IO run out of retries */ + sprintf(errorstring, "14 %p", cqr); + dev_err(&device->cdev->dev, "An error occurred in the DASD " + "device driver, reason=%s\n", errorstring); + cqr->status = DASD_CQR_ERROR; return -EIO; } cqr->startclk = get_clock(); cqr->starttime = jiffies; cqr->retries--; - rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr, - cqr->lpm, 0); + if (cqr->cpmode == 1) { + rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, + (long) cqr, cqr->lpm); + } else { + rc = ccw_device_start(device->cdev, cqr->cpaddr, + (long) cqr, cqr->lpm, 0); + } switch (rc) { case 0: cqr->status = DASD_CQR_IN_IO; @@ -792,11 +880,11 @@ dasd_start_IO(struct dasd_ccw_req * cqr) cqr); break; case -EBUSY: - DBF_DEV_EVENT(DBF_ERR, device, "%s", + DBF_DEV_EVENT(DBF_DEBUG, device, "%s", "start_IO: device busy, retry later"); break; case -ETIMEDOUT: - DBF_DEV_EVENT(DBF_ERR, device, "%s", + DBF_DEV_EVENT(DBF_DEBUG, device, "%s", "start_IO: request timeout, retry later"); break; case -EACCES: @@ -806,19 +894,24 @@ dasd_start_IO(struct dasd_ccw_req * cqr) * Do a retry with all available pathes. */ cqr->lpm = LPM_ANYPATH; - DBF_DEV_EVENT(DBF_ERR, device, "%s", + DBF_DEV_EVENT(DBF_DEBUG, device, "%s", "start_IO: selected pathes gone," " retry on all pathes"); break; case -ENODEV: + DBF_DEV_EVENT(DBF_DEBUG, device, "%s", + "start_IO: -ENODEV device gone, retry"); + break; case -EIO: - DBF_DEV_EVENT(DBF_ERR, device, "%s", - "start_IO: device gone, retry"); + DBF_DEV_EVENT(DBF_DEBUG, device, "%s", + "start_IO: -EIO device gone, retry"); break; default: - DEV_MESSAGE(KERN_ERR, device, - "line %d unknown RC=%d, please report" - " to linux390@de.ibm.com", __LINE__, rc); + /* internal error 11 - unknown rc */ + snprintf(errorstring, ERRORLENGTH, "11 %d", rc); + dev_err(&device->cdev->dev, + "An error occurred in the DASD device driver, " + "reason=%s\n", errorstring); BUG(); break; } @@ -833,8 +926,7 @@ dasd_start_IO(struct dasd_ccw_req * cqr) * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), * DASD_CQR_QUEUED for 2) and 3). */ -static void -dasd_timeout_device(unsigned long ptr) +static void dasd_device_timeout(unsigned long ptr) { unsigned long flags; struct dasd_device *device; @@ -844,425 +936,289 @@ dasd_timeout_device(unsigned long ptr) /* re-activate request queue */ device->stopped &= ~DASD_STOPPED_PENDING; spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); } /* * Setup timeout for a device in jiffies. 
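 * An expires value of 0 cancels a pending timer; otherwise the timer is
 * (re)armed to fire at jiffies + expires, e.g.
 *	dasd_device_set_timer(device, 5*HZ);
 * to retry a failed termination after five seconds.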
*/ -void -dasd_set_timer(struct dasd_device *device, int expires) +void dasd_device_set_timer(struct dasd_device *device, int expires) { - if (expires == 0) { - if (timer_pending(&device->timer)) - del_timer(&device->timer); - return; - } - if (timer_pending(&device->timer)) { - if (mod_timer(&device->timer, jiffies + expires)) - return; - } - device->timer.function = dasd_timeout_device; - device->timer.data = (unsigned long) device; - device->timer.expires = jiffies + expires; - add_timer(&device->timer); + if (expires == 0) + del_timer(&device->timer); + else + mod_timer(&device->timer, jiffies + expires); } /* * Clear timeout for a device. */ -void -dasd_clear_timer(struct dasd_device *device) +void dasd_device_clear_timer(struct dasd_device *device) { - if (timer_pending(&device->timer)) - del_timer(&device->timer); + del_timer(&device->timer); } -static void -dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) +static void dasd_handle_killed_request(struct ccw_device *cdev, + unsigned long intparm) { struct dasd_ccw_req *cqr; struct dasd_device *device; + if (!intparm) + return; cqr = (struct dasd_ccw_req *) intparm; if (cqr->status != DASD_CQR_IN_IO) { - MESSAGE(KERN_DEBUG, + DBF_EVENT(DBF_DEBUG, "invalid status in handle_killed_request: " "bus_id %s, status %02x", - cdev->dev.bus_id, cqr->status); + dev_name(&cdev->dev), cqr->status); return; } - device = (struct dasd_device *) cqr->device; + device = (struct dasd_device *) cqr->startdev; if (device == NULL || device != dasd_device_from_cdev_locked(cdev) || strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { - MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", - cdev->dev.bus_id); + DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " + "bus_id %s", dev_name(&cdev->dev)); return; } /* Schedule request to be retried. */ cqr->status = DASD_CQR_QUEUED; - dasd_clear_timer(device); - dasd_schedule_bh(device); + dasd_device_clear_timer(device); + dasd_schedule_device_bh(device); dasd_put_device(device); } -static void -dasd_handle_state_change_pending(struct dasd_device *device) +void dasd_generic_handle_state_change(struct dasd_device *device) { - struct dasd_ccw_req *cqr; - struct list_head *l, *n; - /* First of all start sense subsystem status request. */ dasd_eer_snss(device); device->stopped &= ~DASD_STOPPED_PENDING; - - /* restart all 'running' IO on queue */ - list_for_each_safe(l, n, &device->ccw_queue) { - cqr = list_entry(l, struct dasd_ccw_req, list); - if (cqr->status == DASD_CQR_IN_IO) { - cqr->status = DASD_CQR_QUEUED; - } - } - dasd_clear_timer(device); - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); + if (device->block) + dasd_schedule_block_bh(device->block); } /* * Interrupt handler for "normal" ssch-io based dasd devices. */ -void -dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, - struct irb *irb) +void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) { struct dasd_ccw_req *cqr, *next; struct dasd_device *device; unsigned long long now; int expires; - dasd_era_t era; - char mask; if (IS_ERR(irb)) { switch (PTR_ERR(irb)) { case -EIO: - dasd_handle_killed_request(cdev, intparm); break; case -ETIMEDOUT: - printk(KERN_WARNING"%s(%s): request timed out\n", - __FUNCTION__, cdev->dev.bus_id); - //FIXME - dasd uses own timeout interface... 
+ DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n", + __func__, dev_name(&cdev->dev)); break; default: - printk(KERN_WARNING"%s(%s): unknown error %ld\n", - __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb)); + DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n", + __func__, dev_name(&cdev->dev), PTR_ERR(irb)); } + dasd_handle_killed_request(cdev, intparm); return; } now = get_clock(); - DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", - cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), - (unsigned int) intparm); - - /* first of all check for state change pending interrupt */ - mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; - if ((irb->scsw.dstat & mask) == mask) { + /* check for unsolicited interrupts */ + cqr = (struct dasd_ccw_req *) intparm; + if (!cqr || ((scsw_cc(&irb->scsw) == 1) && + (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && + (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) { + if (cqr && cqr->status == DASD_CQR_IN_IO) + cqr->status = DASD_CQR_QUEUED; device = dasd_device_from_cdev_locked(cdev); if (!IS_ERR(device)) { - dasd_handle_state_change_pending(device); + dasd_device_clear_timer(device); + device->discipline->handle_unsolicited_interrupt(device, + irb); dasd_put_device(device); } return; } - cqr = (struct dasd_ccw_req *) intparm; - - /* check for unsolicited interrupts */ - if (cqr == NULL) { - MESSAGE(KERN_DEBUG, - "unsolicited interrupt received: bus_id %s", - cdev->dev.bus_id); - return; - } - - device = (struct dasd_device *) cqr->device; - if (device == NULL || + device = (struct dasd_device *) cqr->startdev; + if (!device || strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { - MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", - cdev->dev.bus_id); + DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " + "bus_id %s", dev_name(&cdev->dev)); return; } /* Check for clear pending */ - if (cqr->status == DASD_CQR_CLEAR && - irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { - cqr->status = DASD_CQR_QUEUED; - dasd_clear_timer(device); + if (cqr->status == DASD_CQR_CLEAR_PENDING && + scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { + cqr->status = DASD_CQR_CLEARED; + dasd_device_clear_timer(device); wake_up(&dasd_flush_wq); - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); return; } - /* check status - the request might have been killed by dyn detach */ + /* check status - the request might have been killed by dyn detach */ if (cqr->status != DASD_CQR_IN_IO) { - MESSAGE(KERN_DEBUG, - "invalid status: bus_id %s, status %02x", - cdev->dev.bus_id, cqr->status); + DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " + "status %02x", dev_name(&cdev->dev), cqr->status); return; } - DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", - ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); - - /* Find out the appropriate era_action. 
*/ - if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) - era = dasd_era_fatal; - else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && - irb->scsw.cstat == 0 && - !irb->esw.esw0.erw.cons) - era = dasd_era_none; - else if (irb->esw.esw0.erw.cons) - era = device->discipline->examine_error(cqr, irb); - else - era = dasd_era_recover; - DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era); + next = NULL; expires = 0; - if (era == dasd_era_none) { - cqr->status = DASD_CQR_DONE; + if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && + scsw_cstat(&irb->scsw) == 0) { + /* request was completed successfully */ + cqr->status = DASD_CQR_SUCCESS; cqr->stopclk = now; /* Start first request on queue if possible -> fast_io. */ - if (cqr->list.next != &device->ccw_queue) { - next = list_entry(cqr->list.next, - struct dasd_ccw_req, list); - if ((next->status == DASD_CQR_QUEUED) && - (!device->stopped)) { - if (device->discipline->start_IO(next) == 0) - expires = next->expires; - else - DEV_MESSAGE(KERN_DEBUG, device, "%s", - "Interrupt fastpath " - "failed!"); - } + if (cqr->devlist.next != &device->ccw_queue) { + next = list_entry(cqr->devlist.next, + struct dasd_ccw_req, devlist); } - } else { /* error */ - memcpy(&cqr->irb, irb, sizeof (struct irb)); + } else { /* error */ + memcpy(&cqr->irb, irb, sizeof(struct irb)); + /* log sense for every failed I/O to s390 debugfeature */ + dasd_log_sense_dbf(cqr, irb); if (device->features & DASD_FEATURE_ERPLOG) { - /* dump sense data */ dasd_log_sense(cqr, irb); } - switch (era) { - case dasd_era_fatal: - cqr->status = DASD_CQR_FAILED; - cqr->stopclk = now; - break; - case dasd_era_recover: + + /* + * If we don't want complex ERP for this request, then just + * reset this and retry it in the fastpath + */ + if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && + cqr->retries > 0) { + if (cqr->lpm == LPM_ANYPATH) + DBF_DEV_EVENT(DBF_DEBUG, device, + "default ERP in fastpath " + "(%i retries left)", + cqr->retries); + cqr->lpm = LPM_ANYPATH; + cqr->status = DASD_CQR_QUEUED; + next = cqr; + } else cqr->status = DASD_CQR_ERROR; - break; - default: - BUG(); - } + } + if (next && (next->status == DASD_CQR_QUEUED) && + (!device->stopped)) { + if (device->discipline->start_IO(next) == 0) + expires = next->expires; } if (expires != 0) - dasd_set_timer(device, expires); + dasd_device_set_timer(device, expires); else - dasd_clear_timer(device); - dasd_schedule_bh(device); + dasd_device_clear_timer(device); + dasd_schedule_device_bh(device); } /* - * posts the buffer_cache about a finalized request + * If we have an error on a dasd_block layer request then we cancel + * and return all further requests from the same dasd_block as well. */ -static inline void -dasd_end_request(struct request *req, int uptodate) +static void __dasd_device_recovery(struct dasd_device *device, + struct dasd_ccw_req *ref_cqr) { - if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) - BUG(); - add_disk_randomness(req->rq_disk); - end_that_request_last(req, uptodate); -} + struct list_head *l, *n; + struct dasd_ccw_req *cqr; -/* - * Process finished error recovery ccw. 
- */ -static inline void -__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) -{ - dasd_erp_fn_t erp_fn; + /* + * only requeue request that came from the dasd_block layer + */ + if (!ref_cqr->block) + return; - if (cqr->status == DASD_CQR_DONE) - DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); - else - DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); - erp_fn = device->discipline->erp_postaction(cqr); - erp_fn(cqr); -} + list_for_each_safe(l, n, &device->ccw_queue) { + cqr = list_entry(l, struct dasd_ccw_req, devlist); + if (cqr->status == DASD_CQR_QUEUED && + ref_cqr->block == cqr->block) { + cqr->status = DASD_CQR_CLEARED; + } + } +}; /* - * Process ccw request queue. + * Remove those ccw requests from the queue that need to be returned + * to the upper layer. */ -static void -__dasd_process_ccw_queue(struct dasd_device * device, - struct list_head *final_queue) +static void __dasd_device_process_ccw_queue(struct dasd_device *device, + struct list_head *final_queue) { struct list_head *l, *n; struct dasd_ccw_req *cqr; - dasd_erp_fn_t erp_fn; -restart: /* Process request with final status. */ list_for_each_safe(l, n, &device->ccw_queue) { - cqr = list_entry(l, struct dasd_ccw_req, list); + cqr = list_entry(l, struct dasd_ccw_req, devlist); + /* Stop list processing at the first non-final request. */ - if (cqr->status != DASD_CQR_DONE && - cqr->status != DASD_CQR_FAILED && - cqr->status != DASD_CQR_ERROR) + if (cqr->status == DASD_CQR_QUEUED || + cqr->status == DASD_CQR_IN_IO || + cqr->status == DASD_CQR_CLEAR_PENDING) break; - /* Process requests with DASD_CQR_ERROR */ if (cqr->status == DASD_CQR_ERROR) { - if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) { - cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock(); - } else { - if (cqr->irb.esw.esw0.erw.cons && - test_bit(DASD_CQR_FLAGS_USE_ERP, - &cqr->flags)) { - erp_fn = device->discipline-> - erp_action(cqr); - erp_fn(cqr); - } else - dasd_default_erp_action(cqr); - } - goto restart; - } - - /* First of all call extended error reporting. */ - if (dasd_eer_enabled(device) && - cqr->status == DASD_CQR_FAILED) { - dasd_eer_write(device, cqr, DASD_EER_FATALERROR); - - /* restart request */ - cqr->status = DASD_CQR_QUEUED; - cqr->retries = 255; - device->stopped |= DASD_STOPPED_QUIESCE; - goto restart; - } - - /* Process finished ERP request. */ - if (cqr->refers) { - __dasd_process_erp(device, cqr); - goto restart; + __dasd_device_recovery(device, cqr); } - /* Rechain finished requests to final queue */ - cqr->endclk = get_clock(); - list_move_tail(&cqr->list, final_queue); + list_move_tail(&cqr->devlist, final_queue); } } -static void -dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data) -{ - struct request *req; - struct dasd_device *device; - int status; - - req = (struct request *) data; - device = cqr->device; - dasd_profile_end(device, cqr, req); - status = cqr->device->discipline->free_cp(cqr,req); - spin_lock_irq(&device->request_queue_lock); - dasd_end_request(req, status); - spin_unlock_irq(&device->request_queue_lock); -} - - /* - * Fetch requests from the block device queue. 
+ * the cqrs from the final queue are returned to the upper layer + * by setting a dasd_block state and calling the callback function */ -static void -__dasd_process_blk_queue(struct dasd_device * device) +static void __dasd_device_process_final_queue(struct dasd_device *device, + struct list_head *final_queue) { - request_queue_t *queue; - struct request *req; + struct list_head *l, *n; struct dasd_ccw_req *cqr; - int nr_queued; - - queue = device->request_queue; - /* No queue ? Then there is nothing to do. */ - if (queue == NULL) - return; - - /* - * We requeue request from the block device queue to the ccw - * queue only in two states. In state DASD_STATE_READY the - * partition detection is done and we need to requeue requests - * for that. State DASD_STATE_ONLINE is normal block device - * operation. - */ - if (device->state != DASD_STATE_READY && - device->state != DASD_STATE_ONLINE) - return; - nr_queued = 0; - /* Now we try to fetch requests from the request queue */ - list_for_each_entry(cqr, &device->ccw_queue, list) - if (cqr->status == DASD_CQR_QUEUED) - nr_queued++; - while (!blk_queue_plugged(queue) && - elv_next_request(queue) && - nr_queued < DASD_CHANQ_MAX_SIZE) { - req = elv_next_request(queue); - - if (device->features & DASD_FEATURE_READONLY && - rq_data_dir(req) == WRITE) { - DBF_DEV_EVENT(DBF_ERR, device, - "Rejecting write request %p", - req); - blkdev_dequeue_request(req); - dasd_end_request(req, 0); - continue; - } - if (device->stopped & DASD_STOPPED_DC_EIO) { - blkdev_dequeue_request(req); - dasd_end_request(req, 0); - continue; - } - cqr = device->discipline->build_cp(device, req); - if (IS_ERR(cqr)) { - if (PTR_ERR(cqr) == -ENOMEM) - break; /* terminate request queue loop */ - if (PTR_ERR(cqr) == -EAGAIN) { - /* - * The current request cannot be build right - * now, we have to try later. If this request - * is the head-of-queue we stop the device - * for 1/2 second. 
- */ - if (!list_empty(&device->ccw_queue)) - break; - device->stopped |= DASD_STOPPED_PENDING; - dasd_set_timer(device, HZ/2); - break; - } - DBF_DEV_EVENT(DBF_ERR, device, - "CCW creation failed (rc=%ld) " - "on request %p", - PTR_ERR(cqr), req); - blkdev_dequeue_request(req); - dasd_end_request(req, 0); - continue; + struct dasd_block *block; + void (*callback)(struct dasd_ccw_req *, void *data); + void *callback_data; + char errorstring[ERRORLENGTH]; + + list_for_each_safe(l, n, final_queue) { + cqr = list_entry(l, struct dasd_ccw_req, devlist); + list_del_init(&cqr->devlist); + block = cqr->block; + callback = cqr->callback; + callback_data = cqr->callback_data; + if (block) + spin_lock_bh(&block->queue_lock); + switch (cqr->status) { + case DASD_CQR_SUCCESS: + cqr->status = DASD_CQR_DONE; + break; + case DASD_CQR_ERROR: + cqr->status = DASD_CQR_NEED_ERP; + break; + case DASD_CQR_CLEARED: + cqr->status = DASD_CQR_TERMINATED; + break; + default: + /* internal error 12 - wrong cqr status*/ + snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); + dev_err(&device->cdev->dev, + "An error occurred in the DASD device driver, " + "reason=%s\n", errorstring); + BUG(); } - cqr->callback = dasd_end_request_cb; - cqr->callback_data = (void *) req; - cqr->status = DASD_CQR_QUEUED; - blkdev_dequeue_request(req); - list_add_tail(&cqr->list, &device->ccw_queue); - dasd_profile_start(device, cqr, req); - nr_queued++; + if (cqr->callback != NULL) + (callback)(cqr, callback_data); + if (block) + spin_unlock_bh(&block->queue_lock); } } @@ -1270,29 +1226,28 @@ __dasd_process_blk_queue(struct dasd_device * device) * Take a look at the first request on the ccw queue and check * if it reached its expire time. If so, terminate the IO. */ -static void -__dasd_check_expire(struct dasd_device * device) +static void __dasd_device_check_expire(struct dasd_device *device) { struct dasd_ccw_req *cqr; if (list_empty(&device->ccw_queue)) return; - cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); + cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { if (device->discipline->term_IO(cqr) != 0) { /* Hmpf, try again in 5 sec */ - dasd_set_timer(device, 5*HZ); - DEV_MESSAGE(KERN_ERR, device, - "internal error - timeout (%is) expired " - "for cqr %p, termination failed, " - "retrying in 5s", - (cqr->expires/HZ), cqr); + dev_err(&device->cdev->dev, + "cqr %p timed out (%is) but cannot be " + "ended, retrying in 5 s\n", + cqr, (cqr->expires/HZ)); + cqr->expires += 5*HZ; + dasd_device_set_timer(device, 5*HZ); } else { - DEV_MESSAGE(KERN_ERR, device, - "internal error - timeout (%is) expired " - "for cqr %p (%i retries left)", - (cqr->expires/HZ), cqr, cqr->retries); + dev_err(&device->cdev->dev, + "cqr %p timed out (%is), %i retries " + "remaining\n", cqr, (cqr->expires/HZ), + cqr->retries); } } } @@ -1301,176 +1256,121 @@ __dasd_check_expire(struct dasd_device * device) * Take a look at the first request on the ccw queue and check * if it needs to be started. 
*/ -static void -__dasd_start_head(struct dasd_device * device) +static void __dasd_device_start_head(struct dasd_device *device) { struct dasd_ccw_req *cqr; int rc; if (list_empty(&device->ccw_queue)) return; - cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); + cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); if (cqr->status != DASD_CQR_QUEUED) return; - /* Non-temporary stop condition will trigger fail fast */ - if (device->stopped & ~DASD_STOPPED_PENDING && - test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && - (!dasd_eer_enabled(device))) { - cqr->status = DASD_CQR_FAILED; - dasd_schedule_bh(device); + /* when device is stopped, return request to previous layer */ + if (device->stopped) { + cqr->status = DASD_CQR_CLEARED; + dasd_schedule_device_bh(device); return; } - /* Don't try to start requests if device is stopped */ - if (device->stopped) - return; rc = device->discipline->start_IO(cqr); if (rc == 0) - dasd_set_timer(device, cqr->expires); + dasd_device_set_timer(device, cqr->expires); else if (rc == -EACCES) { - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); } else /* Hmpf, try again in 1/2 sec */ - dasd_set_timer(device, 50); -} - -static inline int -_wait_for_clear(struct dasd_ccw_req *cqr) -{ - return (cqr->status == DASD_CQR_QUEUED); + dasd_device_set_timer(device, 50); } /* - * Remove all requests from the ccw queue (all = '1') or only block device - * requests in case all = '0'. - * Take care of the erp-chain (chained via cqr->refers) and remove either - * the whole erp-chain or none of the erp-requests. - * If a request is currently running, term_IO is called and the request - * is re-queued. Prior to removing the terminated request we need to wait - * for the clear-interrupt. - * In case termination is not possible we stop processing and just finishing - * the already moved requests. + * Go through all request on the dasd_device request queue, + * terminate them on the cdev if necessary, and return them to the + * submitting layer via callback. + * Note: + * Make sure that all 'submitting layers' still exist when + * this function is called!. In other words, when 'device' is a base + * device then all block layer requests must have been removed before + * via dasd_flush_block_queue. */ -static int -dasd_flush_ccw_queue(struct dasd_device * device, int all) +int dasd_flush_device_queue(struct dasd_device *device) { - struct dasd_ccw_req *cqr, *orig, *n; - int rc, i; - + struct dasd_ccw_req *cqr, *n; + int rc; struct list_head flush_queue; INIT_LIST_HEAD(&flush_queue); spin_lock_irq(get_ccwdev_lock(device->cdev)); rc = 0; -restart: - list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) { - /* get original request of erp request-chain */ - for (orig = cqr; orig->refers != NULL; orig = orig->refers); - - /* Flush all request or only block device requests? 
*/ - if (all == 0 && cqr->callback != dasd_end_request_cb && - orig->callback != dasd_end_request_cb) { - continue; - } + list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { /* Check status and move request to flush_queue */ switch (cqr->status) { case DASD_CQR_IN_IO: rc = device->discipline->term_IO(cqr); if (rc) { /* unable to terminate requeust */ - DEV_MESSAGE(KERN_ERR, device, - "dasd flush ccw_queue is unable " - " to terminate request %p", - cqr); + dev_err(&device->cdev->dev, + "Flushing the DASD request queue " + "failed for request %p\n", cqr); /* stop flush processing */ goto finished; } break; case DASD_CQR_QUEUED: - case DASD_CQR_ERROR: - /* set request to FAILED */ cqr->stopclk = get_clock(); - cqr->status = DASD_CQR_FAILED; + cqr->status = DASD_CQR_CLEARED; break; - default: /* do not touch the others */ + default: /* no need to modify the others */ break; } - /* Rechain request (including erp chain) */ - for (i = 0; cqr != NULL; cqr = cqr->refers, i++) { - cqr->endclk = get_clock(); - list_move_tail(&cqr->list, &flush_queue); - } - if (i > 1) - /* moved more than one request - need to restart */ - goto restart; + list_move_tail(&cqr->devlist, &flush_queue); } - finished: spin_unlock_irq(get_ccwdev_lock(device->cdev)); - /* Now call the callback function of flushed requests */ -restart_cb: - list_for_each_entry_safe(cqr, n, &flush_queue, list) { - if (cqr->status == DASD_CQR_CLEAR) { - /* wait for clear interrupt! */ - wait_event(dasd_flush_wq, _wait_for_clear(cqr)); - cqr->status = DASD_CQR_FAILED; - } - /* Process finished ERP request. */ - if (cqr->refers) { - __dasd_process_erp(device, cqr); - /* restart list_for_xx loop since dasd_process_erp - * might remove multiple elements */ - goto restart_cb; - } - /* call the callback function */ - cqr->endclk = get_clock(); - if (cqr->callback != NULL) - (cqr->callback)(cqr, cqr->callback_data); - } + /* + * After this point all requests must be in state CLEAR_PENDING, + * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become + * one of the others. + */ + list_for_each_entry_safe(cqr, n, &flush_queue, devlist) + wait_event(dasd_flush_wq, + (cqr->status != DASD_CQR_CLEAR_PENDING)); + /* + * Now set each request back to TERMINATED, DONE or NEED_ERP + * and call the callback function of flushed requests + */ + __dasd_device_process_final_queue(device, &flush_queue); return rc; } /* * Acquire the device lock and process queues for the device. */ -static void -dasd_tasklet(struct dasd_device * device) +static void dasd_device_tasklet(struct dasd_device *device) { struct list_head final_queue; - struct list_head *l, *n; - struct dasd_ccw_req *cqr; atomic_set (&device->tasklet_scheduled, 0); INIT_LIST_HEAD(&final_queue); spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Check expire time of first request on the ccw queue. 
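 * (The tasklet then moves requests with final status to final_queue,
 * runs their callbacks, and finally starts the new queue head.)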
*/ - __dasd_check_expire(device); - /* Finish off requests on ccw queue */ - __dasd_process_ccw_queue(device, &final_queue); + __dasd_device_check_expire(device); + /* find final requests on ccw queue */ + __dasd_device_process_ccw_queue(device, &final_queue); spin_unlock_irq(get_ccwdev_lock(device->cdev)); /* Now call the callback function of requests with final status */ - list_for_each_safe(l, n, &final_queue) { - cqr = list_entry(l, struct dasd_ccw_req, list); - list_del_init(&cqr->list); - if (cqr->callback != NULL) - (cqr->callback)(cqr, cqr->callback_data); - } - spin_lock_irq(&device->request_queue_lock); - spin_lock(get_ccwdev_lock(device->cdev)); - /* Get new request from the block device request queue */ - __dasd_process_blk_queue(device); + __dasd_device_process_final_queue(device, &final_queue); + spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Now check if the head of the ccw queue needs to be started. */ - __dasd_start_head(device); - spin_unlock(get_ccwdev_lock(device->cdev)); - spin_unlock_irq(&device->request_queue_lock); + __dasd_device_start_head(device); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); dasd_put_device(device); } /* * Schedules a call to dasd_tasklet over the device tasklet. */ -void -dasd_schedule_bh(struct dasd_device * device) +void dasd_schedule_device_bh(struct dasd_device *device) { /* Protect against rescheduling. */ if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) @@ -1480,160 +1380,105 @@ dasd_schedule_bh(struct dasd_device * device) } /* - * Queue a request to the head of the ccw_queue. Start the I/O if - * possible. + * Queue a request to the head of the device ccw_queue. + * Start the I/O if possible. */ -void -dasd_add_request_head(struct dasd_ccw_req *req) +void dasd_add_request_head(struct dasd_ccw_req *cqr) { struct dasd_device *device; unsigned long flags; - device = req->device; + device = cqr->startdev; spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); - req->status = DASD_CQR_QUEUED; - req->device = device; - list_add(&req->list, &device->ccw_queue); + cqr->status = DASD_CQR_QUEUED; + list_add(&cqr->devlist, &device->ccw_queue); /* let the bh start the request to keep them in order */ - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } /* - * Queue a request to the tail of the ccw_queue. Start the I/O if - * possible. + * Queue a request to the tail of the device ccw_queue. + * Start the I/O if possible. */ -void -dasd_add_request_tail(struct dasd_ccw_req *req) +void dasd_add_request_tail(struct dasd_ccw_req *cqr) { struct dasd_device *device; unsigned long flags; - device = req->device; + device = cqr->startdev; spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); - req->status = DASD_CQR_QUEUED; - req->device = device; - list_add_tail(&req->list, &device->ccw_queue); + cqr->status = DASD_CQR_QUEUED; + list_add_tail(&cqr->devlist, &device->ccw_queue); /* let the bh start the request to keep them in order */ - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } /* - * Wakeup callback. + * Wakeup helper for the 'sleep_on' functions. 
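+ * It is installed as cqr->callback with the wait queue head passed in
+ * cqr->callback_data, so completion of the request wakes the sleeper.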
*/ -static void -dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) +static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) { wake_up((wait_queue_head_t *) data); } -static inline int -_wait_for_wakeup(struct dasd_ccw_req *cqr) +static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) { struct dasd_device *device; int rc; - device = cqr->device; + device = cqr->startdev; spin_lock_irq(get_ccwdev_lock(device->cdev)); rc = ((cqr->status == DASD_CQR_DONE || - cqr->status == DASD_CQR_FAILED) && - list_empty(&cqr->list)); + cqr->status == DASD_CQR_NEED_ERP || + cqr->status == DASD_CQR_TERMINATED) && + list_empty(&cqr->devlist)); spin_unlock_irq(get_ccwdev_lock(device->cdev)); return rc; } /* - * Attempts to start a special ccw queue and waits for its completion. + * Queue a request to the tail of the device ccw_queue and wait for + * it's completion. */ -int -dasd_sleep_on(struct dasd_ccw_req * cqr) +int dasd_sleep_on(struct dasd_ccw_req *cqr) { - wait_queue_head_t wait_q; struct dasd_device *device; int rc; - device = cqr->device; - spin_lock_irq(get_ccwdev_lock(device->cdev)); + device = cqr->startdev; - init_waitqueue_head (&wait_q); cqr->callback = dasd_wakeup_cb; - cqr->callback_data = (void *) &wait_q; - cqr->status = DASD_CQR_QUEUED; - list_add_tail(&cqr->list, &device->ccw_queue); - - /* let the bh start the request to keep them in order */ - dasd_schedule_bh(device); - - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - - wait_event(wait_q, _wait_for_wakeup(cqr)); + cqr->callback_data = (void *) &generic_waitq; + dasd_add_request_tail(cqr); + wait_event(generic_waitq, _wait_for_wakeup(cqr)); /* Request status is either done or failed. */ - rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; + rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; return rc; } /* - * Attempts to start a special ccw queue and wait interruptible - * for its completion. + * Queue a request to the tail of the device ccw_queue and wait + * interruptible for it's completion. */ -int -dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr) +int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) { - wait_queue_head_t wait_q; struct dasd_device *device; - int rc, finished; - - device = cqr->device; - spin_lock_irq(get_ccwdev_lock(device->cdev)); + int rc; - init_waitqueue_head (&wait_q); + device = cqr->startdev; cqr->callback = dasd_wakeup_cb; - cqr->callback_data = (void *) &wait_q; - cqr->status = DASD_CQR_QUEUED; - list_add_tail(&cqr->list, &device->ccw_queue); - - /* let the bh start the request to keep them in order */ - dasd_schedule_bh(device); - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - - finished = 0; - while (!finished) { - rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr)); - if (rc != -ERESTARTSYS) { - /* Request is final (done or failed) */ - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; - break; - } - spin_lock_irq(get_ccwdev_lock(device->cdev)); - switch (cqr->status) { - case DASD_CQR_IN_IO: - /* terminate runnig cqr */ - if (device->discipline->term_IO) { - cqr->retries = -1; - device->discipline->term_IO(cqr); - /* wait (non-interruptible) for final status - * because signal ist still pending */ - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - wait_event(wait_q, _wait_for_wakeup(cqr)); - spin_lock_irq(get_ccwdev_lock(device->cdev)); - rc = (cqr->status == DASD_CQR_DONE) ? 
0 : -EIO; - finished = 1; - } - break; - case DASD_CQR_QUEUED: - /* request */ - list_del_init(&cqr->list); - rc = -EIO; - finished = 1; - break; - default: - /* cqr with 'non-interruptable' status - just wait */ - break; - } - spin_unlock_irq(get_ccwdev_lock(device->cdev)); + cqr->callback_data = (void *) &generic_waitq; + dasd_add_request_tail(cqr); + rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr)); + if (rc == -ERESTARTSYS) { + dasd_cancel_req(cqr); + /* wait (non-interruptible) for final status */ + wait_event(generic_waitq, _wait_for_wakeup(cqr)); } + rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; return rc; } @@ -1643,25 +1488,22 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr) * and be put back to status queued, before the special request is added * to the head of the queue. Then the special request is waited on normally. */ -static inline int -_dasd_term_running_cqr(struct dasd_device *device) +static inline int _dasd_term_running_cqr(struct dasd_device *device) { struct dasd_ccw_req *cqr; if (list_empty(&device->ccw_queue)) return 0; - cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); + cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); return device->discipline->term_IO(cqr); } -int -dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr) +int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) { - wait_queue_head_t wait_q; struct dasd_device *device; int rc; - device = cqr->device; + device = cqr->startdev; spin_lock_irq(get_ccwdev_lock(device->cdev)); rc = _dasd_term_running_cqr(device); if (rc) { @@ -1669,21 +1511,20 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr) return rc; } - init_waitqueue_head (&wait_q); cqr->callback = dasd_wakeup_cb; - cqr->callback_data = (void *) &wait_q; + cqr->callback_data = (void *) &generic_waitq; cqr->status = DASD_CQR_QUEUED; - list_add(&cqr->list, &device->ccw_queue); + list_add(&cqr->devlist, &device->ccw_queue); /* let the bh start the request to keep them in order */ - dasd_schedule_bh(device); + dasd_schedule_device_bh(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); - wait_event(wait_q, _wait_for_wakeup(cqr)); + wait_event(generic_waitq, _wait_for_wakeup(cqr)); /* Request status is either done or failed. */ - rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; + rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; return rc; } @@ -1692,11 +1533,14 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr) * This is useful to timeout requests. The request will be * terminated if it is currently in i/o. * Returns 1 if the request has been terminated. + * 0 if there was no need to terminate the request (not started yet) + * negative error code if termination failed + * Cancellation of a request is an asynchronous operation! The calling + * function has to wait until the request is properly returned via callback. 
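+ * (dasd_sleep_on_interruptible above shows the pattern: cancel the cqr,
+ * then wait_event(generic_waitq, _wait_for_wakeup(cqr)) until the final
+ * status is set.)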
@@ -1692,11 +1533,14 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
  * This is useful to timeout requests. The request will be
  * terminated if it is currently in i/o.
  * Returns 1 if the request has been terminated.
+ *	   0 if there was no need to terminate the request (not started yet)
+ *	   negative error code if termination failed
+ * Cancellation of a request is an asynchronous operation! The calling
+ * function has to wait until the request is properly returned via callback.
  */
-int
-dasd_cancel_req(struct dasd_ccw_req *cqr)
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
 {
-	struct dasd_device *device = cqr->device;
+	struct dasd_device *device = cqr->startdev;
 	unsigned long flags;
 	int rc;

@@ -1704,74 +1548,436 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	switch (cqr->status) {
 	case DASD_CQR_QUEUED:
-		/* request was not started - just set to failed */
-		cqr->status = DASD_CQR_FAILED;
+		/* request was not started - just set to cleared */
+		cqr->status = DASD_CQR_CLEARED;
 		break;
 	case DASD_CQR_IN_IO:
 		/* request in IO - terminate IO and release again */
-		if (device->discipline->term_IO(cqr) != 0)
-			/* what to do if unable to terminate ??????
-			   e.g. not _IN_IO */
-			cqr->status = DASD_CQR_FAILED;
-		cqr->stopclk = get_clock();
-		rc = 1;
+		rc = device->discipline->term_IO(cqr);
+		if (rc) {
+			dev_err(&device->cdev->dev,
+				"Cancelling request %p failed with rc=%d\n",
+				cqr, rc);
+		} else {
+			cqr->stopclk = get_clock();
+			rc = 1;
+		}
 		break;
-	case DASD_CQR_DONE:
-	case DASD_CQR_FAILED:
-		/* already finished - do nothing */
+	default: /* already finished or clear pending - do nothing */
 		break;
-	default:
-		DEV_MESSAGE(KERN_ALERT, device,
-			    "invalid status %02x in request",
-			    cqr->status);
-		BUG();
-
 	}
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
+	return rc;
+}
+
+
+/*
+ * SECTION: Operations of the dasd_block layer.
+ */
+
+/*
+ * Timeout function for dasd_block. This is used when the block layer
+ * is waiting for something that may not come reliably (e.g. a state
+ * change interrupt).
+ */
+static void dasd_block_timeout(unsigned long ptr)
+{
+	unsigned long flags;
+	struct dasd_block *block;
+
+	block = (struct dasd_block *) ptr;
+	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
+	/* re-activate request queue */
+	block->base->stopped &= ~DASD_STOPPED_PENDING;
+	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
+	dasd_schedule_block_bh(block);
+}
+
+/*
+ * Setup timeout for a dasd_block in jiffies.
+ */
+void dasd_block_set_timer(struct dasd_block *block, int expires)
+{
+	if (expires == 0)
+		del_timer(&block->timer);
+	else
+		mod_timer(&block->timer, jiffies + expires);
+}
+
+/*
+ * Clear timeout for a dasd_block.
+ */
+void dasd_block_clear_timer(struct dasd_block *block)
+{
+	del_timer(&block->timer);
+}
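
Usage sketch for the block timer: a caller that must back off stops the base device and arms the timer, and dasd_block_timeout() re-enables the queue when it fires. This is exactly the back-off pattern __dasd_process_request_queue() below uses; only the wrapper function name is made up.

static void my_backoff(struct dasd_block *block)
{
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	block->base->stopped |= DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_block_set_timer(block, HZ/2);	/* timeout clears the stop bit */
}
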
+
+/*
+ * Process finished error recovery ccw.
+ */
+static inline void __dasd_block_process_erp(struct dasd_block *block,
+					    struct dasd_ccw_req *cqr)
+{
+	dasd_erp_fn_t erp_fn;
+	struct dasd_device *device = block->base;
+
+	if (cqr->status == DASD_CQR_DONE)
+		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
+	else
+		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
+	erp_fn = device->discipline->erp_postaction(cqr);
+	erp_fn(cqr);
+}
+
+/*
+ * Fetch requests from the block device queue.
+ */
+static void __dasd_process_request_queue(struct dasd_block *block)
+{
+	struct request_queue *queue;
+	struct request *req;
+	struct dasd_ccw_req *cqr;
+	struct dasd_device *basedev;
+	unsigned long flags;
+	queue = block->request_queue;
+	basedev = block->base;
+	/* No queue? Then there is nothing to do. */
+	if (queue == NULL)
+		return;
+
+	/*
+	 * We requeue requests from the block device queue to the ccw
+	 * queue only in two states. In state DASD_STATE_READY the
+	 * partition detection is done and we need to requeue requests
+	 * for that. State DASD_STATE_ONLINE is normal block device
+	 * operation.
+	 */
+	if (basedev->state < DASD_STATE_READY)
+		return;
+	/* Now we try to fetch requests from the request queue */
+	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
+		if (basedev->features & DASD_FEATURE_READONLY &&
+		    rq_data_dir(req) == WRITE) {
+			DBF_DEV_EVENT(DBF_ERR, basedev,
+				      "Rejecting write request %p",
+				      req);
+			blk_start_request(req);
+			__blk_end_request_all(req, -EIO);
+			continue;
+		}
+		cqr = basedev->discipline->build_cp(basedev, block, req);
+		if (IS_ERR(cqr)) {
+			if (PTR_ERR(cqr) == -EBUSY)
+				break;	/* normal end condition */
+			if (PTR_ERR(cqr) == -ENOMEM)
+				break;	/* terminate request queue loop */
+			if (PTR_ERR(cqr) == -EAGAIN) {
+				/*
+				 * The current request cannot be built right
+				 * now, we have to try later. If this request
+				 * is the head-of-queue we stop the device
+				 * for 1/2 second.
+				 */
+				if (!list_empty(&block->ccw_queue))
+					break;
+				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
+				basedev->stopped |= DASD_STOPPED_PENDING;
+				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
+				dasd_block_set_timer(block, HZ/2);
+				break;
+			}
+			DBF_DEV_EVENT(DBF_ERR, basedev,
+				      "CCW creation failed (rc=%ld) "
+				      "on request %p",
+				      PTR_ERR(cqr), req);
+			blk_start_request(req);
+			__blk_end_request_all(req, -EIO);
+			continue;
+		}
+		/*
+		 * Note: callback is set to dasd_return_cqr_cb in
+		 * __dasd_block_start_head to cover erp requests as well
+		 */
+		cqr->callback_data = (void *) req;
+		cqr->status = DASD_CQR_FILLED;
+		blk_start_request(req);
+		list_add_tail(&cqr->blocklist, &block->ccw_queue);
+		dasd_profile_start(block, cqr, req);
+	}
+}
+
+static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
+{
+	struct request *req;
+	int status;
+	int error = 0;
+
+	req = (struct request *) cqr->callback_data;
+	dasd_profile_end(cqr->block, cqr, req);
+	status = cqr->block->base->discipline->free_cp(cqr, req);
+	if (status <= 0)
+		error = status ? status : -EIO;
+	__blk_end_request_all(req, error);
+}
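
A sketch of the error contract the fetch loop above relies on, as seen from a hypothetical discipline's build_cp (helper names and sizes are made up; only the ERR_PTR values matter): -EBUSY and -ENOMEM terminate the loop, -EAGAIN parks the device for half a second.

static struct dasd_ccw_req *my_build_cp(struct dasd_device *startdev,
					struct dasd_block *block,
					struct request *req)
{
	struct dasd_ccw_req *cqr;

	if (my_no_free_channel_program(startdev))	/* hypothetical check */
		return ERR_PTR(-EAGAIN);	/* retried after HZ/2 */
	cqr = dasd_smalloc_request("MYDC", 1 /* ccws */, 64 /* data */,
				   startdev);
	if (IS_ERR(cqr))
		return ERR_PTR(-ENOMEM);	/* ends the queue loop */
	cqr->startdev = startdev;
	cqr->block = block;
	cqr->retries = 255;
	cqr->buildclk = get_clock();
	return cqr;	/* the caller sets DASD_CQR_FILLED and queues it */
}
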
+
+/*
+ * Process ccw request queue.
+ */
+static void __dasd_process_block_ccw_queue(struct dasd_block *block,
+					   struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	dasd_erp_fn_t erp_fn;
+	unsigned long flags;
+	struct dasd_device *base = block->base;
+
+restart:
+	/* Process requests with final status. */
+	list_for_each_safe(l, n, &block->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		if (cqr->status != DASD_CQR_DONE &&
+		    cqr->status != DASD_CQR_FAILED &&
+		    cqr->status != DASD_CQR_NEED_ERP &&
+		    cqr->status != DASD_CQR_TERMINATED)
+			continue;
+
+		if (cqr->status == DASD_CQR_TERMINATED) {
+			base->discipline->handle_terminated_request(cqr);
+			goto restart;
+		}
+
+		/* Process requests that may be recovered */
+		if (cqr->status == DASD_CQR_NEED_ERP) {
+			erp_fn = base->discipline->erp_action(cqr);
+			erp_fn(cqr);
+			goto restart;
+		}
+
+		/* log sense for fatal error */
+		if (cqr->status == DASD_CQR_FAILED) {
+			dasd_log_sense(cqr, &cqr->irb);
+		}
+
+		/* First of all call extended error reporting. */
+		if (dasd_eer_enabled(base) &&
+		    cqr->status == DASD_CQR_FAILED) {
+			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
+
+			/* restart request  */
+			cqr->status = DASD_CQR_FILLED;
+			cqr->retries = 255;
+			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+			base->stopped |= DASD_STOPPED_QUIESCE;
+			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
+					       flags);
+			goto restart;
+		}
+
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			__dasd_block_process_erp(block, cqr);
+			goto restart;
+		}
+
+		/* Rechain finished requests to final queue */
+		cqr->endclk = get_clock();
+		list_move_tail(&cqr->blocklist, final_queue);
+	}
+}
+
+static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	dasd_schedule_block_bh(cqr->block);
+}
+
+static void __dasd_block_start_head(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr;
+
+	if (list_empty(&block->ccw_queue))
+		return;
+	/* We always begin with the first requests on the queue, as some
+	 * of the previously started requests have to be enqueued on a
+	 * dasd_device again for error recovery.
+	 */
+	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
+		if (cqr->status != DASD_CQR_FILLED)
+			continue;
+		/* Non-temporary stop condition will trigger fail fast */
+		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    (!dasd_eer_enabled(block->base))) {
+			cqr->status = DASD_CQR_FAILED;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
+		/* Don't try to start requests if device is stopped */
+		if (block->base->stopped)
+			return;
+
+		/* just a fail safe check, should not happen */
+		if (!cqr->startdev)
+			cqr->startdev = block->base;
+
+		/* make sure that the requests we submit find their way back */
+		cqr->callback = dasd_return_cqr_cb;
+
+		dasd_add_request_tail(cqr);
+	}
+}
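
Sketch of how a request opts in to that fail-fast path: a discipline's build_cp can set the flag for requests that must not be retried across a path outage. The flag and helpers are real; the condition shown mirrors what the ECKD discipline does and should be treated as an assumption here.

static void my_apply_failfast(struct request *req, struct dasd_ccw_req *cqr,
			      struct dasd_block *block)
{
	if (blk_noretry_request(req) ||
	    (block->base->features & DASD_FEATURE_FAILFAST))
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
}
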
+
+/*
+ * Central dasd_block layer routine. Takes requests from the generic
+ * block layer request queue, creates ccw requests, enqueues them on
+ * a dasd_device and processes ccw requests that have been returned.
+ */
+static void dasd_block_tasklet(struct dasd_block *block)
+{
+	struct list_head final_queue;
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	atomic_set(&block->tasklet_scheduled, 0);
+	INIT_LIST_HEAD(&final_queue);
+	spin_lock(&block->queue_lock);
+	/* Finish off requests on ccw queue */
+	__dasd_process_block_ccw_queue(block, &final_queue);
+	spin_unlock(&block->queue_lock);
+	/* Now call the callback function of requests with final status */
+	spin_lock_irq(&block->request_queue_lock);
+	list_for_each_safe(l, n, &final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		list_del_init(&cqr->blocklist);
+		__dasd_cleanup_cqr(cqr);
+	}
+	spin_lock(&block->queue_lock);
+	/* Get new request from the block device request queue */
+	__dasd_process_request_queue(block);
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_block_start_head(block);
+	spin_unlock(&block->queue_lock);
+	spin_unlock_irq(&block->request_queue_lock);
+	dasd_put_device(block->base);
+}
+
+static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	wake_up(&dasd_flush_wq);
+}
+
+/*
+ * Go through all requests on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr, *n;
+	int rc, i;
+	struct list_head flush_queue;
+
+	INIT_LIST_HEAD(&flush_queue);
+	spin_lock_bh(&block->queue_lock);
+	rc = 0;
+restart:
+	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+		/* if this request is currently owned by a dasd_device, cancel it */
+		if (cqr->status >= DASD_CQR_QUEUED)
+			rc = dasd_cancel_req(cqr);
+		if (rc < 0)
+			break;
+		/* Rechain request (including erp chain) so it won't be
+		 * touched by the dasd_block_tasklet anymore.
+		 * Replace the callback so we notice when the request
+		 * is returned from the dasd_device layer.
+		 */
+		cqr->callback = _dasd_wake_block_flush_cb;
+		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
+			list_move_tail(&cqr->blocklist, &flush_queue);
+		if (i > 1)
+			/* moved more than one request - need to restart */
+			goto restart;
+	}
+	spin_unlock_bh(&block->queue_lock);
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
+		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_block_process_erp(block, cqr);
+			spin_unlock_bh(&block->queue_lock);
+			/* restart the list_for_each_entry_safe loop since
+			 * __dasd_block_process_erp might remove multiple elements */
+			goto restart_cb;
+		}
+		/* call the callback function */
+		spin_lock_irq(&block->request_queue_lock);
+		cqr->endclk = get_clock();
+		list_del_init(&cqr->blocklist);
+		__dasd_cleanup_cqr(cqr);
+		spin_unlock_irq(&block->request_queue_lock);
+	}
 	return rc;
 }

 /*
- * SECTION: Block device operations (request queue, partitions, open, release).
+ * Schedules a call to dasd_block_tasklet over the block tasklet.
+ */
+void dasd_schedule_block_bh(struct dasd_block *block)
+{
+	/* Protect against rescheduling. */
+	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
+		return;
+	/* life cycle of block is bound to its base device */
+	dasd_get_device(block->base);
+	tasklet_hi_schedule(&block->tasklet);
+}
+
+
+/*
+ * SECTION: external block device operations
+ * (request queue handling, open, release, etc.)
  */

 /*
  * Dasd request queue function. Called from ll_rw_blk.c
  */
-static void
-do_dasd_request(request_queue_t * queue)
+static void do_dasd_request(struct request_queue *queue)
 {
-	struct dasd_device *device;
+	struct dasd_block *block;

-	device = (struct dasd_device *) queue->queuedata;
-	spin_lock(get_ccwdev_lock(device->cdev));
+	block = queue->queuedata;
+	spin_lock(&block->queue_lock);
 	/* Get new request from the block device request queue */
-	__dasd_process_blk_queue(device);
+	__dasd_process_request_queue(block);
 	/* Now check if the head of the ccw queue needs to be started. */
-	__dasd_start_head(device);
-	spin_unlock(get_ccwdev_lock(device->cdev));
+	__dasd_block_start_head(block);
+	spin_unlock(&block->queue_lock);
 }

 /*
  * Allocate and initialize request queue and default I/O scheduler.
  */
-static int
-dasd_alloc_queue(struct dasd_device * device)
+static int dasd_alloc_queue(struct dasd_block *block)
 {
 	int rc;

-	device->request_queue = blk_init_queue(do_dasd_request,
-					       &device->request_queue_lock);
-	if (device->request_queue == NULL)
+	block->request_queue = blk_init_queue(do_dasd_request,
+					       &block->request_queue_lock);
+	if (block->request_queue == NULL)
 		return -ENOMEM;

-	device->request_queue->queuedata = device;
+	block->request_queue->queuedata = block;

-	elevator_exit(device->request_queue->elevator);
-	rc = elevator_init(device->request_queue, "deadline");
+	elevator_exit(block->request_queue->elevator);
+	block->request_queue->elevator = NULL;
+	rc = elevator_init(block->request_queue, "deadline");
 	if (rc) {
-		blk_cleanup_queue(device->request_queue);
+		blk_cleanup_queue(block->request_queue);
 		return rc;
 	}
 	return 0;
@@ -1780,79 +1986,77 @@ dasd_alloc_queue(struct dasd_device * device)
 /*
  * Allocate and initialize request queue.
  */
-static void
-dasd_setup_queue(struct dasd_device * device)
+static void dasd_setup_queue(struct dasd_block *block)
 {
 	int max;

-	blk_queue_hardsect_size(device->request_queue, device->bp_block);
-	max = device->discipline->max_blocks << device->s2b_shift;
-	blk_queue_max_sectors(device->request_queue, max);
-	blk_queue_max_phys_segments(device->request_queue, -1L);
-	blk_queue_max_hw_segments(device->request_queue, -1L);
-	blk_queue_max_segment_size(device->request_queue, -1L);
-	blk_queue_segment_boundary(device->request_queue, -1L);
-	blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
+	blk_queue_hardsect_size(block->request_queue, block->bp_block);
+	max = block->base->discipline->max_blocks << block->s2b_shift;
+	blk_queue_max_sectors(block->request_queue, max);
+	blk_queue_max_phys_segments(block->request_queue, -1L);
+	blk_queue_max_hw_segments(block->request_queue, -1L);
+	/* with page sized segments we can translate each segment into
+	 * one idaw/tidaw
+	 */
+	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
+	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
+	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
 }

 /*
  * Deactivate and free request queue.
  */
-static void
-dasd_free_queue(struct dasd_device * device)
+static void dasd_free_queue(struct dasd_block *block)
 {
-	if (device->request_queue) {
-		blk_cleanup_queue(device->request_queue);
-		device->request_queue = NULL;
+	if (block->request_queue) {
+		blk_cleanup_queue(block->request_queue);
+		block->request_queue = NULL;
 	}
 }

 /*
  * Flush request on the request queue.
  */
-static void
-dasd_flush_request_queue(struct dasd_device * device)
+static void dasd_flush_request_queue(struct dasd_block *block)
 {
 	struct request *req;

-	if (!device->request_queue)
+	if (!block->request_queue)
 		return;

-	spin_lock_irq(&device->request_queue_lock);
-	while ((req = elv_next_request(device->request_queue))) {
-		blkdev_dequeue_request(req);
-		dasd_end_request(req, 0);
-	}
-	spin_unlock_irq(&device->request_queue_lock);
+	spin_lock_irq(&block->request_queue_lock);
+	while ((req = blk_fetch_request(block->request_queue)))
+		__blk_end_request_all(req, -EIO);
+	spin_unlock_irq(&block->request_queue_lock);
 }

-static int
-dasd_open(struct inode *inp, struct file *filp)
+static int dasd_open(struct block_device *bdev, fmode_t mode)
 {
-	struct gendisk *disk = inp->i_bdev->bd_disk;
-	struct dasd_device *device = disk->private_data;
+	struct dasd_block *block = bdev->bd_disk->private_data;
+	struct dasd_device *base = block->base;
 	int rc;

-	atomic_inc(&device->open_count);
-	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+	atomic_inc(&block->open_count);
+	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
 		rc = -ENODEV;
 		goto unlock;
 	}

-	if (!try_module_get(device->discipline->owner)) {
+	if (!try_module_get(base->discipline->owner)) {
 		rc = -EINVAL;
 		goto unlock;
 	}

 	if (dasd_probeonly) {
-		DEV_MESSAGE(KERN_INFO, device, "%s",
-			    "No access to device due to probeonly mode");
+		dev_info(&base->cdev->dev,
+			 "Accessing the DASD failed because it is in "
+			 "probeonly mode\n");
 		rc = -EPERM;
 		goto out;
 	}

-	if (device->state <= DASD_STATE_BASIC) {
-		DBF_DEV_EVENT(DBF_ERR, device, " %s",
+	if (base->state <= DASD_STATE_BASIC) {
+		DBF_DEV_EVENT(DBF_ERR, base, " %s",
 			      " Cannot open unrecognized device");
 		rc = -ENODEV;
 		goto out;
@@ -1861,41 +2065,40 @@ dasd_open(struct inode *inp, struct file *filp)
 	return 0;

 out:
-	module_put(device->discipline->owner);
+	module_put(base->discipline->owner);
 unlock:
-	atomic_dec(&device->open_count);
+	atomic_dec(&block->open_count);
 	return rc;
 }

-static int
-dasd_release(struct inode *inp, struct file *filp)
+static int dasd_release(struct gendisk *disk, fmode_t mode)
 {
-	struct gendisk *disk = inp->i_bdev->bd_disk;
-	struct dasd_device *device = disk->private_data;
+	struct dasd_block *block = disk->private_data;

-	atomic_dec(&device->open_count);
-	module_put(device->discipline->owner);
+	atomic_dec(&block->open_count);
+	module_put(block->base->discipline->owner);
 	return 0;
 }

 /*
  * Return disk geometry.
  */
-static int
-dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
-	struct dasd_device *device;
+	struct dasd_block *block;
+	struct dasd_device *base;

-	device = bdev->bd_disk->private_data;
-	if (!device)
+	block = bdev->bd_disk->private_data;
+	if (!block)
 		return -ENODEV;
+	base = block->base;

-	if (!device->discipline ||
-	    !device->discipline->fill_geometry)
+	if (!base->discipline ||
+	    !base->discipline->fill_geometry)
 		return -EINVAL;

-	device->discipline->fill_geometry(device, geo);
-	geo->start = get_start_sect(bdev) >> device->s2b_shift;
+	base->discipline->fill_geometry(block, geo);
+	geo->start = get_start_sect(bdev) >> block->s2b_shift;
 	return 0;
 }
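
The geometry assembled here is what userspace receives through the HDIO_GETGEO ioctl. A quick test program (assumptions: a /dev/dasda node exists and is readable):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/dasda", O_RDONLY);	/* assumed device node */

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0)
		return 1;
	printf("heads=%u sectors=%u cylinders=%u start=%lu\n",
	       geo.heads, geo.sectors, geo.cylinders, geo.start);
	return 0;
}
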
@@ -1905,10 +2108,13 @@ dasd_device_operations = {
 	.open		= dasd_open,
 	.release	= dasd_release,
 	.ioctl		= dasd_ioctl,
-	.compat_ioctl	= dasd_compat_ioctl,
+	.compat_ioctl	= dasd_ioctl,
 	.getgeo		= dasd_getgeo,
 };

+/*******************************************************************************
+ * end of block device operations
+ */

 static void
 dasd_exit(void)
@@ -1933,28 +2139,43 @@ dasd_exit(void)
  * SECTION: common functions for ccw_driver use
  */

+static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
+{
+	struct ccw_device *cdev = data;
+	int ret;
+
+	ret = ccw_device_set_online(cdev);
+	if (ret)
+		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
+			   dev_name(&cdev->dev), ret);
+	else {
+		struct dasd_device *device = dasd_device_from_cdev(cdev);
+		wait_event(dasd_init_waitq, _wait_for_device(device));
+		dasd_put_device(device);
+	}
+}
+
 /*
  * Initial attempt at a probe function. this can be simplified once
  * the other detection code is gone.
  */
-int
-dasd_generic_probe (struct ccw_device *cdev,
-		    struct dasd_discipline *discipline)
+int dasd_generic_probe(struct ccw_device *cdev,
+		       struct dasd_discipline *discipline)
 {
 	int ret;

 	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
 	if (ret) {
-		printk(KERN_WARNING
+		DBF_EVENT(DBF_WARNING,
 		       "dasd_generic_probe: could not set ccw-device options "
-		       "for %s\n", cdev->dev.bus_id);
+		       "for %s\n", dev_name(&cdev->dev));
 		return ret;
 	}
 	ret = dasd_add_sysfs_files(cdev);
 	if (ret) {
-		printk(KERN_WARNING
+		DBF_EVENT(DBF_WARNING,
 		       "dasd_generic_probe: could not add sysfs entries "
-		       "for %s\n", cdev->dev.bus_id);
+		       "for %s\n", dev_name(&cdev->dev));
 		return ret;
 	}
 	cdev->handler = &dasd_int_handler;

@@ -1965,23 +2186,19 @@ dasd_generic_probe (struct ccw_device *cdev,
 	 * initial probe.
 	 */
 	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
-	    (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
-		ret = ccw_device_set_online(cdev);
-	if (ret)
-		printk(KERN_WARNING
-		       "dasd_generic_probe: could not initially online "
-		       "ccw-device %s\n", cdev->dev.bus_id);
-	return ret;
+	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
+		async_schedule(dasd_generic_auto_online, cdev);
+	return 0;
 }

 /*
  * This will one day be called from a global not_oper handler.
  * It is also used by driver_unregister during module unload.
  */
-void
-dasd_generic_remove (struct ccw_device *cdev)
+void dasd_generic_remove(struct ccw_device *cdev)
 {
 	struct dasd_device *device;
+	struct dasd_block *block;

 	cdev->handler = NULL;

@@ -2001,7 +2218,15 @@ dasd_generic_remove (struct ccw_device *cdev)
 	 */
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
+	block = device->block;
+	device->block = NULL;
 	dasd_delete_device(device);
+	/*
+	 * life cycle of block is bound to device, so delete it after
+	 * device was safely removed
+	 */
+	if (block)
+		dasd_free_block(block);
 }
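
For orientation, a sketch of how a discipline wires these generic helpers into its ccw_driver (modeled on the ECKD discipline of this era; all "my" names and the .name string are placeholders):

static int my_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &my_discipline);
}

static struct ccw_driver my_dasd_driver = {
	.name		= "dasd-mydisc",		/* placeholder */
	.owner		= THIS_MODULE,
	.ids		= my_ids,	/* placeholder ccw_device_id table */
	.probe		= my_probe,
	.remove		= dasd_generic_remove,
	.set_online	= my_set_online,  /* calls dasd_generic_set_online() */
	.set_offline	= dasd_generic_set_offline,
	.notify		= dasd_generic_notify,
};
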

 /*
@@ -2009,10 +2234,8 @@ dasd_generic_remove (struct ccw_device *cdev)
  * the device is detected for the first time and is supposed to be used
  * or the user has started activation through sysfs.
  */
-int
-dasd_generic_set_online (struct ccw_device *cdev,
-			 struct dasd_discipline *base_discipline)
-
+int dasd_generic_set_online(struct ccw_device *cdev,
+			    struct dasd_discipline *base_discipline)
 {
 	struct dasd_discipline *discipline;
 	struct dasd_device *device;
 	int rc;
@@ -2027,10 +2250,9 @@ dasd_generic_set_online (struct ccw_device *cdev,
 	discipline = base_discipline;
 	if (device->features & DASD_FEATURE_USEDIAG) {
 		if (!dasd_diag_discipline_pointer) {
-			printk (KERN_WARNING
-				"dasd_generic couldn't online device %s "
-				"- discipline DIAG not available\n",
-				cdev->dev.bus_id);
+			pr_warning("%s: Setting the DASD online failed because "
+				   "of missing DIAG discipline\n",
+				   dev_name(&cdev->dev));
 			dasd_delete_device(device);
 			return -ENODEV;
 		}
@@ -2048,12 +2270,12 @@ dasd_generic_set_online (struct ccw_device *cdev,
 	device->base_discipline = base_discipline;
 	device->discipline = discipline;

+	/* check_device will allocate block device if necessary */
 	rc = discipline->check_device(device);
 	if (rc) {
-		printk (KERN_WARNING
-			"dasd_generic couldn't online device %s "
-			"with discipline %s rc=%i\n",
-			cdev->dev.bus_id, discipline->name, rc);
+		pr_warning("%s: Setting the DASD online with discipline %s "
+			   "failed with rc=%i\n",
+			   dev_name(&cdev->dev), discipline->name, rc);
 		module_put(discipline->owner);
 		module_put(base_discipline->owner);
 		dasd_delete_device(device);
@@ -2062,29 +2284,24 @@ dasd_generic_set_online (struct ccw_device *cdev,

 	dasd_set_target_state(device, DASD_STATE_ONLINE);
 	if (device->state <= DASD_STATE_KNOWN) {
-		printk (KERN_WARNING
-			"dasd_generic discipline not found for %s\n",
-			cdev->dev.bus_id);
+		pr_warning("%s: Setting the DASD online failed because of a "
+			   "missing discipline\n", dev_name(&cdev->dev));
 		rc = -ENODEV;
 		dasd_set_target_state(device, DASD_STATE_NEW);
+		if (device->block)
+			dasd_free_block(device->block);
 		dasd_delete_device(device);
 	} else
 		pr_debug("dasd_generic device %s found\n",
-				cdev->dev.bus_id);
-
-	/* FIXME: we have to wait for the root device but we don't want
-	 * to wait for each single device but for all at once. */
-	wait_event(dasd_init_waitq, _wait_for_device(device));
-
+				dev_name(&cdev->dev));
 	dasd_put_device(device);
-
 	return rc;
 }

-int
-dasd_generic_set_offline (struct ccw_device *cdev)
+int dasd_generic_set_offline(struct ccw_device *cdev)
 {
 	struct dasd_device *device;
+	struct dasd_block *block;
 	int max_count, open_count;

 	device = dasd_device_from_cdev(cdev);
@@ -2101,43 +2318,50 @@ dasd_generic_set_offline (struct ccw_device *cdev)
 	 * the blkdev_get in dasd_scan_partitions. We are only interested
 	 * in the other openers.
 	 */
-	max_count = device->bdev ? 0 : -1;
-	open_count = (int) atomic_read(&device->open_count);
-	if (open_count > max_count) {
-		if (open_count > 0)
-			printk (KERN_WARNING "Can't offline dasd device with "
-				"open count = %i.\n",
-				open_count);
-		else
-			printk (KERN_WARNING "%s",
-				"Can't offline dasd device due to internal "
-				"use\n");
-		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
-		dasd_put_device(device);
-		return -EBUSY;
+	if (device->block) {
+		max_count = device->block->bdev ? 0 : -1;
+		open_count = atomic_read(&device->block->open_count);
+		if (open_count > max_count) {
+			if (open_count > 0)
+				pr_warning("%s: The DASD cannot be set offline "
+					   "with open count %i\n",
+					   dev_name(&cdev->dev), open_count);
+			else
+				pr_warning("%s: The DASD cannot be set offline "
+					   "while it is in use\n",
+					   dev_name(&cdev->dev));
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+			dasd_put_device(device);
+			return -EBUSY;
+		}
 	}
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
+	block = device->block;
+	device->block = NULL;
 	dasd_delete_device(device);
-
+	/*
+	 * life cycle of block is bound to device, so delete it after
+	 * device was safely removed
	 */
+	if (block)
+		dasd_free_block(block);
 	return 0;
 }

-int
-dasd_generic_notify(struct ccw_device *cdev, int event)
+int dasd_generic_notify(struct ccw_device *cdev, int event)
 {
 	struct dasd_device *device;
 	struct dasd_ccw_req *cqr;
-	unsigned long flags;
 	int ret;

-	device = dasd_device_from_cdev(cdev);
+	device = dasd_device_from_cdev_locked(cdev);
 	if (IS_ERR(device))
 		return 0;
-	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 	ret = 0;
 	switch (event) {
 	case CIO_GONE:
+	case CIO_BOXED:
 	case CIO_NO_PATH:
 		/* First of all call extended error reporting. */
 		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
@@ -2145,38 +2369,33 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
 		if (device->state < DASD_STATE_BASIC)
 			break;
 		/* Device is active. We want to keep it. */
-		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
-			list_for_each_entry(cqr, &device->ccw_queue, list)
-				if (cqr->status == DASD_CQR_IN_IO)
-					cqr->status = DASD_CQR_FAILED;
-			device->stopped |= DASD_STOPPED_DC_EIO;
-		} else {
-			list_for_each_entry(cqr, &device->ccw_queue, list)
-				if (cqr->status == DASD_CQR_IN_IO) {
-					cqr->status = DASD_CQR_QUEUED;
-					cqr->retries++;
-				}
-			device->stopped |= DASD_STOPPED_DC_WAIT;
-			dasd_set_timer(device, 0);
-		}
-		dasd_schedule_bh(device);
+		list_for_each_entry(cqr, &device->ccw_queue, devlist)
+			if (cqr->status == DASD_CQR_IN_IO) {
+				cqr->status = DASD_CQR_QUEUED;
+				cqr->retries++;
+			}
+		device->stopped |= DASD_STOPPED_DC_WAIT;
+		dasd_device_clear_timer(device);
+		dasd_schedule_device_bh(device);
 		ret = 1;
 		break;
 	case CIO_OPER:
 		/* FIXME: add a sanity check. */
-		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
-		dasd_schedule_bh(device);
+		device->stopped &= ~DASD_STOPPED_DC_WAIT;
+		dasd_schedule_device_bh(device);
+		if (device->block)
+			dasd_schedule_block_bh(device->block);
 		ret = 1;
 		break;
 	}
-	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 	dasd_put_device(device);
 	return ret;
 }

-struct dasd_ccw_req * dasd_generic_build_rdc(struct dasd_device *device,
-					  void *rdc_buffer,
-					  int rdc_buffer_size, char *magic)
+static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
+						   void *rdc_buffer,
+						   int rdc_buffer_size,
+						   char *magic)
 {
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
@@ -2184,8 +2403,10 @@ struct dasd_ccw_req * dasd_generic_build_rdc(struct dasd_device *device,

 	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
 	if (IS_ERR(cqr)) {
-		DEV_MESSAGE(KERN_WARNING, device, "%s",
-			    "Could not allocate RDC request");
+		/* internal error 13 - Allocating the RDC request failed */
+		dev_err(&device->cdev->dev,
+			"An error occurred in the DASD device driver, "
+			"reason=%s\n", "13");
 		return cqr;
 	}

@@ -2194,7 +2415,8 @@ struct dasd_ccw_req * dasd_generic_build_rdc(struct dasd_device *device,
 	ccw->cda = (__u32)(addr_t)rdc_buffer;
 	ccw->count = rdc_buffer_size;

-	cqr->device = device;
+	cqr->startdev = device;
+	cqr->memdev = device;
 	cqr->expires = 10*HZ;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 	cqr->retries = 2;
@@ -2216,21 +2438,55 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
 		return PTR_ERR(cqr);

 	ret = dasd_sleep_on(cqr);
-	dasd_sfree_request(cqr, cqr->device);
+	dasd_sfree_request(cqr, cqr->memdev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
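
A caller's view of the helper above (sketch; struct my_rdc_data and the "MYDC" magic are made up, and the (device, magic, buffer, size) argument order is assumed from dasd_generic_build_rdc()):

struct my_rdc_data {
	__u8 data[64];			/* hypothetical RDC layout */
};

static int my_read_characteristics(struct dasd_device *device)
{
	struct my_rdc_data rdc;
	int rc;

	rc = dasd_generic_read_dev_chars(device, "MYDC", &rdc, sizeof(rdc));
	if (rc)
		DBF_EVENT(DBF_WARNING,
			  "Read device characteristics failed for %s, rc=%d",
			  dev_name(&device->cdev->dev), rc);
	return rc;
}
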

-static int __init
-dasd_init(void)
+/*
+ * In command mode and transport mode we need to look for sense
+ * data in different places. The sense data itself is always
+ * an array of 32 bytes, so we can unify the sense data access
+ * for both modes.
+ */
+char *dasd_get_sense(struct irb *irb)
+{
+	struct tsb *tsb = NULL;
+	char *sense = NULL;
+
+	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
+		if (irb->scsw.tm.tcw)
+			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
+					  irb->scsw.tm.tcw);
+		if (tsb && tsb->length == 64 && tsb->flags)
+			switch (tsb->flags & 0x07) {
+			case 1:	/* tsa_iostat */
+				sense = tsb->tsa.iostat.sense;
+				break;
+			case 2: /* tsa_ddpc */
+				sense = tsb->tsa.ddpc.sense;
+				break;
+			default:
+				/* currently we don't use interrogate data */
+				break;
+			}
+	} else if (irb->esw.esw0.erw.cons) {
+		sense = irb->ecw;
+	}
+	return sense;
+}
+EXPORT_SYMBOL_GPL(dasd_get_sense);
+
+static int __init dasd_init(void)
 {
 	int rc;

 	init_waitqueue_head(&dasd_init_waitq);
 	init_waitqueue_head(&dasd_flush_wq);
+	init_waitqueue_head(&generic_waitq);

 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
-	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
+	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
 	if (dasd_debug_area == NULL) {
 		rc = -ENOMEM;
 		goto failed;
@@ -2262,7 +2518,7 @@ dasd_init(void)
 	return 0;

 failed:
-	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
+	pr_info("The DASD device driver could not be initialized\n");
 	dasd_exit();
 	return rc;
 }
@@ -2276,15 +2532,18 @@ EXPORT_SYMBOL(dasd_diag_discipline_pointer);
 EXPORT_SYMBOL(dasd_add_request_head);
 EXPORT_SYMBOL(dasd_add_request_tail);
 EXPORT_SYMBOL(dasd_cancel_req);
-EXPORT_SYMBOL(dasd_clear_timer);
+EXPORT_SYMBOL(dasd_device_clear_timer);
+EXPORT_SYMBOL(dasd_block_clear_timer);
 EXPORT_SYMBOL(dasd_enable_device);
 EXPORT_SYMBOL(dasd_int_handler);
 EXPORT_SYMBOL(dasd_kfree_request);
 EXPORT_SYMBOL(dasd_kick_device);
 EXPORT_SYMBOL(dasd_kmalloc_request);
-EXPORT_SYMBOL(dasd_schedule_bh);
+EXPORT_SYMBOL(dasd_schedule_device_bh);
+EXPORT_SYMBOL(dasd_schedule_block_bh);
 EXPORT_SYMBOL(dasd_set_target_state);
-EXPORT_SYMBOL(dasd_set_timer);
+EXPORT_SYMBOL(dasd_device_set_timer);
+EXPORT_SYMBOL(dasd_block_set_timer);
 EXPORT_SYMBOL(dasd_sfree_request);
 EXPORT_SYMBOL(dasd_sleep_on);
 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
@@ -2298,4 +2557,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove);
 EXPORT_SYMBOL_GPL(dasd_generic_notify);
 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
-
+EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
+EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
+EXPORT_SYMBOL_GPL(dasd_alloc_block);
+EXPORT_SYMBOL_GPL(dasd_free_block);
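
Finally, a usage sketch for dasd_get_sense(): with the unified accessor an interrupt handler no longer needs to distinguish command mode from transport mode before inspecting sense bytes (the handler below is hypothetical; SNS0_CMD_REJECT is the standard 0x80 bit in sense byte 0):

static void my_check_for_cmd_reject(struct dasd_device *device, struct irb *irb)
{
	char *sense = dasd_get_sense(irb);

	if (sense && (sense[0] & SNS0_CMD_REJECT))
		dev_err(&device->cdev->dev, "Command rejected by the device\n");
}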