2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
12 #include <linux/config.h>
13 #include <linux/kmod.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/ctype.h>
17 #include <linux/major.h>
18 #include <linux/slab.h>
19 #include <linux/buffer_head.h>
20 #include <linux/hdreg.h>
21 #include <linux/notifier.h>
23 #include <asm/ccwdev.h>
24 #include <asm/ebcdic.h>
25 #include <asm/idals.h>
26 #include <asm/todclk.h>
29 #define PRINTK_HEADER "dasd:"
33 * SECTION: Constant definitions to be used within this file
35 #define DASD_CHANQ_MAX_SIZE 4
38 * SECTION: exported variables of dasd.c
/* Exported globals: driver-wide debug area and hook for the DIAG discipline. */
40 debug_info_t *dasd_debug_area;
41 struct dasd_discipline *dasd_diag_discipline_pointer;
43 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
44 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
45 " Copyright 2000 IBM Corporation");
46 MODULE_SUPPORTED_DEVICE("dasd");
47 MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
48 MODULE_LICENSE("GPL");
51 * SECTION: prototypes for static functions of dasd.c
53 static int dasd_alloc_queue(struct dasd_device * device);
54 static void dasd_setup_queue(struct dasd_device * device);
55 static void dasd_free_queue(struct dasd_device * device);
56 static void dasd_flush_request_queue(struct dasd_device *);
57 static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
58 static void dasd_flush_ccw_queue(struct dasd_device *, int);
59 static void dasd_tasklet(struct dasd_device *);
60 static void do_kick_device(void *data);
61 static void dasd_disable_eer(struct dasd_device *device);
64 * SECTION: Operations on the device structure.
/* Wait queue woken whenever a device reaches its target state. */
66 static wait_queue_head_t dasd_init_waitq;
69 * Allocate memory for a new device structure.
/*
 * Allocate and zero a struct dasd_device plus its fixed memory pools:
 * two DMA pages for normal ccw requests and one page reserved for ERP.
 * Returns ERR_PTR(-ENOMEM) on any allocation failure (earlier allocations
 * are released first). GFP_ATOMIC because callers may hold spinlocks.
 * NOTE(review): extraction dropped some lines (e.g. the NULL check after
 * kmalloc) — verify against the upstream file.
 */
72 dasd_alloc_device(void)
74 struct dasd_device *device;
76 device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
78 return ERR_PTR(-ENOMEM);
79 memset(device, 0, sizeof (struct dasd_device));
80 /* open_count = 0 means device online but not in use */
81 atomic_set(&device->open_count, -1);
83 /* Get two pages for normal block device operations. */
84 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
85 if (device->ccw_mem == NULL) {
87 return ERR_PTR(-ENOMEM);
89 /* Get one page for error recovery. */
90 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
91 if (device->erp_mem == NULL) {
92 free_pages((unsigned long) device->ccw_mem, 1);
94 return ERR_PTR(-ENOMEM);
97 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
98 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
99 spin_lock_init(&device->mem_lock);
100 spin_lock_init(&device->request_queue_lock);
101 atomic_set (&device->tasklet_scheduled, 0);
102 tasklet_init(&device->tasklet,
103 (void (*)(unsigned long)) dasd_tasklet,
104 (unsigned long) device);
105 INIT_LIST_HEAD(&device->ccw_queue);
106 init_timer(&device->timer);
107 INIT_WORK(&device->kick_work, do_kick_device, device);
108 device->state = DASD_STATE_NEW;
109 device->target = DASD_STATE_NEW;
115 * Free memory of a device structure.
/* Release discipline-private data and the ccw/erp memory pools of a device. */
118 dasd_free_device(struct dasd_device *device)
120 kfree(device->private);
121 free_page((unsigned long) device->erp_mem);
122 free_pages((unsigned long) device->ccw_mem, 1);
127 * Make a new device known to the system.
/* NEW -> KNOWN: take a device reference and allocate the request queue;
 * the reference is dropped again if queue allocation fails. */
130 dasd_state_new_to_known(struct dasd_device *device)
135 * As long as the device is not in state DASD_STATE_NEW we want to
136 * keep the reference count > 0.
138 dasd_get_device(device);
140 rc = dasd_alloc_queue(device);
142 dasd_put_device(device);
146 device->state = DASD_STATE_KNOWN;
151 * Let the system forget about a device.
/* KNOWN -> NEW: disable EER, drop discipline module references, free the
 * request queue and give back the reference from dasd_state_new_to_known. */
154 dasd_state_known_to_new(struct dasd_device * device)
156 /* disable extended error reporting for this device */
157 dasd_disable_eer(device);
158 /* Forget the discipline information. */
159 if (device->discipline)
160 module_put(device->discipline->owner);
161 device->discipline = NULL;
162 if (device->base_discipline)
163 module_put(device->base_discipline->owner);
164 device->base_discipline = NULL;
165 device->state = DASD_STATE_NEW;
167 dasd_free_queue(device);
169 /* Give up reference we took in dasd_state_new_to_known. */
170 dasd_put_device(device);
174 * Request the irq line for the device.
/* KNOWN -> BASIC: allocate the gendisk and register a per-device s390
 * debug-feature area (initially at DBF_EMERG level). */
177 dasd_state_known_to_basic(struct dasd_device * device)
181 /* Allocate and register gendisk structure. */
182 rc = dasd_gendisk_alloc(device);
186 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
187 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
189 debug_register_view(device->debug_area, &debug_sprintf_view);
190 debug_set_level(device->debug_area, DBF_EMERG);
191 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
193 device->state = DASD_STATE_BASIC;
198 * Release the irq line for the device. Terminate any running i/o.
/* BASIC -> KNOWN: free the gendisk, flush ALL outstanding ccw requests
 * (second arg 1 = flush everything) and tear down the debug area. */
201 dasd_state_basic_to_known(struct dasd_device * device)
203 dasd_gendisk_free(device);
204 dasd_flush_ccw_queue(device, 1);
205 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
206 if (device->debug_area != NULL) {
207 debug_unregister(device->debug_area);
208 device->debug_area = NULL;
210 device->state = DASD_STATE_KNOWN;
214 * Do the initial analysis. The do_analysis function may return
215 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
216 * until the discipline decides to continue the startup sequence
217 * by calling the function dasd_change_state. The eckd disciplines
218 * uses this to start a ccw that detects the format. The completion
219 * interrupt for this detection ccw uses the kernel event daemon to
220 * trigger the call to dasd_change_state. All this is done in the
221 * discipline code, see dasd_eckd.c.
222 * After the analysis ccw is done (do_analysis returned 0 or error)
223 * the block device is setup. Either a fake disk is added to allow
224 * formatting or a proper device request queue is created.
/* BASIC -> READY: run the discipline's (optional) format analysis, set up
 * the block queue and scan partitions; falls back to BASIC if the partition
 * scan fails. do_analysis may return -EAGAIN to defer (see comment above). */
227 dasd_state_basic_to_ready(struct dasd_device * device)
232 if (device->discipline->do_analysis != NULL)
233 rc = device->discipline->do_analysis(device);
236 dasd_setup_queue(device);
237 device->state = DASD_STATE_READY;
238 if (dasd_scan_partitions(device) != 0)
239 device->state = DASD_STATE_BASIC;
244 * Remove device from block device layer. Destroy dirty buffers.
245 * Forget format information. Check if the target level is basic
246 * and if it is create fake disk for formatting.
/* READY -> BASIC: flush block-device ccw requests only (arg 0), drop
 * partitions and pending block requests, and forget format geometry. */
249 dasd_state_ready_to_basic(struct dasd_device * device)
251 dasd_flush_ccw_queue(device, 0);
252 dasd_destroy_partitions(device);
253 dasd_flush_request_queue(device);
255 device->bp_block = 0;
256 device->s2b_shift = 0;
257 device->state = DASD_STATE_BASIC;
261 * Make the device online and schedule the bottom half to start
262 * the requeueing of requests from the linux request queue to the
/* READY -> ONLINE: mark online and kick the tasklet to start request flow. */
266 dasd_state_ready_to_online(struct dasd_device * device)
268 device->state = DASD_STATE_ONLINE;
269 dasd_schedule_bh(device);
274 * Stop the requeueing of requests again.
/* ONLINE -> READY: simply drop back; requeueing of block requests stops. */
277 dasd_state_online_to_ready(struct dasd_device * device)
279 device->state = DASD_STATE_READY;
283 * Device startup state changes.
/* Walk the state machine upward one step at a time until device->target
 * is reached or a transition fails (rc from the failing transition). */
286 dasd_increase_state(struct dasd_device *device)
291 if (device->state == DASD_STATE_NEW &&
292 device->target >= DASD_STATE_KNOWN)
293 rc = dasd_state_new_to_known(device);
296 device->state == DASD_STATE_KNOWN &&
297 device->target >= DASD_STATE_BASIC)
298 rc = dasd_state_known_to_basic(device);
301 device->state == DASD_STATE_BASIC &&
302 device->target >= DASD_STATE_READY)
303 rc = dasd_state_basic_to_ready(device);
306 device->state == DASD_STATE_READY &&
307 device->target >= DASD_STATE_ONLINE)
308 rc = dasd_state_ready_to_online(device);
314 * Device shutdown state changes.
/* Walk the state machine downward until device->target is reached;
 * shutdown transitions cannot fail, hence no rc handling per step. */
317 dasd_decrease_state(struct dasd_device *device)
319 if (device->state == DASD_STATE_ONLINE &&
320 device->target <= DASD_STATE_READY)
321 dasd_state_online_to_ready(device);
323 if (device->state == DASD_STATE_READY &&
324 device->target <= DASD_STATE_BASIC)
325 dasd_state_ready_to_basic(device);
327 if (device->state == DASD_STATE_BASIC &&
328 device->target <= DASD_STATE_KNOWN)
329 dasd_state_basic_to_known(device);
331 if (device->state == DASD_STATE_KNOWN &&
332 device->target <= DASD_STATE_NEW)
333 dasd_state_known_to_new(device);
339 * This is the main startup/shutdown routine.
/* Main startup/shutdown driver: move toward device->target; on a hard error
 * (not -EAGAIN) give up by setting target back to the current state. Wakes
 * dasd_init_waitq when the target is reached so waiters can proceed. */
342 dasd_change_state(struct dasd_device *device)
346 if (device->state == device->target)
347 /* Already where we want to go today... */
349 if (device->state < device->target)
350 rc = dasd_increase_state(device);
352 rc = dasd_decrease_state(device);
353 if (rc && rc != -EAGAIN)
354 device->target = device->state;
356 if (device->state == device->target)
357 wake_up(&dasd_init_waitq);
361 * Kick starter for devices that did not complete the startup/shutdown
362 * procedure or were sleeping because of a pending state.
363 * dasd_kick_device will schedule a call do do_kick_device to the kernel
/* Workqueue callback: advance the state machine, run the bh, then drop the
 * reference taken in dasd_kick_device. */
367 do_kick_device(void *data)
369 struct dasd_device *device;
371 device = (struct dasd_device *) data;
372 dasd_change_state(device);
373 dasd_schedule_bh(device);
374 dasd_put_device(device);
/* Queue do_kick_device on the kernel event daemon; holds a device reference
 * until the work item runs. */
378 dasd_kick_device(struct dasd_device *device)
380 dasd_get_device(device);
381 /* queue call to dasd_kick_device to the kernel event daemon. */
382 schedule_work(&device->kick_work);
386 * Set the target state for a device and starts the state change.
/* Set a new target state (capped at READY in probeonly mode) and start the
 * state change if the device is not already there. */
389 dasd_set_target_state(struct dasd_device *device, int target)
391 /* If we are in probeonly mode stop at DASD_STATE_READY. */
392 if (dasd_probeonly && target > DASD_STATE_READY)
393 target = DASD_STATE_READY;
394 if (device->target != target) {
395 if (device->state == target)
396 wake_up(&dasd_init_waitq);
397 device->target = target;
399 if (device->state != device->target)
400 dasd_change_state(device);
404 * Enable devices with device numbers in [from..to].
/* wait_event() predicate: true once the device reached its target state. */
407 _wait_for_device(struct dasd_device *device)
409 return (device->state == device->target);
/* Drive a device to ONLINE (or back to NEW if no discipline was found) and
 * block until the state machine settles at the target. */
413 dasd_enable_device(struct dasd_device *device)
415 dasd_set_target_state(device, DASD_STATE_ONLINE);
416 if (device->state <= DASD_STATE_KNOWN)
417 /* No discipline for device found. */
418 dasd_set_target_state(device, DASD_STATE_NEW);
419 /* Now wait for the devices to come up. */
420 wait_event(dasd_init_waitq, _wait_for_device(device));
424 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
426 #ifdef CONFIG_DASD_PROFILE
/* Global profiling state; profiling is off by default. */
428 struct dasd_profile_info_t dasd_global_profile;
429 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
432 * Increments counter in global and local profiling structures.
/* Bucket 'value' into a log2-style histogram slot (index grows while
 * value >> (2+index) is non-zero, capped at 31) and bump both counters. */
434 #define dasd_profile_counter(value, counter, device) \
437 for (index = 0; index < 31 && value >> (2+index); index++); \
438 dasd_global_profile.counter[index]++; \
439 device->profile.counter[index]++; \
443 * Add profiling information for cqr before execution.
/* Record channel-queue depth statistics for a request about to start;
 * no-op unless profiling is switched on. */
446 dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
450 unsigned int counter;
452 if (dasd_profile_level != DASD_PROFILE_ON)
455 /* count the length of the chanq for statistics */
457 list_for_each(l, &device->ccw_queue)
460 dasd_global_profile.dasd_io_nr_req[counter]++;
461 device->profile.dasd_io_nr_req[counter]++;
465 * Add profiling information for cqr after execution.
/* Record per-request timing statistics after completion: build->start,
 * start->stop (irq), stop->end and total times, all derived from TOD clock
 * values (>> 12 converts TOD units to microseconds). Skips requests with
 * any missing timestamp. Resets a stats block on first use (reqs == 0). */
468 dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
471 long strtime, irqtime, endtime, tottime; /* in microseconds */
472 long tottimeps, sectors;
474 if (dasd_profile_level != DASD_PROFILE_ON)
477 sectors = req->nr_sectors;
478 if (!cqr->buildclk || !cqr->startclk ||
479 !cqr->stopclk || !cqr->endclk ||
483 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
484 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
485 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
486 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
487 tottimeps = tottime / sectors;
489 if (!dasd_global_profile.dasd_io_reqs)
490 memset(&dasd_global_profile, 0,
491 sizeof (struct dasd_profile_info_t));
492 dasd_global_profile.dasd_io_reqs++;
493 dasd_global_profile.dasd_io_sects += sectors;
495 if (!device->profile.dasd_io_reqs)
496 memset(&device->profile, 0,
497 sizeof (struct dasd_profile_info_t));
498 device->profile.dasd_io_reqs++;
499 device->profile.dasd_io_sects += sectors;
501 dasd_profile_counter(sectors, dasd_io_secs, device);
502 dasd_profile_counter(tottime, dasd_io_times, device);
503 dasd_profile_counter(tottimeps, dasd_io_timps, device);
504 dasd_profile_counter(strtime, dasd_io_time1, device);
505 dasd_profile_counter(irqtime, dasd_io_time2, device);
506 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
507 dasd_profile_counter(endtime, dasd_io_time3, device);
/* Without CONFIG_DASD_PROFILE, profiling hooks compile to nothing. */
510 #define dasd_profile_start(device, cqr, req) do {} while (0)
511 #define dasd_profile_end(device, cqr, req) do {} while (0)
512 #endif /* CONFIG_DASD_PROFILE */
515 * Allocate memory for a channel program with 'cplength' channel
516 * command words and 'datasize' additional space. There are two
517 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
518 * memory and 2) dasd_smalloc_request uses the static ccw memory
519 * that gets allocated for each device.
/*
 * Allocate a ccw request via kmalloc: the request header, 'cplength' channel
 * command words (DMA-capable memory) and 'datasize' bytes of payload space.
 * The 4-byte discipline magic is stored EBCDIC-converted. Takes a device
 * reference that dasd_kfree_request releases. Returns ERR_PTR on failure.
 */
521 struct dasd_ccw_req *
522 dasd_kmalloc_request(char *magic, int cplength, int datasize,
523 struct dasd_device * device)
525 struct dasd_ccw_req *cqr;
528 if ( magic == NULL || datasize > PAGE_SIZE ||
529 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
532 cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
534 return ERR_PTR(-ENOMEM);
535 memset(cqr, 0, sizeof(struct dasd_ccw_req));
538 cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
539 GFP_ATOMIC | GFP_DMA);
540 if (cqr->cpaddr == NULL) {
542 return ERR_PTR(-ENOMEM);
544 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
548 cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
549 if (cqr->data == NULL) {
552 return ERR_PTR(-ENOMEM);
554 memset(cqr->data, 0, datasize);
556 strncpy((char *) &cqr->magic, magic, 4);
557 ASCEBC((char *) &cqr->magic, 4);
558 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
559 dasd_get_device(device);
/*
 * Like dasd_kmalloc_request but carves the request out of the device's
 * preallocated ccw chunk pool (under mem_lock), so it works even when the
 * page allocator cannot be used. Header is 8-byte aligned; ccws and data
 * follow contiguously. Pair with dasd_sfree_request.
 */
563 struct dasd_ccw_req *
564 dasd_smalloc_request(char *magic, int cplength, int datasize,
565 struct dasd_device * device)
568 struct dasd_ccw_req *cqr;
573 if ( magic == NULL || datasize > PAGE_SIZE ||
574 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
577 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
579 size += cplength * sizeof(struct ccw1);
582 spin_lock_irqsave(&device->mem_lock, flags);
583 cqr = (struct dasd_ccw_req *)
584 dasd_alloc_chunk(&device->ccw_chunks, size);
585 spin_unlock_irqrestore(&device->mem_lock, flags);
587 return ERR_PTR(-ENOMEM);
588 memset(cqr, 0, sizeof(struct dasd_ccw_req));
589 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
592 cqr->cpaddr = (struct ccw1 *) data;
593 data += cplength*sizeof(struct ccw1);
594 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
599 memset(cqr->data, 0, datasize);
601 strncpy((char *) &cqr->magic, magic, 4);
602 ASCEBC((char *) &cqr->magic, 4);
603 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
604 dasd_get_device(device);
609 * Free memory of a channel program. This function needs to free all the
610 * idal lists that might have been created by dasd_set_cda and the
611 * struct dasd_ccw_req itself.
/* Free a kmalloc-based request: clear IDAL lists attached to the channel
 * program (walking ccws chained via CC/DC flags), then drop the device ref.
 * NOTE(review): extraction dropped the kfree() lines — verify upstream. */
614 dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
619 /* Clear any idals used for the request. */
622 clear_normalized_cda(ccw);
623 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
628 dasd_put_device(device);
/* Return a chunk-pool request to the device's ccw pool (under mem_lock)
 * and drop the device reference taken at allocation time. */
632 dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
636 spin_lock_irqsave(&device->mem_lock, flags);
637 dasd_free_chunk(&device->ccw_chunks, cqr);
638 spin_unlock_irqrestore(&device->mem_lock, flags);
639 dasd_put_device(device);
643 * Check discipline magic in cqr.
/* Sanity check: the request's 4-byte magic must match the EBCDIC name of
 * the device's discipline; logs a warning on mismatch. */
646 dasd_check_cqr(struct dasd_ccw_req *cqr)
648 struct dasd_device *device;
652 device = cqr->device;
653 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
654 DEV_MESSAGE(KERN_WARNING, device,
655 " dasd_ccw_req 0x%08x magic doesn't match"
656 " discipline 0x%08x",
658 *(unsigned int *) device->discipline->name);
665 * Terminate the current i/o and set the request to clear_pending.
666 * Timer keeps device runnig.
667 * ccw_device_clear can fail if the i/o subsystem
/*
 * Terminate the request currently in flight: retry ccw_device_clear up to
 * 5 times while the request is still IN_IO. On success the request goes to
 * CLEAR state and its stop clock is recorded; -ENODEV/-EINVAL/-EBUSY are
 * retried, anything else is reported as a driver bug. Schedules the bh at
 * the end so queue processing resumes.
 * NOTE(review): the switch's break/retry bookkeeping lines were dropped by
 * the extraction — verify against upstream.
 */
671 dasd_term_IO(struct dasd_ccw_req * cqr)
673 struct dasd_device *device;
677 rc = dasd_check_cqr(cqr);
681 device = (struct dasd_device *) cqr->device;
682 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
683 rc = ccw_device_clear(device->cdev, (long) cqr);
685 case 0: /* termination successful */
687 cqr->status = DASD_CQR_CLEAR;
688 cqr->stopclk = get_clock();
689 DBF_DEV_EVENT(DBF_DEBUG, device,
690 "terminate cqr %p successful",
694 DBF_DEV_EVENT(DBF_ERR, device, "%s",
695 "device gone, retry");
698 DBF_DEV_EVENT(DBF_ERR, device, "%s",
703 DBF_DEV_EVENT(DBF_ERR, device, "%s",
704 "device busy, retry later");
707 DEV_MESSAGE(KERN_ERR, device,
708 "line %d unknown RC=%d, please "
709 "report to linux390@de.ibm.com",
716 dasd_schedule_bh(device);
721 * Start the i/o. This start_IO can fail if the channel is really busy.
722 * In that case set up a timer to start the request later.
/*
 * Start I/O for a request via ccw_device_start. Fails the request outright
 * when its retry budget is exhausted. On rc==0 the request becomes IN_IO
 * and start timestamps are recorded. -EBUSY/-ETIMEDOUT mean "retry later";
 * -EACCES widens the logical path mask to LPM_ANYPATH and retries;
 * -ENODEV/-EIO retry after the device comes back; other codes are bugs.
 */
725 dasd_start_IO(struct dasd_ccw_req * cqr)
727 struct dasd_device *device;
731 rc = dasd_check_cqr(cqr);
734 device = (struct dasd_device *) cqr->device;
735 if (cqr->retries < 0) {
736 DEV_MESSAGE(KERN_DEBUG, device,
737 "start_IO: request %p (%02x/%i) - no retry left.",
738 cqr, cqr->status, cqr->retries);
739 cqr->status = DASD_CQR_FAILED;
742 cqr->startclk = get_clock();
743 cqr->starttime = jiffies;
745 rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
749 cqr->status = DASD_CQR_IN_IO;
750 DBF_DEV_EVENT(DBF_DEBUG, device,
751 "start_IO: request %p started successful",
755 DBF_DEV_EVENT(DBF_ERR, device, "%s",
756 "start_IO: device busy, retry later");
759 DBF_DEV_EVENT(DBF_ERR, device, "%s",
760 "start_IO: request timeout, retry later");
763 /* -EACCES indicates that the request used only a
764 * subset of the available pathes and all these
766 * Do a retry with all available pathes.
768 cqr->lpm = LPM_ANYPATH;
769 DBF_DEV_EVENT(DBF_ERR, device, "%s",
770 "start_IO: selected pathes gone,"
771 " retry on all pathes");
775 DBF_DEV_EVENT(DBF_ERR, device, "%s",
776 "start_IO: device gone, retry");
779 DEV_MESSAGE(KERN_ERR, device,
780 "line %d unknown RC=%d, please report"
781 " to linux390@de.ibm.com", __LINE__, rc);
789 * Timeout function for dasd devices. This is used for different purposes
790 * 1) missing interrupt handler for normal operation
791 * 2) delayed start of request where start_IO failed with -EBUSY
792 * 3) timeout for missing state change interrupts
793 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
794 * DASD_CQR_QUEUED for 2) and 3).
/* Device timer callback: clear the PENDING stop bit under the ccwdev lock
 * and reschedule the bh so queued requests get another chance. */
797 dasd_timeout_device(unsigned long ptr)
800 struct dasd_device *device;
802 device = (struct dasd_device *) ptr;
803 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
804 /* re-activate request queue */
805 device->stopped &= ~DASD_STOPPED_PENDING;
806 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
807 dasd_schedule_bh(device);
811 * Setup timeout for a device in jiffies.
/* Arm the per-device timer 'expires' jiffies from now: mod_timer if already
 * pending, otherwise set function/data/expires and add_timer. An expires
 * value of 0 (visible in the first branch) cancels a pending timer. */
814 dasd_set_timer(struct dasd_device *device, int expires)
817 if (timer_pending(&device->timer))
818 del_timer(&device->timer);
821 if (timer_pending(&device->timer)) {
822 if (mod_timer(&device->timer, jiffies + expires))
825 device->timer.function = dasd_timeout_device;
826 device->timer.data = (unsigned long) device;
827 device->timer.expires = jiffies + expires;
828 add_timer(&device->timer);
832 * Clear timeout for a device.
/* Cancel the per-device timer if it is armed. */
835 dasd_clear_timer(struct dasd_device *device)
837 if (timer_pending(&device->timer))
838 del_timer(&device->timer);
/* Called when the common I/O layer killed an in-flight request (irb is an
 * error pointer). Validates the intparm really is our IN_IO request for
 * this device, then requeues it for a retry and reschedules the bh. */
842 dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
844 struct dasd_ccw_req *cqr;
845 struct dasd_device *device;
847 cqr = (struct dasd_ccw_req *) intparm;
848 if (cqr->status != DASD_CQR_IN_IO) {
850 "invalid status in handle_killed_request: "
851 "bus_id %s, status %02x",
852 cdev->dev.bus_id, cqr->status);
856 device = (struct dasd_device *) cqr->device;
857 if (device == NULL ||
858 device != dasd_device_from_cdev(cdev) ||
859 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
860 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
865 /* Schedule request to be retried. */
866 cqr->status = DASD_CQR_QUEUED;
868 dasd_clear_timer(device);
869 dasd_schedule_bh(device);
870 dasd_put_device(device);
/* A state-change-pending interrupt arrived: report it via EER, clear the
 * PENDING stop bit and requeue every request that was IN_IO so the bh can
 * restart them. */
874 dasd_handle_state_change_pending(struct dasd_device *device)
876 struct dasd_ccw_req *cqr;
877 struct list_head *l, *n;
879 /* first of all call extended error reporting */
880 dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL);
882 device->stopped &= ~DASD_STOPPED_PENDING;
884 /* restart all 'running' IO on queue */
885 list_for_each_safe(l, n, &device->ccw_queue) {
886 cqr = list_entry(l, struct dasd_ccw_req, list);
887 if (cqr->status == DASD_CQR_IN_IO) {
888 cqr->status = DASD_CQR_QUEUED;
891 dasd_clear_timer(device);
892 dasd_schedule_bh(device);
896 * Interrupt handler for "normal" ssch-io based dasd devices.
/*
 * Interrupt handler for ssch-based DASD devices. Handles, in order:
 * an error-pointer irb (killed request / timeout / unknown), the
 * attention+dev-end+unit-exception "state change pending" pattern,
 * unsolicited interrupts, clear-pending completion, and finally normal
 * request completion. For completed requests an era action is computed
 * (none -> DONE plus fastpath start of the next queued request; otherwise
 * the irb is saved, sense data logged and the request marked FAILED or
 * ERROR for recovery). Ends by (re)arming or clearing the device timer
 * and scheduling the bh.
 */
899 dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
902 struct dasd_ccw_req *cqr, *next;
903 struct dasd_device *device;
904 unsigned long long now;
910 switch (PTR_ERR(irb)) {
912 dasd_handle_killed_request(cdev, intparm);
915 printk(KERN_WARNING"%s(%s): request timed out\n",
916 __FUNCTION__, cdev->dev.bus_id);
917 //FIXME - dasd uses own timeout interface...
920 printk(KERN_WARNING"%s(%s): unknown error %ld\n",
921 __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
928 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
929 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
930 (unsigned int) intparm);
932 /* first of all check for state change pending interrupt */
933 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
934 if ((irb->scsw.dstat & mask) == mask) {
935 device = dasd_device_from_cdev(cdev);
936 if (!IS_ERR(device)) {
937 dasd_handle_state_change_pending(device);
938 dasd_put_device(device);
943 cqr = (struct dasd_ccw_req *) intparm;
945 /* check for unsolicited interrupts */
948 "unsolicited interrupt received: bus_id %s",
953 device = (struct dasd_device *) cqr->device;
954 if (device == NULL ||
955 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
956 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
961 /* Check for clear pending */
962 if (cqr->status == DASD_CQR_CLEAR &&
963 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
964 cqr->status = DASD_CQR_QUEUED;
965 dasd_clear_timer(device);
966 dasd_schedule_bh(device);
970 /* check status - the request might have been killed by dyn detach */
971 if (cqr->status != DASD_CQR_IN_IO) {
973 "invalid status: bus_id %s, status %02x",
974 cdev->dev.bus_id, cqr->status);
977 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
978 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
980 /* Find out the appropriate era_action. */
981 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
982 era = dasd_era_fatal;
983 else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
984 irb->scsw.cstat == 0 &&
985 !irb->esw.esw0.erw.cons)
987 else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
988 era = dasd_era_fatal; /* don't recover this request */
989 else if (irb->esw.esw0.erw.cons)
990 era = device->discipline->examine_error(cqr, irb);
992 era = dasd_era_recover;
994 DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
996 if (era == dasd_era_none) {
997 cqr->status = DASD_CQR_DONE;
999 /* Start first request on queue if possible -> fast_io. */
1000 if (cqr->list.next != &device->ccw_queue) {
1001 next = list_entry(cqr->list.next,
1002 struct dasd_ccw_req, list);
1003 if ((next->status == DASD_CQR_QUEUED) &&
1004 (!device->stopped)) {
1005 if (device->discipline->start_IO(next) == 0)
1006 expires = next->expires;
1008 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1009 "Interrupt fastpath "
1013 } else { /* error */
1014 memcpy(&cqr->irb, irb, sizeof (struct irb));
1016 /* dump sense data */
1017 dasd_log_sense(cqr, irb);
1020 case dasd_era_fatal:
1021 cqr->status = DASD_CQR_FAILED;
1024 case dasd_era_recover:
1025 cqr->status = DASD_CQR_ERROR;
1032 dasd_set_timer(device, expires);
1034 dasd_clear_timer(device);
1035 dasd_schedule_bh(device);
1039 * posts the buffer_cache about a finalized request
/* Complete a block-layer request: finish its sectors, feed the entropy
 * pool, and signal final completion. Caller holds request_queue_lock. */
1042 dasd_end_request(struct request *req, int uptodate)
1044 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1046 add_disk_randomness(req->rq_disk);
1047 end_that_request_last(req, uptodate);
1051 * Process finished error recovery ccw.
/* Finish a completed error-recovery request: log the outcome and run the
 * discipline's erp_postaction to unwind the ERP chain. */
1054 __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1056 dasd_erp_fn_t erp_fn;
1058 if (cqr->status == DASD_CQR_DONE)
1059 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1061 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1062 erp_fn = device->discipline->erp_postaction(cqr);
1067 * Process ccw request queue.
/*
 * Walk the ccw queue from the head, handling requests that reached a final
 * status (DONE/FAILED/ERROR) and stopping at the first non-final one.
 * ERROR requests are dispatched to the discipline's ERP (or the default
 * ERP action); a FAILED request triggers an EER fatal-error report and,
 * if EER is active, is requeued with the device quiesced. Requests whose
 * ERP chain is finished are post-processed and finally moved, with an end
 * timestamp, onto 'final_queue' for callback invocation by the caller.
 * Called with the ccwdev lock held.
 */
1070 __dasd_process_ccw_queue(struct dasd_device * device,
1071 struct list_head *final_queue)
1073 struct list_head *l, *n;
1074 struct dasd_ccw_req *cqr;
1075 dasd_erp_fn_t erp_fn;
1078 /* Process request with final status. */
1079 list_for_each_safe(l, n, &device->ccw_queue) {
1080 cqr = list_entry(l, struct dasd_ccw_req, list);
1081 /* Stop list processing at the first non-final request. */
1082 if (cqr->status != DASD_CQR_DONE &&
1083 cqr->status != DASD_CQR_FAILED &&
1084 cqr->status != DASD_CQR_ERROR)
1086 /* Process requests with DASD_CQR_ERROR */
1087 if (cqr->status == DASD_CQR_ERROR) {
1088 if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
1089 cqr->status = DASD_CQR_FAILED;
1090 cqr->stopclk = get_clock();
1092 if (cqr->irb.esw.esw0.erw.cons) {
1093 erp_fn = device->discipline->
1097 dasd_default_erp_action(cqr);
1102 /* first of all call extended error reporting */
1103 if (device->eer && cqr->status == DASD_CQR_FAILED) {
1104 dasd_write_eer_trigger(DASD_EER_FATALERROR,
1107 /* restart request */
1108 cqr->status = DASD_CQR_QUEUED;
1110 device->stopped |= DASD_STOPPED_QUIESCE;
1114 /* Process finished ERP request. */
1116 __dasd_process_erp(device, cqr);
1120 /* Rechain finished requests to final queue */
1121 cqr->endclk = get_clock();
1122 list_move_tail(&cqr->list, final_queue);
/* Completion callback for block-layer-originated ccw requests: record
 * profiling data, free the channel program (free_cp returns the uptodate
 * status) and complete the block request under request_queue_lock. */
1127 dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1129 struct request *req;
1130 struct dasd_device *device;
1133 req = (struct request *) data;
1134 device = cqr->device;
1135 dasd_profile_end(device, cqr, req);
1136 status = cqr->device->discipline->free_cp(cqr,req);
1137 spin_lock_irq(&device->request_queue_lock);
1138 dasd_end_request(req, status);
1139 spin_unlock_irq(&device->request_queue_lock);
1144 * Fetch requests from the block device queue.
/*
 * Pull requests from the block device request queue and turn them into ccw
 * requests on the device's ccw queue, up to DASD_CHANQ_MAX_SIZE queued
 * entries. Only runs in READY/ONLINE state. Writes to read-only devices
 * and any request while the device is stopped with DC_EIO are failed
 * immediately; -ENOMEM from build_cp stops fetching (retry later), other
 * build errors fail the request. Successfully built requests get the
 * dasd_end_request_cb callback and profiling start data. Called with both
 * request_queue_lock and the ccwdev lock held.
 */
1147 __dasd_process_blk_queue(struct dasd_device * device)
1149 request_queue_t *queue;
1150 struct request *req;
1151 struct dasd_ccw_req *cqr;
1154 queue = device->request_queue;
1155 /* No queue ? Then there is nothing to do. */
1160 * We requeue request from the block device queue to the ccw
1161 * queue only in two states. In state DASD_STATE_READY the
1162 * partition detection is done and we need to requeue requests
1163 * for that. State DASD_STATE_ONLINE is normal block device
1166 if (device->state != DASD_STATE_READY &&
1167 device->state != DASD_STATE_ONLINE)
1170 /* Now we try to fetch requests from the request queue */
1171 list_for_each_entry(cqr, &device->ccw_queue, list)
1172 if (cqr->status == DASD_CQR_QUEUED)
1174 while (!blk_queue_plugged(queue) &&
1175 elv_next_request(queue) &&
1176 nr_queued < DASD_CHANQ_MAX_SIZE) {
1177 req = elv_next_request(queue);
1179 if (device->features & DASD_FEATURE_READONLY &&
1180 rq_data_dir(req) == WRITE) {
1181 DBF_DEV_EVENT(DBF_ERR, device,
1182 "Rejecting write request %p",
1184 blkdev_dequeue_request(req);
1185 dasd_end_request(req, 0);
1188 if (device->stopped & DASD_STOPPED_DC_EIO) {
1189 blkdev_dequeue_request(req);
1190 dasd_end_request(req, 0);
1193 cqr = device->discipline->build_cp(device, req);
1195 if (PTR_ERR(cqr) == -ENOMEM)
1196 break; /* terminate request queue loop */
1197 DBF_DEV_EVENT(DBF_ERR, device,
1198 "CCW creation failed (rc=%ld) "
1201 blkdev_dequeue_request(req);
1202 dasd_end_request(req, 0);
1205 cqr->callback = dasd_end_request_cb;
1206 cqr->callback_data = (void *) req;
1207 cqr->status = DASD_CQR_QUEUED;
1208 blkdev_dequeue_request(req);
1209 list_add_tail(&cqr->list, &device->ccw_queue);
1210 dasd_profile_start(device, cqr, req);
1216 * Take a look at the first request on the ccw queue and check
1217 * if it reached its expire time. If so, terminate the IO.
/* If the head request is IN_IO with a non-zero expiry and its time is up,
 * terminate it; if termination fails, retry via timer in 1/10 second.
 * Called with the ccwdev lock held. */
1220 __dasd_check_expire(struct dasd_device * device)
1222 struct dasd_ccw_req *cqr;
1224 if (list_empty(&device->ccw_queue))
1226 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1227 if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
1228 if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
1229 if (device->discipline->term_IO(cqr) != 0)
1230 /* Hmpf, try again in 1/10 sec */
1231 dasd_set_timer(device, 10);
1237 * Take a look at the first request on the ccw queue and check
1238 * if it needs to be started.
/*
 * Try to start the request at the head of the ccw queue. FAILFAST requests
 * on a stopped device (for reasons other than PENDING) are failed right
 * away. A queued head on an unstopped device is started; on success the
 * expiry timer is armed, on -EACCES the bh retries immediately, otherwise
 * a retry is scheduled in half a second. Called with the ccwdev lock held.
 */
1241 __dasd_start_head(struct dasd_device * device)
1243 struct dasd_ccw_req *cqr;
1246 if (list_empty(&device->ccw_queue))
1248 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1249 /* check FAILFAST */
1250 if (device->stopped & ~DASD_STOPPED_PENDING &&
1251 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1253 cqr->status = DASD_CQR_FAILED;
1254 dasd_schedule_bh(device);
1256 if ((cqr->status == DASD_CQR_QUEUED) &&
1257 (!device->stopped)) {
1258 /* try to start the first I/O that can be started */
1259 rc = device->discipline->start_IO(cqr);
1261 dasd_set_timer(device, cqr->expires);
1262 else if (rc == -EACCES) {
1263 dasd_schedule_bh(device);
1265 /* Hmpf, try again in 1/2 sec */
1266 dasd_set_timer(device, 50);
1271 * Remove requests from the ccw queue.
/*
 * Remove requests from the ccw queue. If 'all' is zero only block-device
 * requests (callback == dasd_end_request_cb) are flushed; otherwise every
 * request goes. In-flight requests are terminated first. Requests that
 * have not already reached a final status are marked FAILED with a stop
 * timestamp; finished ERP requests are post-processed. Flushed requests
 * are moved to a local list and their callbacks invoked after the ccwdev
 * lock is dropped (callbacks may sleep or take other locks).
 */
1274 dasd_flush_ccw_queue(struct dasd_device * device, int all)
1276 struct list_head flush_queue;
1277 struct list_head *l, *n;
1278 struct dasd_ccw_req *cqr;
1280 INIT_LIST_HEAD(&flush_queue);
1281 spin_lock_irq(get_ccwdev_lock(device->cdev));
1282 list_for_each_safe(l, n, &device->ccw_queue) {
1283 cqr = list_entry(l, struct dasd_ccw_req, list);
1284 /* Flush all request or only block device requests? */
1285 if (all == 0 && cqr->callback == dasd_end_request_cb)
1287 if (cqr->status == DASD_CQR_IN_IO)
1288 device->discipline->term_IO(cqr);
/* BUGFIX: was '||', which is always true and clobbered even DONE
 * requests to FAILED. Only non-final requests must be failed here. */
1289 if (cqr->status != DASD_CQR_DONE &&
1290 cqr->status != DASD_CQR_FAILED) {
1291 cqr->status = DASD_CQR_FAILED;
1292 cqr->stopclk = get_clock();
1294 /* Process finished ERP request. */
1296 __dasd_process_erp(device, cqr);
1299 /* Rechain request on device request queue */
1300 cqr->endclk = get_clock();
1301 list_move_tail(&cqr->list, &flush_queue);
1303 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1304 /* Now call the callback function of flushed requests */
1305 list_for_each_safe(l, n, &flush_queue) {
1306 cqr = list_entry(l, struct dasd_ccw_req, list);
1307 if (cqr->callback != NULL)
1308 (cqr->callback)(cqr, cqr->callback_data);
1313 * Acquire the device lock and process queues for the device.
/*
 * Bottom-half: the central queue-processing routine. Clears the scheduled
 * flag, then under the ccwdev lock checks the head request's expiry and
 * collects finished requests into final_queue. Callbacks run lock-free on
 * the collected requests. Finally, under request_queue_lock + ccwdev lock,
 * new block requests are fetched and the queue head is (re)started. Drops
 * the reference taken by dasd_schedule_bh.
 */
1316 dasd_tasklet(struct dasd_device * device)
1318 struct list_head final_queue;
1319 struct list_head *l, *n;
1320 struct dasd_ccw_req *cqr;
1322 atomic_set (&device->tasklet_scheduled, 0);
1323 INIT_LIST_HEAD(&final_queue);
1324 spin_lock_irq(get_ccwdev_lock(device->cdev));
1325 /* Check expire time of first request on the ccw queue. */
1326 __dasd_check_expire(device);
1327 /* Finish off requests on ccw queue */
1328 __dasd_process_ccw_queue(device, &final_queue);
1329 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1330 /* Now call the callback function of requests with final status */
1331 list_for_each_safe(l, n, &final_queue) {
1332 cqr = list_entry(l, struct dasd_ccw_req, list);
1333 list_del_init(&cqr->list);
1334 if (cqr->callback != NULL)
1335 (cqr->callback)(cqr, cqr->callback_data);
1337 spin_lock_irq(&device->request_queue_lock);
1338 spin_lock(get_ccwdev_lock(device->cdev));
1339 /* Get new request from the block device request queue */
1340 __dasd_process_blk_queue(device);
1341 /* Now check if the head of the ccw queue needs to be started. */
1342 __dasd_start_head(device);
1343 spin_unlock(get_ccwdev_lock(device->cdev));
1344 spin_unlock_irq(&device->request_queue_lock);
1345 dasd_put_device(device);
/*
 * Schedule the device tasklet (dasd_tasklet) at most once; the atomic
 * cmpxchg on tasklet_scheduled makes repeated calls idempotent until
 * the tasklet runs and clears the flag again.  The device reference
 * taken here is released by dasd_tasklet via dasd_put_device.
 * NOTE(review): the early-return line after the cmpxchg test appears to
 * be missing from this extracted listing.
 */
1349 * Schedules a call to dasd_tasklet over the device tasklet.
1352 dasd_schedule_bh(struct dasd_device * device)
1354 /* Protect against rescheduling. */
1355 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1357 dasd_get_device(device);
1358 tasklet_hi_schedule(&device->tasklet);
/*
 * Add a ccw request to the HEAD of the device ccw queue under the
 * ccwdev lock.  The request is only marked DASD_CQR_QUEUED; the actual
 * start is deferred to the tasklet so ordering is preserved.
 */
1362 * Queue a request to the head of the ccw_queue. Start the I/O if
1366 dasd_add_request_head(struct dasd_ccw_req *req)
1368 struct dasd_device *device;
1369 unsigned long flags;
1371 device = req->device;
1372 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1373 req->status = DASD_CQR_QUEUED;
1374 req->device = device;
1375 list_add(&req->list, &device->ccw_queue);
1376 /* let the bh start the request to keep them in order */
1377 dasd_schedule_bh(device);
1378 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/*
 * Add a ccw request to the TAIL of the device ccw queue; otherwise
 * identical to dasd_add_request_head (status set to QUEUED, start
 * deferred to the tasklet, all under the ccwdev lock).
 */
1382 * Queue a request to the tail of the ccw_queue. Start the I/O if
1386 dasd_add_request_tail(struct dasd_ccw_req *req)
1388 struct dasd_device *device;
1389 unsigned long flags;
1391 device = req->device;
1392 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1393 req->status = DASD_CQR_QUEUED;
1394 req->device = device;
1395 list_add_tail(&req->list, &device->ccw_queue);
1396 /* let the bh start the request to keep them in order */
1397 dasd_schedule_bh(device);
1398 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/*
 * Completion callback used by the sleep_on variants: callback_data is a
 * wait_queue_head_t, so finishing the request wakes the sleeper.
 */
1405 dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1407 wake_up((wait_queue_head_t *) data);
/*
 * Condition function for wait_event: true once the request has reached
 * a final status (DONE or FAILED) and is unlinked from any queue.
 * Status is sampled under the ccwdev lock.
 */
1411 _wait_for_wakeup(struct dasd_ccw_req *cqr)
1413 struct dasd_device *device;
1416 device = cqr->device;
1417 spin_lock_irq(get_ccwdev_lock(device->cdev));
1418 rc = ((cqr->status == DASD_CQR_DONE ||
1419 cqr->status == DASD_CQR_FAILED) &&
1420 list_empty(&cqr->list));
1421 spin_unlock_irq(get_ccwdev_lock(device->cdev));
/*
 * Queue a special ccw request at the tail of the ccw queue and block
 * (non-interruptibly) until it reaches final status.  Returns 0 on
 * DASD_CQR_DONE, -EIO on DASD_CQR_FAILED.
 */
1426 * Attempts to start a special ccw queue and waits for its completion.
1429 dasd_sleep_on(struct dasd_ccw_req * cqr)
1431 wait_queue_head_t wait_q;
1432 struct dasd_device *device;
1435 device = cqr->device;
1436 spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Arrange for dasd_wakeup_cb to wake us on completion. */
1438 init_waitqueue_head (&wait_q);
1439 cqr->callback = dasd_wakeup_cb;
1440 cqr->callback_data = (void *) &wait_q;
1441 cqr->status = DASD_CQR_QUEUED;
1442 list_add_tail(&cqr->list, &device->ccw_queue);
1444 /* let the bh start the request to keep them in order */
1445 dasd_schedule_bh(device);
1447 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1449 wait_event(wait_q, _wait_for_wakeup(cqr));
1451 /* Request status is either done or failed. */
1452 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
/*
 * Like dasd_sleep_on, but the wait is interruptible.  If a signal
 * arrives (-ERESTARTSYS) the request must be cleaned up: a request in
 * I/O is terminated and then waited for non-interruptibly; a still
 * queued request is simply unlinked.
 * NOTE(review): several case labels/braces of the cleanup switch are
 * missing from this extracted listing; comments only, code untouched.
 */
1457 * Attempts to start a special ccw queue and wait interruptible
1458 * for its completion.
1461 dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1463 wait_queue_head_t wait_q;
1464 struct dasd_device *device;
1467 device = cqr->device;
1468 spin_lock_irq(get_ccwdev_lock(device->cdev));
1470 init_waitqueue_head (&wait_q);
1471 cqr->callback = dasd_wakeup_cb;
1472 cqr->callback_data = (void *) &wait_q;
1473 cqr->status = DASD_CQR_QUEUED;
1474 list_add_tail(&cqr->list, &device->ccw_queue);
1476 /* let the bh start the request to keep them in order */
1477 dasd_schedule_bh(device);
1478 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1482 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1483 if (rc != -ERESTARTSYS) {
1484 /* Request is final (done or failed) */
1485 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
/* Interrupted by a signal: clean up depending on request state. */
1488 spin_lock_irq(get_ccwdev_lock(device->cdev));
1489 switch (cqr->status) {
1490 case DASD_CQR_IN_IO:
1491 /* terminate running cqr */
1492 if (device->discipline->term_IO) {
1494 device->discipline->term_IO(cqr);
1496 * wait (non-interruptible) for final status
1497 * because signal is still pending
1499 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1500 wait_event(wait_q, _wait_for_wakeup(cqr));
1501 spin_lock_irq(get_ccwdev_lock(device->cdev));
1502 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1506 case DASD_CQR_QUEUED:
/* Never started: just unlink it from the ccw queue. */
1508 list_del_init(&cqr->list);
1513 /* cqr with 'non-interruptable' status - just wait */
1516 spin_unlock_irq(get_ccwdev_lock(device->cdev));
/*
 * Terminate the request at the head of the ccw queue (if any) and put
 * it back to DASD_CQR_QUEUED with cleared clocks, so that a special
 * request can be inserted ahead of it.  Caller holds the ccwdev lock.
 */
1522 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1523 * for eckd devices) the currently running request has to be terminated
1524 * and be put back to status queued, before the special request is added
1525 * to the head of the queue. Then the special request is waited on normally.
1528 _dasd_term_running_cqr(struct dasd_device *device)
1530 struct dasd_ccw_req *cqr;
1533 if (list_empty(&device->ccw_queue))
1535 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1536 rc = device->discipline->term_IO(cqr);
1538 /* termination successful */
1539 cqr->status = DASD_CQR_QUEUED;
1540 cqr->startclk = cqr->stopclk = 0;
/*
 * Terminate the currently running request, queue this one at the HEAD
 * of the ccw queue and wait (non-interruptibly) for completion.
 * Returns 0 on DONE, -EIO on FAILED.
 * (The misspelled name is part of the exported API - see the
 * EXPORT_SYMBOL list - and must not be changed.)
 */
1547 dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1549 wait_queue_head_t wait_q;
1550 struct dasd_device *device;
1553 device = cqr->device;
1554 spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Push the running request back to QUEUED before jumping the queue. */
1555 rc = _dasd_term_running_cqr(device);
1557 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1561 init_waitqueue_head (&wait_q);
1562 cqr->callback = dasd_wakeup_cb;
1563 cqr->callback_data = (void *) &wait_q;
1564 cqr->status = DASD_CQR_QUEUED;
1565 list_add(&cqr->list, &device->ccw_queue);
1567 /* let the bh start the request to keep them in order */
1568 dasd_schedule_bh(device);
1570 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1572 wait_event(wait_q, _wait_for_wakeup(cqr));
1574 /* Request status is either done or failed. */
1575 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
/*
 * Cancel a request depending on its state: QUEUED -> mark failed,
 * IN_IO -> terminate the I/O, finished -> no-op; any other status is
 * reported as invalid.  The tasklet is scheduled afterwards to process
 * the resulting state change.
 */
1580 * Cancels a request that was started with dasd_sleep_on_req.
1581 * This is useful to timeout requests. The request will be
1582 * terminated if it is currently in i/o.
1583 * Returns 1 if the request has been terminated.
1586 dasd_cancel_req(struct dasd_ccw_req *cqr)
1588 struct dasd_device *device = cqr->device;
1589 unsigned long flags;
1593 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1594 switch (cqr->status) {
1595 case DASD_CQR_QUEUED:
1596 /* request was not started - just set to failed */
1597 cqr->status = DASD_CQR_FAILED;
1599 case DASD_CQR_IN_IO:
1600 /* request in IO - terminate IO and release again */
1601 if (device->discipline->term_IO(cqr) != 0)
1602 /* what to do if unable to terminate ??????
1604 cqr->status = DASD_CQR_FAILED;
1605 cqr->stopclk = get_clock();
1609 case DASD_CQR_FAILED:
1610 /* already finished - do nothing */
1613 DEV_MESSAGE(KERN_ALERT, device,
1614 "invalid status %02x in request",
1619 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* Let the tasklet pick up the cancelled request. */
1620 dasd_schedule_bh(device);
1625 * SECTION: Block device operations (request queue, partitions, open, release).
/*
 * Request-queue callback registered via blk_init_queue in
 * dasd_alloc_queue.  Pulls new requests from the block layer queue and
 * kicks the head of the ccw queue.  Only the ccwdev lock is taken here
 * (non-irq variant); the queue lock is presumably held by the block
 * layer caller - NOTE(review): confirm against ll_rw_blk.
 */
1629 * Dasd request queue function. Called from ll_rw_blk.c
1632 do_dasd_request(request_queue_t * queue)
1634 struct dasd_device *device;
1636 device = (struct dasd_device *) queue->queuedata;
1637 spin_lock(get_ccwdev_lock(device->cdev));
1638 /* Get new request from the block device request queue */
1639 __dasd_process_blk_queue(device);
1640 /* Now check if the head of the ccw queue needs to be started. */
1641 __dasd_start_head(device);
1642 spin_unlock(get_ccwdev_lock(device->cdev));
/*
 * Create the block request queue for the device and replace the default
 * elevator with the "deadline" I/O scheduler.  On elevator failure the
 * freshly created queue is cleaned up again.
 */
1646 * Allocate and initialize request queue and default I/O scheduler.
1649 dasd_alloc_queue(struct dasd_device * device)
1653 device->request_queue = blk_init_queue(do_dasd_request,
1654 &device->request_queue_lock);
1655 if (device->request_queue == NULL)
1658 device->request_queue->queuedata = device;
/* Swap out the default elevator for deadline scheduling. */
1660 elevator_exit(device->request_queue->elevator);
1661 rc = elevator_init(device->request_queue, "deadline");
1663 blk_cleanup_queue(device->request_queue);
/*
 * Configure queue limits: sector size from the device block size, max
 * sectors from the discipline, and -1L (i.e. "maximum value") for the
 * segment limits.  Tagged ordering is enabled for barrier support.
 */
1670 * Allocate and initialize request queue.
1673 dasd_setup_queue(struct dasd_device * device)
1677 blk_queue_hardsect_size(device->request_queue, device->bp_block);
1678 max = device->discipline->max_blocks << device->s2b_shift;
1679 blk_queue_max_sectors(device->request_queue, max);
/* -1L converts to the type's max: effectively "no limit". */
1680 blk_queue_max_phys_segments(device->request_queue, -1L);
1681 blk_queue_max_hw_segments(device->request_queue, -1L);
1682 blk_queue_max_segment_size(device->request_queue, -1L);
1683 blk_queue_segment_boundary(device->request_queue, -1L);
1684 blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
/*
 * Tear down the request queue if one exists and clear the pointer so a
 * second call is a no-op.
 */
1688 * Deactivate and free request queue.
1691 dasd_free_queue(struct dasd_device * device)
1693 if (device->request_queue) {
1694 blk_cleanup_queue(device->request_queue);
1695 device->request_queue = NULL;
/*
 * Fail (dasd_end_request(req, 0)) and dequeue every request still
 * pending on the block request queue.  Runs under the
 * request_queue_lock; harmless if no queue was ever allocated.
 */
1700 * Flush request on the request queue.
1703 dasd_flush_request_queue(struct dasd_device * device)
1705 struct request *req;
1707 if (!device->request_queue)
1710 spin_lock_irq(&device->request_queue_lock);
1711 while (!list_empty(&device->request_queue->queue_head)) {
1712 req = elv_next_request(device->request_queue);
1715 dasd_end_request(req, 0);
1716 blkdev_dequeue_request(req);
1718 spin_unlock_irq(&device->request_queue_lock);
/*
 * Block device open: bump open_count, then refuse if the device is
 * going offline, its discipline module cannot be pinned, probeonly mode
 * is active, or the device has not reached BASIC state.  The error
 * epilogue (visible at the end) drops the module reference and the
 * open count again.
 * NOTE(review): the success-return lines are missing from this
 * extracted listing.
 */
1722 dasd_open(struct inode *inp, struct file *filp)
1724 struct gendisk *disk = inp->i_bdev->bd_disk;
1725 struct dasd_device *device = disk->private_data;
1728 atomic_inc(&device->open_count);
1729 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1734 if (!try_module_get(device->discipline->owner)) {
1739 if (dasd_probeonly) {
1740 DEV_MESSAGE(KERN_INFO, device, "%s",
1741 "No access to device due to probeonly mode");
1746 if (device->state < DASD_STATE_BASIC) {
1747 DBF_DEV_EVENT(DBF_ERR, device, " %s",
1748 " Cannot open unrecognized device");
/* Error path: undo the module get and the open_count increment. */
1756 module_put(device->discipline->owner);
1758 atomic_dec(&device->open_count);
/*
 * Block device release: drop the open count and the discipline module
 * reference taken in dasd_open.
 */
1763 dasd_release(struct inode *inp, struct file *filp)
1765 struct gendisk *disk = inp->i_bdev->bd_disk;
1766 struct dasd_device *device = disk->private_data;
1768 atomic_dec(&device->open_count);
1769 module_put(device->discipline->owner);
/*
 * getgeo ioctl backend: delegate geometry to the discipline's
 * fill_geometry hook (fails if the hook is absent) and convert the
 * partition start sector into device blocks via s2b_shift.
 */
1774 * Return disk geometry.
1777 dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1779 struct dasd_device *device;
1781 device = bdev->bd_disk->private_data;
1785 if (!device->discipline ||
1786 !device->discipline->fill_geometry)
1789 device->discipline->fill_geometry(device, geo);
1790 geo->start = get_start_sect(bdev) >> device->s2b_shift;
/*
 * block_device_operations for DASD disks.
 * NOTE(review): the '.open = dasd_open' initializer appears to have
 * been lost in a gap of this extracted listing (numbering jumps from
 * 1796 to 1798).
 */
1794 struct block_device_operations
1795 dasd_device_operations = {
1796 .owner = THIS_MODULE,
1798 .release = dasd_release,
1799 .ioctl = dasd_ioctl,
1800 .compat_ioctl = dasd_compat_ioctl,
1801 .getgeo = dasd_getgeo,
/*
 * Module teardown fragment (presumably dasd_exit - the function
 * signature is lost in a gap of this listing; TODO confirm): destroy
 * the page cache slab, unwind gendisk registration, remove the devfs
 * directory and unregister the debug area.  Pointers are NULLed so a
 * repeat call is safe.
 */
1808 #ifdef CONFIG_PROC_FS
1812 if (dasd_page_cache != NULL) {
1813 kmem_cache_destroy(dasd_page_cache);
1814 dasd_page_cache = NULL;
1816 dasd_gendisk_exit();
1818 devfs_remove("dasd");
1819 if (dasd_debug_area != NULL) {
1820 debug_unregister(dasd_debug_area);
1821 dasd_debug_area = NULL;
1826 * SECTION: common functions for ccw_driver use
/*
 * ccw_driver probe: add the sysfs attributes for the device (logging on
 * failure) and install dasd_int_handler as the interrupt handler.
 */
1830 * Initial attempt at a probe function. this can be simplified once
1831 * the other detection code is gone.
1834 dasd_generic_probe (struct ccw_device *cdev,
1835 struct dasd_discipline *discipline)
1839 ret = dasd_add_sysfs_files(cdev);
1842 "dasd_generic_probe: could not add sysfs entries "
1843 "for %s\n", cdev->dev.bus_id);
1845 cdev->handler = &dasd_int_handler;
/*
 * ccw_driver remove: detach the interrupt handler, remove sysfs files,
 * and - unless offline processing is already underway (OFFLINE flag
 * test_and_set) - force the device to NEW state and delete it.
 */
1852 * This will one day be called from a global not_oper handler.
1853 * It is also used by driver_unregister during module unload.
1856 dasd_generic_remove (struct ccw_device *cdev)
1858 struct dasd_device *device;
1860 cdev->handler = NULL;
1862 dasd_remove_sysfs_files(cdev);
1863 device = dasd_device_from_cdev(cdev);
1866 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1867 /* Already doing offline processing */
1868 dasd_put_device(device);
1872 * This device is removed unconditionally. Set offline
1873 * flag to prevent dasd_open from opening it while it is
1874 * not quite down yet.
1876 dasd_set_target_state(device, DASD_STATE_NEW);
1877 /* dasd_delete_device destroys the device reference. */
1878 dasd_delete_device(device);
/*
 * Bring a device online: create the dasd_device, select the discipline
 * (DIAG if DASD_FEATURE_USEDIAG and available, else the base
 * discipline), pin both discipline modules, run check_device, then
 * drive the device to ONLINE state and wait for initialization.  Every
 * error path deletes the freshly created device and releases whatever
 * module references were already taken.
 * NOTE(review): several return/brace lines are missing from this
 * extracted listing; comments only, code untouched.
 */
1882 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
1883 * the device is detected for the first time and is supposed to be used
1884 * or the user has started activation through sysfs.
1887 dasd_generic_set_online (struct ccw_device *cdev,
1888 struct dasd_discipline *base_discipline)
1891 struct dasd_discipline *discipline;
1892 struct dasd_device *device;
1895 device = dasd_create_device(cdev);
1897 return PTR_ERR(device);
1899 discipline = base_discipline;
/* DIAG access requested: only possible if the DIAG module is loaded. */
1900 if (device->features & DASD_FEATURE_USEDIAG) {
1901 if (!dasd_diag_discipline_pointer) {
1902 printk (KERN_WARNING
1903 "dasd_generic couldn't online device %s "
1904 "- discipline DIAG not available\n",
1906 dasd_delete_device(device);
1909 discipline = dasd_diag_discipline_pointer;
/* Pin both modules for as long as the device is online. */
1911 if (!try_module_get(base_discipline->owner)) {
1912 dasd_delete_device(device);
1915 if (!try_module_get(discipline->owner)) {
1916 module_put(base_discipline->owner);
1917 dasd_delete_device(device);
1920 device->base_discipline = base_discipline;
1921 device->discipline = discipline;
1923 rc = discipline->check_device(device);
1925 printk (KERN_WARNING
1926 "dasd_generic couldn't online device %s "
1927 "with discipline %s rc=%i\n",
1928 cdev->dev.bus_id, discipline->name, rc);
1929 module_put(discipline->owner);
1930 module_put(base_discipline->owner);
1931 dasd_delete_device(device);
1935 dasd_set_target_state(device, DASD_STATE_ONLINE);
1936 if (device->state <= DASD_STATE_KNOWN) {
1937 printk (KERN_WARNING
1938 "dasd_generic discipline not found for %s\n",
1941 dasd_set_target_state(device, DASD_STATE_NEW);
1942 dasd_delete_device(device);
1944 pr_debug("dasd_generic device %s found\n",
1947 /* FIXME: we have to wait for the root device but we don't want
1948 * to wait for each single device but for all at once. */
1949 wait_event(dasd_init_waitq, _wait_for_device(device));
1951 dasd_put_device(device);
/*
 * Take a device offline: set the OFFLINE flag (bailing out if offline
 * processing is already running), refuse while there are openers other
 * than the internal blkdev_get, then drive the device to NEW state and
 * delete it.
 */
1957 dasd_generic_set_offline (struct ccw_device *cdev)
1959 struct dasd_device *device;
1962 device = dasd_device_from_cdev(cdev);
1964 return PTR_ERR(device);
1965 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1966 /* Already doing offline processing */
1967 dasd_put_device(device);
1971 * We must make sure that this device is currently not in use.
1972 * The open_count is increased for every opener, that includes
1973 * the blkdev_get in dasd_scan_partitions. We are only interested
1974 * in the other openers.
/* With a bdev one open (the internal one) is tolerated, else none. */
1976 max_count = device->bdev ? 0 : -1;
1977 if (atomic_read(&device->open_count) > max_count) {
1978 printk (KERN_WARNING "Can't offline dasd device with open"
1980 atomic_read(&device->open_count));
/* Busy: undo the OFFLINE flag so a later attempt can retry. */
1981 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
1982 dasd_put_device(device);
1985 dasd_set_target_state(device, DASD_STATE_NEW);
1986 /* dasd_delete_device destroys the device reference. */
1987 dasd_delete_device(device);
/*
 * ccw event notification handler.  On a path-loss style event it
 * triggers extended error reporting, then either fails in-flight
 * requests and sets DASD_STOPPED_DC_EIO (if DASD_FLAG_DSC_ERROR is
 * set) or re-queues them and sets DASD_STOPPED_DC_WAIT; on the
 * path-restored branch both stop bits are cleared again.
 * NOTE(review): the event switch labels are missing from this extracted
 * listing, so the exact event values for each branch cannot be
 * confirmed here; comments only, code untouched.
 */
1993 dasd_generic_notify(struct ccw_device *cdev, int event)
1995 struct dasd_device *device;
1996 struct dasd_ccw_req *cqr;
1997 unsigned long flags;
2000 device = dasd_device_from_cdev(cdev);
2003 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
2008 /* first of all call extended error reporting */
2009 dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL);
2011 if (device->state < DASD_STATE_BASIC)
2013 /* Device is active. We want to keep it. */
2014 if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
2015 list_for_each_entry(cqr, &device->ccw_queue, list)
2016 if (cqr->status == DASD_CQR_IN_IO)
2017 cqr->status = DASD_CQR_FAILED;
2018 device->stopped |= DASD_STOPPED_DC_EIO;
/* No DSC error: requeue in-flight requests for a retry instead. */
2020 list_for_each_entry(cqr, &device->ccw_queue, list)
2021 if (cqr->status == DASD_CQR_IN_IO) {
2022 cqr->status = DASD_CQR_QUEUED;
2025 device->stopped |= DASD_STOPPED_DC_WAIT;
2026 dasd_set_timer(device, 0);
2028 dasd_schedule_bh(device);
/* Path operational again: lift both disconnect stop conditions. */
2032 /* FIXME: add a sanity check. */
2033 device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
2034 dasd_schedule_bh(device);
2038 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2039 dasd_put_device(device);
/*
 * Per-device helper for driver_for_each_device: set the ccw device
 * online when autodetect is enabled or its bus id was named on a
 * dasd= parameter.
 */
2044 * Automatically online either all dasd devices (dasd_autodetect) or
2045 * all devices specified with dasd= parameters.
2048 __dasd_auto_online(struct device *dev, void *data)
2050 struct ccw_device *cdev;
2052 cdev = to_ccwdev(dev);
2053 if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
2054 ccw_device_set_online(cdev);
/*
 * Walk all devices of the given discipline driver and auto-online the
 * eligible ones.  NOTE(review): the matching put_driver() for the
 * get_driver() below is not visible here - presumably in a gap of this
 * listing; confirm against the original source.
 */
2059 dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
2061 struct device_driver *drv;
2063 drv = get_driver(&dasd_discipline_driver->driver);
2064 driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
2069 * notifications for extended error reports
/* Notifier chain through which EER events are fanned out. */
2071 static struct notifier_block *dasd_eer_chain;
/* Subscribe a listener to extended error report notifications. */
2074 dasd_register_eer_notifier(struct notifier_block *nb)
2076 return notifier_chain_register(&dasd_eer_chain, nb);
/* Remove a previously registered EER listener. */
2080 dasd_unregister_eer_notifier(struct notifier_block *nb)
2082 return notifier_chain_unregister(&dasd_eer_chain, nb);
/*
 * Fire a DASD_EER_TRIGGER notification down the EER chain, packaging
 * the trigger id, device and (optional) ccw request into a
 * dasd_eer_trigger on the stack.
 */
2086 * Notify the registered error reporting module of a problem
2089 dasd_write_eer_trigger(unsigned int id, struct dasd_device *device,
2090 struct dasd_ccw_req *cqr)
2093 struct dasd_eer_trigger temp;
2095 temp.device = device;
2097 notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER,
/*
 * Ask the EER module to stop reporting for this device and drop any
 * per-device private state it holds.
 */
2103 * Tell the registered error reporting module to disable error reporting for
2104 * a given device and to cleanup any private data structures on that device.
2107 dasd_disable_eer(struct dasd_device *device)
2109 notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device);
/*
 * Module init fragment (presumably dasd_init - the signature is lost
 * in a gap of this listing; TODO confirm): set up the init waitqueue,
 * the "dasd" debug area (sprintf view, EMERG level), the devfs
 * directory, and the devmap/gendisk/ioctl/proc subsystems in order.
 * Error paths funnel into the MESSAGE at the end.
 */
2118 init_waitqueue_head(&dasd_init_waitq);
2120 /* register 'common' DASD debug area, used for all DBF_XXX calls */
2121 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
2122 if (dasd_debug_area == NULL) {
2126 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2127 debug_set_level(dasd_debug_area, DBF_EMERG);
2129 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2131 dasd_diag_discipline_pointer = NULL;
2133 rc = devfs_mk_dir("dasd");
2136 rc = dasd_devmap_init();
2139 rc = dasd_gendisk_init();
2145 rc = dasd_ioctl_init();
2148 #ifdef CONFIG_PROC_FS
2149 rc = dasd_proc_init();
2156 MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
/*
 * Module entry/exit hooks and the driver's exported API: plain
 * EXPORT_SYMBOL for the request/queue/sleep/EER interface used by the
 * discipline modules, EXPORT_SYMBOL_GPL for the generic ccw-driver
 * callbacks.
 */
2161 module_init(dasd_init);
2162 module_exit(dasd_exit);
2164 EXPORT_SYMBOL(dasd_debug_area);
2165 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2167 EXPORT_SYMBOL(dasd_add_request_head);
2168 EXPORT_SYMBOL(dasd_add_request_tail);
2169 EXPORT_SYMBOL(dasd_cancel_req);
2170 EXPORT_SYMBOL(dasd_clear_timer);
2171 EXPORT_SYMBOL(dasd_enable_device);
2172 EXPORT_SYMBOL(dasd_int_handler);
2173 EXPORT_SYMBOL(dasd_kfree_request);
2174 EXPORT_SYMBOL(dasd_kick_device);
2175 EXPORT_SYMBOL(dasd_kmalloc_request);
2176 EXPORT_SYMBOL(dasd_schedule_bh);
2177 EXPORT_SYMBOL(dasd_set_target_state);
2178 EXPORT_SYMBOL(dasd_set_timer);
2179 EXPORT_SYMBOL(dasd_sfree_request);
2180 EXPORT_SYMBOL(dasd_sleep_on);
2181 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2182 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2183 EXPORT_SYMBOL(dasd_smalloc_request);
2184 EXPORT_SYMBOL(dasd_start_IO);
2185 EXPORT_SYMBOL(dasd_term_IO);
2187 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2188 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2189 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2190 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2191 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2192 EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
2194 EXPORT_SYMBOL(dasd_register_eer_notifier);
2195 EXPORT_SYMBOL(dasd_unregister_eer_notifier);
2196 EXPORT_SYMBOL(dasd_write_eer_trigger);
2200 * Overrides for Emacs so that we follow Linus's tabbing style.
2201 * Emacs will notice this stuff at the end of the file and automatically
2202 * adjust the settings for this buffer only. This must remain at the end
2204 * ---------------------------------------------------------------------------
2207 * c-brace-imaginary-offset: 0
2208 * c-brace-offset: -4
2209 * c-argdecl-indent: 4
2210 * c-label-offset: -4
2211 * c-continued-statement-offset: 4
2212 * c-continued-brace-offset: 0
2213 * indent-tabs-mode: 1