2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * Copyright IBM Corp. 1999, 2009
11 #define KMSG_COMPONENT "dasd"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 #include <linux/kmod.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/ctype.h>
18 #include <linux/major.h>
19 #include <linux/slab.h>
20 #include <linux/buffer_head.h>
21 #include <linux/hdreg.h>
22 #include <linux/async.h>
23 #include <linux/mutex.h>
25 #include <asm/ccwdev.h>
26 #include <asm/ebcdic.h>
27 #include <asm/idals.h>
32 #define PRINTK_HEADER "dasd:"
36 * SECTION: Constant definitions to be used within this file
38 #define DASD_CHANQ_MAX_SIZE 4
40 #define DASD_SLEEPON_START_TAG (void *) 1
41 #define DASD_SLEEPON_END_TAG (void *) 2
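/*
 * Editor's note, illustrative only (not part of the original driver):
 * the sleep_on code stores these tags in cqr->callback_data.
 * _dasd_sleep_on() sets DASD_SLEEPON_START_TAG when it queues a
 * request, and dasd_wakeup_cb() replaces it with DASD_SLEEPON_END_TAG
 * so that _wait_for_wakeup() can detect completion.
 */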
44 * SECTION: exported variables of dasd.c
46 debug_info_t *dasd_debug_area;
47 struct dasd_discipline *dasd_diag_discipline_pointer;
48 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
50 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
51 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
52 " Copyright 2000 IBM Corporation");
53 MODULE_SUPPORTED_DEVICE("dasd");
54 MODULE_LICENSE("GPL");
57 * SECTION: prototypes for static functions of dasd.c
59 static int dasd_alloc_queue(struct dasd_block *);
60 static void dasd_setup_queue(struct dasd_block *);
61 static void dasd_free_queue(struct dasd_block *);
62 static void dasd_flush_request_queue(struct dasd_block *);
63 static int dasd_flush_block_queue(struct dasd_block *);
64 static void dasd_device_tasklet(struct dasd_device *);
65 static void dasd_block_tasklet(struct dasd_block *);
66 static void do_kick_device(struct work_struct *);
67 static void do_restore_device(struct work_struct *);
68 static void do_reload_device(struct work_struct *);
69 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
70 static void dasd_device_timeout(unsigned long);
71 static void dasd_block_timeout(unsigned long);
72 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
75 * SECTION: Operations on the device structure.
77 static wait_queue_head_t dasd_init_waitq;
78 static wait_queue_head_t dasd_flush_wq;
79 static wait_queue_head_t generic_waitq;
82 * Allocate memory for a new device structure.
84 struct dasd_device *dasd_alloc_device(void)
86 struct dasd_device *device;
88 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
90 return ERR_PTR(-ENOMEM);
92 /* Get two pages for normal block device operations. */
93 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
94 if (!device->ccw_mem) {
96 return ERR_PTR(-ENOMEM);
98 /* Get one page for error recovery. */
99 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
100 if (!device->erp_mem) {
101 free_pages((unsigned long) device->ccw_mem, 1);
103 return ERR_PTR(-ENOMEM);
106 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
107 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
108 spin_lock_init(&device->mem_lock);
109 atomic_set(&device->tasklet_scheduled, 0);
110 tasklet_init(&device->tasklet,
111 (void (*)(unsigned long)) dasd_device_tasklet,
112 (unsigned long) device);
113 INIT_LIST_HEAD(&device->ccw_queue);
114 init_timer(&device->timer);
115 device->timer.function = dasd_device_timeout;
116 device->timer.data = (unsigned long) device;
117 INIT_WORK(&device->kick_work, do_kick_device);
118 INIT_WORK(&device->restore_device, do_restore_device);
119 INIT_WORK(&device->reload_device, do_reload_device);
120 device->state = DASD_STATE_NEW;
121 device->target = DASD_STATE_NEW;
122 mutex_init(&device->state_mutex);
128 * Free memory of a device structure.
130 void dasd_free_device(struct dasd_device *device)
132 kfree(device->private);
133 free_page((unsigned long) device->erp_mem);
134 free_pages((unsigned long) device->ccw_mem, 1);
139 * Allocate memory for a new block structure.
141 struct dasd_block *dasd_alloc_block(void)
143 struct dasd_block *block;
145 block = kzalloc(sizeof(*block), GFP_ATOMIC);
147 return ERR_PTR(-ENOMEM);
148 /* open_count = 0 means device online but not in use */
149 atomic_set(&block->open_count, -1);
151 spin_lock_init(&block->request_queue_lock);
152 atomic_set(&block->tasklet_scheduled, 0);
153 tasklet_init(&block->tasklet,
154 (void (*)(unsigned long)) dasd_block_tasklet,
155 (unsigned long) block);
156 INIT_LIST_HEAD(&block->ccw_queue);
157 spin_lock_init(&block->queue_lock);
158 init_timer(&block->timer);
159 block->timer.function = dasd_block_timeout;
160 block->timer.data = (unsigned long) block;
166 * Free memory of a block structure.
168 void dasd_free_block(struct dasd_block *block)
174 * Make a new device known to the system.
176 static int dasd_state_new_to_known(struct dasd_device *device)
181 * As long as the device is not in state DASD_STATE_NEW we want to
182 * keep the reference count > 0.
184 dasd_get_device(device);
187 rc = dasd_alloc_queue(device->block);
189 dasd_put_device(device);
193 device->state = DASD_STATE_KNOWN;
198 * Let the system forget about a device.
200 static int dasd_state_known_to_new(struct dasd_device *device)
202 /* Disable extended error reporting for this device. */
203 dasd_eer_disable(device);
204 /* Forget the discipline information. */
205 if (device->discipline) {
206 if (device->discipline->uncheck_device)
207 device->discipline->uncheck_device(device);
208 module_put(device->discipline->owner);
210 device->discipline = NULL;
211 if (device->base_discipline)
212 module_put(device->base_discipline->owner);
213 device->base_discipline = NULL;
214 device->state = DASD_STATE_NEW;
217 dasd_free_queue(device->block);
219 /* Give up reference we took in dasd_state_new_to_known. */
220 dasd_put_device(device);
225 * Request the irq line for the device.
227 static int dasd_state_known_to_basic(struct dasd_device *device)
231 /* Allocate and register gendisk structure. */
233 rc = dasd_gendisk_alloc(device->block);
237 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
238 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
240 debug_register_view(device->debug_area, &debug_sprintf_view);
241 debug_set_level(device->debug_area, DBF_WARNING);
242 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
244 device->state = DASD_STATE_BASIC;
249 * Release the irq line for the device. Terminate any running i/o.
251 static int dasd_state_basic_to_known(struct dasd_device *device)
255 dasd_gendisk_free(device->block);
256 dasd_block_clear_timer(device->block);
258 rc = dasd_flush_device_queue(device);
261 dasd_device_clear_timer(device);
263 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
264 if (device->debug_area != NULL) {
265 debug_unregister(device->debug_area);
266 device->debug_area = NULL;
268 device->state = DASD_STATE_KNOWN;
273 * Do the initial analysis. The do_analysis function may return
274 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
275 * until the discipline decides to continue the startup sequence
276 * by calling the function dasd_change_state. The eckd discipline
277 * uses this to start a ccw that detects the format. The completion
278 * interrupt for this detection ccw uses the kernel event daemon to
279 * trigger the call to dasd_change_state. All this is done in the
280 * discipline code, see dasd_eckd.c.
281 * After the analysis ccw is done (do_analysis returned 0) the block device is set up.
283 * In case the analysis returns an error, the device setup is stopped
284 * (a fake disk was already added to allow formatting).
286 static int dasd_state_basic_to_ready(struct dasd_device *device)
289 struct dasd_block *block;
292 block = device->block;
293 /* make disk known with correct capacity */
295 if (block->base->discipline->do_analysis != NULL)
296 rc = block->base->discipline->do_analysis(block);
299 device->state = DASD_STATE_UNFMT;
302 dasd_setup_queue(block);
303 set_capacity(block->gdp,
304 block->blocks << block->s2b_shift);
305 device->state = DASD_STATE_READY;
306 rc = dasd_scan_partitions(block);
308 device->state = DASD_STATE_BASIC;
310 device->state = DASD_STATE_READY;
316 * Remove device from block device layer. Destroy dirty buffers.
317 * Forget format information. Check if the target level is basic
318 * and, if it is, create a fake disk for formatting.
320 static int dasd_state_ready_to_basic(struct dasd_device *device)
324 device->state = DASD_STATE_BASIC;
326 struct dasd_block *block = device->block;
327 rc = dasd_flush_block_queue(block);
329 device->state = DASD_STATE_READY;
332 dasd_flush_request_queue(block);
333 dasd_destroy_partitions(block);
336 block->s2b_shift = 0;
344 static int dasd_state_unfmt_to_basic(struct dasd_device *device)
346 device->state = DASD_STATE_BASIC;
351 * Make the device online and schedule the bottom half to start
352 * the requeueing of requests from the linux request queue to the ccw queue.
356 dasd_state_ready_to_online(struct dasd_device * device)
359 struct gendisk *disk;
360 struct disk_part_iter piter;
361 struct hd_struct *part;
363 if (device->discipline->ready_to_online) {
364 rc = device->discipline->ready_to_online(device);
368 device->state = DASD_STATE_ONLINE;
370 dasd_schedule_block_bh(device->block);
371 disk = device->block->bdev->bd_disk;
372 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
373 while ((part = disk_part_iter_next(&piter)))
374 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
375 disk_part_iter_exit(&piter);
381 * Stop the requeueing of requests again.
383 static int dasd_state_online_to_ready(struct dasd_device *device)
386 struct gendisk *disk;
387 struct disk_part_iter piter;
388 struct hd_struct *part;
390 if (device->discipline->online_to_ready) {
391 rc = device->discipline->online_to_ready(device);
395 device->state = DASD_STATE_READY;
397 disk = device->block->bdev->bd_disk;
398 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
399 while ((part = disk_part_iter_next(&piter)))
400 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
401 disk_part_iter_exit(&piter);
407 * Device startup state changes.
409 static int dasd_increase_state(struct dasd_device *device)
414 if (device->state == DASD_STATE_NEW &&
415 device->target >= DASD_STATE_KNOWN)
416 rc = dasd_state_new_to_known(device);
419 device->state == DASD_STATE_KNOWN &&
420 device->target >= DASD_STATE_BASIC)
421 rc = dasd_state_known_to_basic(device);
424 device->state == DASD_STATE_BASIC &&
425 device->target >= DASD_STATE_READY)
426 rc = dasd_state_basic_to_ready(device);
429 device->state == DASD_STATE_UNFMT &&
430 device->target > DASD_STATE_UNFMT)
434 device->state == DASD_STATE_READY &&
435 device->target >= DASD_STATE_ONLINE)
436 rc = dasd_state_ready_to_online(device);
442 * Device shutdown state changes.
444 static int dasd_decrease_state(struct dasd_device *device)
449 if (device->state == DASD_STATE_ONLINE &&
450 device->target <= DASD_STATE_READY)
451 rc = dasd_state_online_to_ready(device);
454 device->state == DASD_STATE_READY &&
455 device->target <= DASD_STATE_BASIC)
456 rc = dasd_state_ready_to_basic(device);
459 device->state == DASD_STATE_UNFMT &&
460 device->target <= DASD_STATE_BASIC)
461 rc = dasd_state_unfmt_to_basic(device);
464 device->state == DASD_STATE_BASIC &&
465 device->target <= DASD_STATE_KNOWN)
466 rc = dasd_state_basic_to_known(device);
469 device->state == DASD_STATE_KNOWN &&
470 device->target <= DASD_STATE_NEW)
471 rc = dasd_state_known_to_new(device);
477 * This is the main startup/shutdown routine.
479 static void dasd_change_state(struct dasd_device *device)
483 if (device->state == device->target)
484 /* Already where we want to go today... */
486 if (device->state < device->target)
487 rc = dasd_increase_state(device);
489 rc = dasd_decrease_state(device);
493 device->target = device->state;
495 if (device->state == device->target)
496 wake_up(&dasd_init_waitq);
498 /* let user-space know that the device status changed */
499 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
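/*
 * Editor's note, illustrative only (not part of the original driver):
 * dasd_change_state() walks the device through the state ladder one
 * step at a time until device->state == device->target:
 *
 *   DASD_STATE_NEW -> KNOWN -> BASIC -> READY (or UNFMT) -> ONLINE
 *
 * A typical online sequence therefore looks roughly like:
 *
 *   dasd_set_target_state(device, DASD_STATE_ONLINE);
 *   // dasd_change_state() is invoked (directly or via do_kick_device)
 *   // and calls dasd_increase_state() repeatedly until the target
 *   // state is reached or an intermediate step returns an error.
 */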
503 * Kick starter for devices that did not complete the startup/shutdown
504 * procedure or were sleeping because of a pending state.
505 * dasd_kick_device will schedule a call to do_kick_device to the kernel event daemon.
508 static void do_kick_device(struct work_struct *work)
510 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
511 mutex_lock(&device->state_mutex);
512 dasd_change_state(device);
513 mutex_unlock(&device->state_mutex);
514 dasd_schedule_device_bh(device);
515 dasd_put_device(device);
518 void dasd_kick_device(struct dasd_device *device)
520 dasd_get_device(device);
521 /* queue call to dasd_kick_device to the kernel event daemon. */
522 schedule_work(&device->kick_work);
526 * dasd_reload_device will schedule a call to do_reload_device to the kernel event daemon.
529 static void do_reload_device(struct work_struct *work)
531 struct dasd_device *device = container_of(work, struct dasd_device,
533 device->discipline->reload(device);
534 dasd_put_device(device);
537 void dasd_reload_device(struct dasd_device *device)
539 dasd_get_device(device);
540 /* queue call to dasd_reload_device to the kernel event daemon. */
541 schedule_work(&device->reload_device);
543 EXPORT_SYMBOL(dasd_reload_device);
546 * dasd_restore_device will schedule a call to do_restore_device to the kernel event daemon.
549 static void do_restore_device(struct work_struct *work)
551 struct dasd_device *device = container_of(work, struct dasd_device,
553 device->cdev->drv->restore(device->cdev);
554 dasd_put_device(device);
557 void dasd_restore_device(struct dasd_device *device)
559 dasd_get_device(device);
560 /* queue call to dasd_restore_device to the kernel event daemon. */
561 schedule_work(&device->restore_device);
565 * Set the target state for a device and start the state change.
567 void dasd_set_target_state(struct dasd_device *device, int target)
569 dasd_get_device(device);
570 mutex_lock(&device->state_mutex);
571 /* If we are in probeonly mode stop at DASD_STATE_READY. */
572 if (dasd_probeonly && target > DASD_STATE_READY)
573 target = DASD_STATE_READY;
574 if (device->target != target) {
575 if (device->state == target)
576 wake_up(&dasd_init_waitq);
577 device->target = target;
579 if (device->state != device->target)
580 dasd_change_state(device);
581 mutex_unlock(&device->state_mutex);
582 dasd_put_device(device);
586 * Enable a device and wait until it has reached its target state.
588 static inline int _wait_for_device(struct dasd_device *device)
590 return (device->state == device->target);
593 void dasd_enable_device(struct dasd_device *device)
595 dasd_set_target_state(device, DASD_STATE_ONLINE);
596 if (device->state <= DASD_STATE_KNOWN)
597 /* No discipline for device found. */
598 dasd_set_target_state(device, DASD_STATE_NEW);
599 /* Now wait for the devices to come up. */
600 wait_event(dasd_init_waitq, _wait_for_device(device));
604 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
606 #ifdef CONFIG_DASD_PROFILE
608 struct dasd_profile_info_t dasd_global_profile;
609 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
612 * Increments counter in global and local profiling structures.
614 #define dasd_profile_counter(value, counter, block) \
617 for (index = 0; index < 31 && value >> (2+index); index++); \
618 dasd_global_profile.counter[index]++; \
619 block->profile.counter[index]++; \
623 * Add profiling information for cqr before execution.
625 static void dasd_profile_start(struct dasd_block *block,
626 struct dasd_ccw_req *cqr,
630 unsigned int counter;
632 if (dasd_profile_level != DASD_PROFILE_ON)
635 /* count the length of the chanq for statistics */
637 list_for_each(l, &block->ccw_queue)
640 dasd_global_profile.dasd_io_nr_req[counter]++;
641 block->profile.dasd_io_nr_req[counter]++;
645 * Add profiling information for cqr after execution.
647 static void dasd_profile_end(struct dasd_block *block,
648 struct dasd_ccw_req *cqr,
651 long strtime, irqtime, endtime, tottime; /* in microseconds */
652 long tottimeps, sectors;
654 if (dasd_profile_level != DASD_PROFILE_ON)
657 sectors = blk_rq_sectors(req);
658 if (!cqr->buildclk || !cqr->startclk ||
659 !cqr->stopclk || !cqr->endclk ||
663 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
664 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
665 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
666 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
667 tottimeps = tottime / sectors;
669 if (!dasd_global_profile.dasd_io_reqs)
670 memset(&dasd_global_profile, 0,
671 sizeof(struct dasd_profile_info_t));
672 dasd_global_profile.dasd_io_reqs++;
673 dasd_global_profile.dasd_io_sects += sectors;
675 if (!block->profile.dasd_io_reqs)
676 memset(&block->profile, 0,
677 sizeof(struct dasd_profile_info_t));
678 block->profile.dasd_io_reqs++;
679 block->profile.dasd_io_sects += sectors;
681 dasd_profile_counter(sectors, dasd_io_secs, block);
682 dasd_profile_counter(tottime, dasd_io_times, block);
683 dasd_profile_counter(tottimeps, dasd_io_timps, block);
684 dasd_profile_counter(strtime, dasd_io_time1, block);
685 dasd_profile_counter(irqtime, dasd_io_time2, block);
686 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
687 dasd_profile_counter(endtime, dasd_io_time3, block);
690 #define dasd_profile_start(block, cqr, req) do {} while (0)
691 #define dasd_profile_end(block, cqr, req) do {} while (0)
692 #endif /* CONFIG_DASD_PROFILE */
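/*
 * Editor's note, illustrative only (not part of the original driver):
 * dasd_profile_counter() does a coarse log2 bucketing.  The loop
 *
 *   for (index = 0; index < 31 && value >> (2+index); index++);
 *
 * leaves index at roughly floor(log2(value)) - 1 (0 for values < 4),
 * e.g. value == 100 lands in bucket 5.  The ">> 12" shifts in
 * dasd_profile_end() convert TOD clock differences to microseconds,
 * since the s390 TOD clock ticks 4096 times per microsecond.
 */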
695 * Allocate memory for a channel program with 'cplength' channel
696 * command words and 'datasize' additional space. There are two
697 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
698 * memory and 2) dasd_smalloc_request uses the static ccw memory
699 * that gets allocated for each device.
701 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
703 struct dasd_device *device)
705 struct dasd_ccw_req *cqr;
708 BUG_ON(datasize > PAGE_SIZE ||
709 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
711 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
713 return ERR_PTR(-ENOMEM);
716 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
717 GFP_ATOMIC | GFP_DMA);
718 if (cqr->cpaddr == NULL) {
720 return ERR_PTR(-ENOMEM);
725 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
726 if (cqr->data == NULL) {
729 return ERR_PTR(-ENOMEM);
733 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
734 dasd_get_device(device);
738 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
740 struct dasd_device *device)
743 struct dasd_ccw_req *cqr;
748 BUG_ON(datasize > PAGE_SIZE ||
749 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
751 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
753 size += cplength * sizeof(struct ccw1);
756 spin_lock_irqsave(&device->mem_lock, flags);
757 cqr = (struct dasd_ccw_req *)
758 dasd_alloc_chunk(&device->ccw_chunks, size);
759 spin_unlock_irqrestore(&device->mem_lock, flags);
761 return ERR_PTR(-ENOMEM);
762 memset(cqr, 0, sizeof(struct dasd_ccw_req));
763 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
766 cqr->cpaddr = (struct ccw1 *) data;
767 data += cplength*sizeof(struct ccw1);
768 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
773 memset(cqr->data, 0, datasize);
776 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
777 dasd_get_device(device);
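/*
 * Editor's note, illustrative only (not part of the original driver):
 * the expression (sizeof(struct dasd_ccw_req) + 7L) & -8L used above
 * rounds the request header up to the next multiple of 8, so that the
 * channel program (the struct ccw1 array) that follows it in the chunk
 * is doubleword aligned, e.g.
 *
 *   (45 + 7) & -8  ==  48
 */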
782 * Free memory of a channel program. This function needs to free all the
783 * idal lists that might have been created by dasd_set_cda and the
784 * struct dasd_ccw_req itself.
786 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
791 /* Clear any idals used for the request. */
794 clear_normalized_cda(ccw);
795 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
800 dasd_put_device(device);
803 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
807 spin_lock_irqsave(&device->mem_lock, flags);
808 dasd_free_chunk(&device->ccw_chunks, cqr);
809 spin_unlock_irqrestore(&device->mem_lock, flags);
810 dasd_put_device(device);
814 * Check discipline magic in cqr.
816 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
818 struct dasd_device *device;
822 device = cqr->startdev;
823 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
824 DBF_DEV_EVENT(DBF_WARNING, device,
825 " dasd_ccw_req 0x%08x magic doesn't match"
826 " discipline 0x%08x",
828 *(unsigned int *) device->discipline->name);
835 * Terminate the current i/o and set the request to clear_pending.
836 * Timer keeps the device running.
837 * ccw_device_clear can fail if the i/o subsystem is in a bad state.
840 int dasd_term_IO(struct dasd_ccw_req *cqr)
842 struct dasd_device *device;
844 char errorstring[ERRORLENGTH];
847 rc = dasd_check_cqr(cqr);
851 device = (struct dasd_device *) cqr->startdev;
852 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
853 rc = ccw_device_clear(device->cdev, (long) cqr);
855 case 0: /* termination successful */
857 cqr->status = DASD_CQR_CLEAR_PENDING;
858 cqr->stopclk = get_clock();
860 DBF_DEV_EVENT(DBF_DEBUG, device,
861 "terminate cqr %p successful",
865 DBF_DEV_EVENT(DBF_ERR, device, "%s",
866 "device gone, retry");
869 DBF_DEV_EVENT(DBF_ERR, device, "%s",
874 DBF_DEV_EVENT(DBF_ERR, device, "%s",
875 "device busy, retry later");
878 /* internal error 10 - unknown rc */
879 snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
880 dev_err(&device->cdev->dev, "An error occurred in the "
881 "DASD device driver, reason=%s\n", errorstring);
887 dasd_schedule_device_bh(device);
892 * Start the i/o. This start_IO can fail if the channel is really busy.
893 * In that case set up a timer to start the request later.
895 int dasd_start_IO(struct dasd_ccw_req *cqr)
897 struct dasd_device *device;
899 char errorstring[ERRORLENGTH];
902 rc = dasd_check_cqr(cqr);
907 device = (struct dasd_device *) cqr->startdev;
908 if (cqr->retries < 0) {
909 /* internal error 14 - start_IO ran out of retries */
910 sprintf(errorstring, "14 %p", cqr);
911 dev_err(&device->cdev->dev, "An error occurred in the DASD "
912 "device driver, reason=%s\n", errorstring);
913 cqr->status = DASD_CQR_ERROR;
916 cqr->startclk = get_clock();
917 cqr->starttime = jiffies;
919 if (cqr->cpmode == 1) {
920 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
921 (long) cqr, cqr->lpm);
923 rc = ccw_device_start(device->cdev, cqr->cpaddr,
924 (long) cqr, cqr->lpm, 0);
928 cqr->status = DASD_CQR_IN_IO;
931 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
932 "start_IO: device busy, retry later");
935 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
936 "start_IO: request timeout, retry later");
939 /* -EACCES indicates that the request used only a
940 * subset of the available paths and all these paths are gone.
942 * Do a retry with all available paths.
944 cqr->lpm = LPM_ANYPATH;
945 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
946 "start_IO: selected pathes gone,"
947 " retry on all pathes");
950 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
951 "start_IO: -ENODEV device gone, retry");
954 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
955 "start_IO: -EIO device gone, retry");
958 /* most likely caused in power management context */
959 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
960 "start_IO: -EINVAL device currently "
964 /* internal error 11 - unknown rc */
965 snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
966 dev_err(&device->cdev->dev,
967 "An error occurred in the DASD device driver, "
968 "reason=%s\n", errorstring);
977 * Timeout function for dasd devices. This is used for different purposes
978 * 1) missing interrupt handler for normal operation
979 * 2) delayed start of request where start_IO failed with -EBUSY
980 * 3) timeout for missing state change interrupts
981 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
982 * DASD_CQR_QUEUED for 2) and 3).
984 static void dasd_device_timeout(unsigned long ptr)
987 struct dasd_device *device;
989 device = (struct dasd_device *) ptr;
990 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
991 /* re-activate request queue */
992 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
993 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
994 dasd_schedule_device_bh(device);
998 * Setup timeout for a device in jiffies.
1000 void dasd_device_set_timer(struct dasd_device *device, int expires)
1003 del_timer(&device->timer);
1005 mod_timer(&device->timer, jiffies + expires);
1009 * Clear timeout for a device.
1011 void dasd_device_clear_timer(struct dasd_device *device)
1013 del_timer(&device->timer);
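/*
 * Editor's note, illustrative only (not part of the original driver):
 * typical use of the per-device timer, as seen in the start and
 * interrupt paths below:
 *
 *   dasd_device_set_timer(device, 50);    // retry in 50 jiffies
 *   ...
 *   dasd_device_clear_timer(device);      // request completed
 *
 * Passing expires == 0 to dasd_device_set_timer() deletes the timer.
 */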
1016 static void dasd_handle_killed_request(struct ccw_device *cdev,
1017 unsigned long intparm)
1019 struct dasd_ccw_req *cqr;
1020 struct dasd_device *device;
1024 cqr = (struct dasd_ccw_req *) intparm;
1025 if (cqr->status != DASD_CQR_IN_IO) {
1026 DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1027 "invalid status in handle_killed_request: "
1028 "%02x", cqr->status);
1032 device = dasd_device_from_cdev_locked(cdev);
1033 if (IS_ERR(device)) {
1034 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1035 "unable to get device from cdev");
1039 if (!cqr->startdev ||
1040 device != cqr->startdev ||
1041 strncmp(cqr->startdev->discipline->ebcname,
1042 (char *) &cqr->magic, 4)) {
1043 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1044 "invalid device in request");
1045 dasd_put_device(device);
1049 /* Schedule request to be retried. */
1050 cqr->status = DASD_CQR_QUEUED;
1052 dasd_device_clear_timer(device);
1053 dasd_schedule_device_bh(device);
1054 dasd_put_device(device);
1057 void dasd_generic_handle_state_change(struct dasd_device *device)
1059 /* First of all start sense subsystem status request. */
1060 dasd_eer_snss(device);
1062 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1063 dasd_schedule_device_bh(device);
1065 dasd_schedule_block_bh(device->block);
1069 * Interrupt handler for "normal" ssch-io based dasd devices.
1071 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1074 struct dasd_ccw_req *cqr, *next;
1075 struct dasd_device *device;
1076 unsigned long long now;
1080 switch (PTR_ERR(irb)) {
1084 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1085 "request timed out\n", __func__);
1088 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1089 "unknown error %ld\n", __func__,
1092 dasd_handle_killed_request(cdev, intparm);
1098 /* check for unsolicited interrupts */
1099 cqr = (struct dasd_ccw_req *) intparm;
1100 if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
1101 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1102 ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) ||
1103 (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND |
1104 SCSW_STCTL_ALERT_STATUS))))) {
1105 if (cqr && cqr->status == DASD_CQR_IN_IO)
1106 cqr->status = DASD_CQR_QUEUED;
1108 memcpy(&cqr->irb, irb, sizeof(*irb));
1109 device = dasd_device_from_cdev_locked(cdev);
1110 if (!IS_ERR(device)) {
1111 device->discipline->dump_sense_dbf(device, irb,
1113 if ((device->features & DASD_FEATURE_ERPLOG))
1114 device->discipline->dump_sense(device, cqr,
1116 dasd_device_clear_timer(device);
1117 device->discipline->handle_unsolicited_interrupt(device,
1119 dasd_put_device(device);
1124 device = (struct dasd_device *) cqr->startdev;
1126 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1127 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1128 "invalid device in request");
1132 /* Check for clear pending */
1133 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1134 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1135 cqr->status = DASD_CQR_CLEARED;
1136 dasd_device_clear_timer(device);
1137 wake_up(&dasd_flush_wq);
1138 dasd_schedule_device_bh(device);
1142 /* check status - the request might have been killed by dyn detach */
1143 if (cqr->status != DASD_CQR_IN_IO) {
1144 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1145 "status %02x", dev_name(&cdev->dev), cqr->status);
1151 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1152 scsw_cstat(&irb->scsw) == 0) {
1153 /* request was completed successfully */
1154 cqr->status = DASD_CQR_SUCCESS;
1156 /* Start first request on queue if possible -> fast_io. */
1157 if (cqr->devlist.next != &device->ccw_queue) {
1158 next = list_entry(cqr->devlist.next,
1159 struct dasd_ccw_req, devlist);
1161 } else { /* error */
1162 memcpy(&cqr->irb, irb, sizeof(struct irb));
1163 /* log sense for every failed I/O to s390 debug feature */
1164 dasd_log_sense_dbf(cqr, irb);
1165 if (device->features & DASD_FEATURE_ERPLOG) {
1166 dasd_log_sense(cqr, irb);
1170 * If we don't want complex ERP for this request, then just
1171 * reset this and retry it in the fastpath
1173 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1175 if (cqr->lpm == LPM_ANYPATH)
1176 DBF_DEV_EVENT(DBF_DEBUG, device,
1177 "default ERP in fastpath "
1178 "(%i retries left)",
1180 cqr->lpm = LPM_ANYPATH;
1181 cqr->status = DASD_CQR_QUEUED;
1184 cqr->status = DASD_CQR_ERROR;
1186 if (next && (next->status == DASD_CQR_QUEUED) &&
1187 (!device->stopped)) {
1188 if (device->discipline->start_IO(next) == 0)
1189 expires = next->expires;
1192 dasd_device_set_timer(device, expires);
1194 dasd_device_clear_timer(device);
1195 dasd_schedule_device_bh(device);
1198 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1200 struct dasd_device *device;
1202 device = dasd_device_from_cdev_locked(cdev);
1206 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1207 device->state != device->target ||
1208 !device->discipline->handle_unsolicited_interrupt){
1209 dasd_put_device(device);
1213 dasd_device_clear_timer(device);
1214 device->discipline->handle_unsolicited_interrupt(device, irb);
1215 dasd_put_device(device);
1217 return UC_TODO_RETRY;
1219 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1222 * If we have an error on a dasd_block layer request then we cancel
1223 * and return all further requests from the same dasd_block as well.
1225 static void __dasd_device_recovery(struct dasd_device *device,
1226 struct dasd_ccw_req *ref_cqr)
1228 struct list_head *l, *n;
1229 struct dasd_ccw_req *cqr;
1232 * only requeue requests that came from the dasd_block layer
1234 if (!ref_cqr->block)
1237 list_for_each_safe(l, n, &device->ccw_queue) {
1238 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1239 if (cqr->status == DASD_CQR_QUEUED &&
1240 ref_cqr->block == cqr->block) {
1241 cqr->status = DASD_CQR_CLEARED;
1247 * Remove those ccw requests from the queue that need to be returned
1248 * to the upper layer.
1250 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1251 struct list_head *final_queue)
1253 struct list_head *l, *n;
1254 struct dasd_ccw_req *cqr;
1256 /* Process request with final status. */
1257 list_for_each_safe(l, n, &device->ccw_queue) {
1258 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1260 /* Stop list processing at the first non-final request. */
1261 if (cqr->status == DASD_CQR_QUEUED ||
1262 cqr->status == DASD_CQR_IN_IO ||
1263 cqr->status == DASD_CQR_CLEAR_PENDING)
1265 if (cqr->status == DASD_CQR_ERROR) {
1266 __dasd_device_recovery(device, cqr);
1268 /* Rechain finished requests to final queue */
1269 list_move_tail(&cqr->devlist, final_queue);
1274 * the cqrs from the final queue are returned to the upper layer
1275 * by setting a dasd_block state and calling the callback function
1277 static void __dasd_device_process_final_queue(struct dasd_device *device,
1278 struct list_head *final_queue)
1280 struct list_head *l, *n;
1281 struct dasd_ccw_req *cqr;
1282 struct dasd_block *block;
1283 void (*callback)(struct dasd_ccw_req *, void *data);
1284 void *callback_data;
1285 char errorstring[ERRORLENGTH];
1287 list_for_each_safe(l, n, final_queue) {
1288 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1289 list_del_init(&cqr->devlist);
1291 callback = cqr->callback;
1292 callback_data = cqr->callback_data;
1294 spin_lock_bh(&block->queue_lock);
1295 switch (cqr->status) {
1296 case DASD_CQR_SUCCESS:
1297 cqr->status = DASD_CQR_DONE;
1299 case DASD_CQR_ERROR:
1300 cqr->status = DASD_CQR_NEED_ERP;
1302 case DASD_CQR_CLEARED:
1303 cqr->status = DASD_CQR_TERMINATED;
1306 /* internal error 12 - wrong cqr status */
1307 snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
1308 dev_err(&device->cdev->dev,
1309 "An error occurred in the DASD device driver, "
1310 "reason=%s\n", errorstring);
1313 if (cqr->callback != NULL)
1314 (callback)(cqr, callback_data);
1316 spin_unlock_bh(&block->queue_lock);
1321 * Take a look at the first request on the ccw queue and check
1322 * if it reached its expire time. If so, terminate the IO.
1324 static void __dasd_device_check_expire(struct dasd_device *device)
1326 struct dasd_ccw_req *cqr;
1328 if (list_empty(&device->ccw_queue))
1330 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1331 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1332 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1333 if (device->discipline->term_IO(cqr) != 0) {
1334 /* Hmpf, try again in 5 sec */
1335 dev_err(&device->cdev->dev,
1336 "cqr %p timed out (%lus) but cannot be "
1337 "ended, retrying in 5 s\n",
1338 cqr, (cqr->expires/HZ));
1339 cqr->expires += 5*HZ;
1340 dasd_device_set_timer(device, 5*HZ);
1342 dev_err(&device->cdev->dev,
1343 "cqr %p timed out (%lus), %i retries "
1344 "remaining\n", cqr, (cqr->expires/HZ),
1351 * Take a look at the first request on the ccw queue and check
1352 * if it needs to be started.
1354 static void __dasd_device_start_head(struct dasd_device *device)
1356 struct dasd_ccw_req *cqr;
1359 if (list_empty(&device->ccw_queue))
1361 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1362 if (cqr->status != DASD_CQR_QUEUED)
1364 /* when device is stopped, return request to previous layer */
1365 if (device->stopped) {
1366 cqr->status = DASD_CQR_CLEARED;
1367 dasd_schedule_device_bh(device);
1371 rc = device->discipline->start_IO(cqr);
1373 dasd_device_set_timer(device, cqr->expires);
1374 else if (rc == -EACCES) {
1375 dasd_schedule_device_bh(device);
1377 /* Hmpf, try again in 1/2 sec */
1378 dasd_device_set_timer(device, 50);
1382 * Go through all requests on the dasd_device request queue,
1383 * terminate them on the cdev if necessary, and return them to the
1384 * submitting layer via callback.
1386 * Make sure that all 'submitting layers' still exist when
1387 * this function is called! In other words, when 'device' is a base
1388 * device, then all block layer requests must already have been removed
1389 * via dasd_flush_block_queue.
1391 int dasd_flush_device_queue(struct dasd_device *device)
1393 struct dasd_ccw_req *cqr, *n;
1395 struct list_head flush_queue;
1397 INIT_LIST_HEAD(&flush_queue);
1398 spin_lock_irq(get_ccwdev_lock(device->cdev));
1400 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
1401 /* Check status and move request to flush_queue */
1402 switch (cqr->status) {
1403 case DASD_CQR_IN_IO:
1404 rc = device->discipline->term_IO(cqr);
1406 /* unable to terminate request */
1407 dev_err(&device->cdev->dev,
1408 "Flushing the DASD request queue "
1409 "failed for request %p\n", cqr);
1410 /* stop flush processing */
1414 case DASD_CQR_QUEUED:
1415 cqr->stopclk = get_clock();
1416 cqr->status = DASD_CQR_CLEARED;
1418 default: /* no need to modify the others */
1421 list_move_tail(&cqr->devlist, &flush_queue);
1424 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1426 * After this point all requests must be in state CLEAR_PENDING,
1427 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1428 * one of the others.
1430 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1431 wait_event(dasd_flush_wq,
1432 (cqr->status != DASD_CQR_CLEAR_PENDING));
1434 * Now set each request back to TERMINATED, DONE or NEED_ERP
1435 * and call the callback function of flushed requests
1437 __dasd_device_process_final_queue(device, &flush_queue);
1442 * Acquire the device lock and process queues for the device.
1444 static void dasd_device_tasklet(struct dasd_device *device)
1446 struct list_head final_queue;
1448 atomic_set(&device->tasklet_scheduled, 0);
1449 INIT_LIST_HEAD(&final_queue);
1450 spin_lock_irq(get_ccwdev_lock(device->cdev));
1451 /* Check expire time of first request on the ccw queue. */
1452 __dasd_device_check_expire(device);
1453 /* find final requests on ccw queue */
1454 __dasd_device_process_ccw_queue(device, &final_queue);
1455 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1456 /* Now call the callback function of requests with final status */
1457 __dasd_device_process_final_queue(device, &final_queue);
1458 spin_lock_irq(get_ccwdev_lock(device->cdev));
1459 /* Now check if the head of the ccw queue needs to be started. */
1460 __dasd_device_start_head(device);
1461 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1462 dasd_put_device(device);
1466 * Schedules a call to dasd_device_tasklet over the device tasklet.
1468 void dasd_schedule_device_bh(struct dasd_device *device)
1470 /* Protect against rescheduling. */
1471 if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
1473 dasd_get_device(device);
1474 tasklet_hi_schedule(&device->tasklet);
1477 void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
1479 device->stopped |= bits;
1481 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
1483 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
1485 device->stopped &= ~bits;
1486 if (!device->stopped)
1487 wake_up(&generic_waitq);
1489 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
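/*
 * Editor's note, illustrative only (not part of the original driver):
 * device->stopped is a bitmask; several independent reasons for not
 * starting I/O can be active at the same time, e.g.
 *
 *   dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
 *   ...
 *   dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 *
 * Only when the last bit is cleared does dasd_device_remove_stop_bits()
 * wake up waiters on generic_waitq, so sleep_on requests can start again.
 */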
1492 * Queue a request to the head of the device ccw_queue.
1493 * Start the I/O if possible.
1495 void dasd_add_request_head(struct dasd_ccw_req *cqr)
1497 struct dasd_device *device;
1498 unsigned long flags;
1500 device = cqr->startdev;
1501 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1502 cqr->status = DASD_CQR_QUEUED;
1503 list_add(&cqr->devlist, &device->ccw_queue);
1504 /* let the bh start the request to keep them in order */
1505 dasd_schedule_device_bh(device);
1506 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1510 * Queue a request to the tail of the device ccw_queue.
1511 * Start the I/O if possible.
1513 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1515 struct dasd_device *device;
1516 unsigned long flags;
1518 device = cqr->startdev;
1519 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1520 cqr->status = DASD_CQR_QUEUED;
1521 list_add_tail(&cqr->devlist, &device->ccw_queue);
1522 /* let the bh start the request to keep them in order */
1523 dasd_schedule_device_bh(device);
1524 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1528 * Wakeup helper for the 'sleep_on' functions.
1530 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1532 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1533 cqr->callback_data = DASD_SLEEPON_END_TAG;
1534 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1535 wake_up(&generic_waitq);
1538 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1540 struct dasd_device *device;
1543 device = cqr->startdev;
1544 spin_lock_irq(get_ccwdev_lock(device->cdev));
1545 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
1546 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1551 * Checks if error recovery is necessary; returns 1 if yes, 0 otherwise.
1553 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
1555 struct dasd_device *device;
1556 dasd_erp_fn_t erp_fn;
1558 if (cqr->status == DASD_CQR_FILLED)
1560 device = cqr->startdev;
1561 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1562 if (cqr->status == DASD_CQR_TERMINATED) {
1563 device->discipline->handle_terminated_request(cqr);
1566 if (cqr->status == DASD_CQR_NEED_ERP) {
1567 erp_fn = device->discipline->erp_action(cqr);
1571 if (cqr->status == DASD_CQR_FAILED)
1572 dasd_log_sense(cqr, &cqr->irb);
1574 __dasd_process_erp(device, cqr);
1581 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
1583 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1584 if (cqr->refers) /* erp is not done yet */
1586 return ((cqr->status != DASD_CQR_DONE) &&
1587 (cqr->status != DASD_CQR_FAILED));
1589 return (cqr->status == DASD_CQR_FILLED);
1592 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
1594 struct dasd_device *device;
1596 struct list_head ccw_queue;
1597 struct dasd_ccw_req *cqr;
1599 INIT_LIST_HEAD(&ccw_queue);
1600 maincqr->status = DASD_CQR_FILLED;
1601 device = maincqr->startdev;
1602 list_add(&maincqr->blocklist, &ccw_queue);
1603 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
1604 cqr = list_first_entry(&ccw_queue,
1605 struct dasd_ccw_req, blocklist)) {
1607 if (__dasd_sleep_on_erp(cqr))
1609 if (cqr->status != DASD_CQR_FILLED) /* could be failed */
1612 /* Non-temporary stop condition will trigger fail fast */
1613 if (device->stopped & ~DASD_STOPPED_PENDING &&
1614 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1615 (!dasd_eer_enabled(device))) {
1616 cqr->status = DASD_CQR_FAILED;
1620 /* Don't try to start requests if device is stopped */
1621 if (interruptible) {
1622 rc = wait_event_interruptible(
1623 generic_waitq, !(device->stopped));
1624 if (rc == -ERESTARTSYS) {
1625 cqr->status = DASD_CQR_FAILED;
1626 maincqr->intrc = rc;
1630 wait_event(generic_waitq, !(device->stopped));
1632 cqr->callback = dasd_wakeup_cb;
1633 cqr->callback_data = DASD_SLEEPON_START_TAG;
1634 dasd_add_request_tail(cqr);
1635 if (interruptible) {
1636 rc = wait_event_interruptible(
1637 generic_waitq, _wait_for_wakeup(cqr));
1638 if (rc == -ERESTARTSYS) {
1639 dasd_cancel_req(cqr);
1640 /* wait (non-interruptible) for final status */
1641 wait_event(generic_waitq,
1642 _wait_for_wakeup(cqr));
1643 cqr->status = DASD_CQR_FAILED;
1644 maincqr->intrc = rc;
1648 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1651 maincqr->endclk = get_clock();
1652 if ((maincqr->status != DASD_CQR_DONE) &&
1653 (maincqr->intrc != -ERESTARTSYS))
1654 dasd_log_sense(maincqr, &maincqr->irb);
1655 if (maincqr->status == DASD_CQR_DONE)
1657 else if (maincqr->intrc)
1658 rc = maincqr->intrc;
1665 * Queue a request to the tail of the device ccw_queue and wait for
1668 int dasd_sleep_on(struct dasd_ccw_req *cqr)
1670 return _dasd_sleep_on(cqr, 0);
1674 * Queue a request to the tail of the device ccw_queue and wait
1675 * interruptibly for its completion.
1677 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1679 return _dasd_sleep_on(cqr, 1);
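/*
 * Editor's note, illustrative sketch only (not part of the original
 * driver): a discipline typically uses the sleep_on interface for
 * synchronous housekeeping I/O roughly like this; the helper name
 * build_my_cqr() is hypothetical.
 *
 *   struct dasd_ccw_req *cqr;
 *   int rc;
 *
 *   cqr = build_my_cqr(device);        // e.g. via dasd_smalloc_request()
 *   if (IS_ERR(cqr))
 *           return PTR_ERR(cqr);
 *   rc = dasd_sleep_on(cqr);           // queue and wait for completion
 *   dasd_sfree_request(cqr, device);
 *   return rc;
 */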
1683 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
1684 * for eckd devices) the currently running request has to be terminated
1685 * and put back to status queued before the special request is added
1686 * to the head of the queue. Then the special request is waited on normally.
1688 static inline int _dasd_term_running_cqr(struct dasd_device *device)
1690 struct dasd_ccw_req *cqr;
1692 if (list_empty(&device->ccw_queue))
1694 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1695 return device->discipline->term_IO(cqr);
1698 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1700 struct dasd_device *device;
1703 device = cqr->startdev;
1704 spin_lock_irq(get_ccwdev_lock(device->cdev));
1705 rc = _dasd_term_running_cqr(device);
1707 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1711 cqr->callback = dasd_wakeup_cb;
1712 cqr->callback_data = DASD_SLEEPON_START_TAG;
1713 cqr->status = DASD_CQR_QUEUED;
1714 list_add(&cqr->devlist, &device->ccw_queue);
1716 /* let the bh start the request to keep them in order */
1717 dasd_schedule_device_bh(device);
1719 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1721 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1723 if (cqr->status == DASD_CQR_DONE)
1725 else if (cqr->intrc)
1733 * Cancels a request that was started with dasd_sleep_on_req.
1734 * This is useful for timing out requests. The request will be
1735 * terminated if it is currently in i/o.
1736 * Returns 1 if the request has been terminated.
1737 * 0 if there was no need to terminate the request (not started yet)
1738 * negative error code if termination failed
1739 * Cancellation of a request is an asynchronous operation! The calling
1740 * function has to wait until the request is properly returned via callback.
1742 int dasd_cancel_req(struct dasd_ccw_req *cqr)
1744 struct dasd_device *device = cqr->startdev;
1745 unsigned long flags;
1749 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1750 switch (cqr->status) {
1751 case DASD_CQR_QUEUED:
1752 /* request was not started - just set to cleared */
1753 cqr->status = DASD_CQR_CLEARED;
1755 case DASD_CQR_IN_IO:
1756 /* request in IO - terminate IO and release again */
1757 rc = device->discipline->term_IO(cqr);
1759 dev_err(&device->cdev->dev,
1760 "Cancelling request %p failed with rc=%d\n",
1763 cqr->stopclk = get_clock();
1766 default: /* already finished or clear pending - do nothing */
1769 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1770 dasd_schedule_device_bh(device);
1776 * SECTION: Operations of the dasd_block layer.
1780 * Timeout function for dasd_block. This is used when the block layer
1781 * is waiting for something that may not come reliably (e.g. a state change interrupt).
1784 static void dasd_block_timeout(unsigned long ptr)
1786 unsigned long flags;
1787 struct dasd_block *block;
1789 block = (struct dasd_block *) ptr;
1790 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1791 /* re-activate request queue */
1792 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
1793 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1794 dasd_schedule_block_bh(block);
1798 * Setup timeout for a dasd_block in jiffies.
1800 void dasd_block_set_timer(struct dasd_block *block, int expires)
1803 del_timer(&block->timer);
1805 mod_timer(&block->timer, jiffies + expires);
1809 * Clear timeout for a dasd_block.
1811 void dasd_block_clear_timer(struct dasd_block *block)
1813 del_timer(&block->timer);
1817 * Process finished error recovery ccw.
1819 static void __dasd_process_erp(struct dasd_device *device,
1820 struct dasd_ccw_req *cqr)
1822 dasd_erp_fn_t erp_fn;
1824 if (cqr->status == DASD_CQR_DONE)
1825 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1827 dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
1828 erp_fn = device->discipline->erp_postaction(cqr);
1833 * Fetch requests from the block device queue.
1835 static void __dasd_process_request_queue(struct dasd_block *block)
1837 struct request_queue *queue;
1838 struct request *req;
1839 struct dasd_ccw_req *cqr;
1840 struct dasd_device *basedev;
1841 unsigned long flags;
1842 queue = block->request_queue;
1843 basedev = block->base;
1844 /* No queue ? Then there is nothing to do. */
1849 * We requeue requests from the block device queue to the ccw
1850 * queue only in two states. In state DASD_STATE_READY the
1851 * partition detection is done and we need to requeue requests
1852 * for that. State DASD_STATE_ONLINE is normal block device operation.
1855 if (basedev->state < DASD_STATE_READY) {
1856 while ((req = blk_fetch_request(block->request_queue)))
1857 __blk_end_request_all(req, -EIO);
1860 /* Now we try to fetch requests from the request queue */
1861 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1862 if (basedev->features & DASD_FEATURE_READONLY &&
1863 rq_data_dir(req) == WRITE) {
1864 DBF_DEV_EVENT(DBF_ERR, basedev,
1865 "Rejecting write request %p",
1867 blk_start_request(req);
1868 __blk_end_request_all(req, -EIO);
1871 cqr = basedev->discipline->build_cp(basedev, block, req);
1873 if (PTR_ERR(cqr) == -EBUSY)
1874 break; /* normal end condition */
1875 if (PTR_ERR(cqr) == -ENOMEM)
1876 break; /* terminate request queue loop */
1877 if (PTR_ERR(cqr) == -EAGAIN) {
1879 * The current request cannot be built right
1880 * now, we have to try again later. If this request
1881 * is the head-of-queue we stop the device for half a second.
1884 if (!list_empty(&block->ccw_queue))
1887 get_ccwdev_lock(basedev->cdev), flags);
1888 dasd_device_set_stop_bits(basedev,
1889 DASD_STOPPED_PENDING);
1890 spin_unlock_irqrestore(
1891 get_ccwdev_lock(basedev->cdev), flags);
1892 dasd_block_set_timer(block, HZ/2);
1895 DBF_DEV_EVENT(DBF_ERR, basedev,
1896 "CCW creation failed (rc=%ld) "
1899 blk_start_request(req);
1900 __blk_end_request_all(req, -EIO);
1904 * Note: callback is set to dasd_return_cqr_cb in
1905 * __dasd_block_start_head to cover erp requests as well
1907 cqr->callback_data = (void *) req;
1908 cqr->status = DASD_CQR_FILLED;
1909 blk_start_request(req);
1910 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1911 dasd_profile_start(block, cqr, req);
1915 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1917 struct request *req;
1921 req = (struct request *) cqr->callback_data;
1922 dasd_profile_end(cqr->block, cqr, req);
1923 status = cqr->block->base->discipline->free_cp(cqr, req);
1925 error = status ? status : -EIO;
1926 __blk_end_request_all(req, error);
1930 * Process ccw request queue.
1932 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1933 struct list_head *final_queue)
1935 struct list_head *l, *n;
1936 struct dasd_ccw_req *cqr;
1937 dasd_erp_fn_t erp_fn;
1938 unsigned long flags;
1939 struct dasd_device *base = block->base;
1942 /* Process request with final status. */
1943 list_for_each_safe(l, n, &block->ccw_queue) {
1944 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1945 if (cqr->status != DASD_CQR_DONE &&
1946 cqr->status != DASD_CQR_FAILED &&
1947 cqr->status != DASD_CQR_NEED_ERP &&
1948 cqr->status != DASD_CQR_TERMINATED)
1951 if (cqr->status == DASD_CQR_TERMINATED) {
1952 base->discipline->handle_terminated_request(cqr);
1956 /* Process requests that may be recovered */
1957 if (cqr->status == DASD_CQR_NEED_ERP) {
1958 erp_fn = base->discipline->erp_action(cqr);
1959 if (IS_ERR(erp_fn(cqr)))
1964 /* log sense for fatal error */
1965 if (cqr->status == DASD_CQR_FAILED) {
1966 dasd_log_sense(cqr, &cqr->irb);
1969 /* First of all call extended error reporting. */
1970 if (dasd_eer_enabled(base) &&
1971 cqr->status == DASD_CQR_FAILED) {
1972 dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1974 /* restart request */
1975 cqr->status = DASD_CQR_FILLED;
1977 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1978 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
1979 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1984 /* Process finished ERP request. */
1986 __dasd_process_erp(base, cqr);
1990 /* Rechain finished requests to final queue */
1991 cqr->endclk = get_clock();
1992 list_move_tail(&cqr->blocklist, final_queue);
1996 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1998 dasd_schedule_block_bh(cqr->block);
2001 static void __dasd_block_start_head(struct dasd_block *block)
2003 struct dasd_ccw_req *cqr;
2005 if (list_empty(&block->ccw_queue))
2007 /* We always begin with the first request on the queue, as some
2008 * of the previously started requests have to be enqueued on a
2009 * dasd_device again for error recovery.
2011 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2012 if (cqr->status != DASD_CQR_FILLED)
2014 /* Non-temporary stop condition will trigger fail fast */
2015 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2016 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2017 (!dasd_eer_enabled(block->base))) {
2018 cqr->status = DASD_CQR_FAILED;
2019 dasd_schedule_block_bh(block);
2022 /* Don't try to start requests if device is stopped */
2023 if (block->base->stopped)
2026 /* just a fail safe check, should not happen */
2028 cqr->startdev = block->base;
2030 /* make sure that the requests we submit find their way back */
2031 cqr->callback = dasd_return_cqr_cb;
2033 dasd_add_request_tail(cqr);
2038 * Central dasd_block layer routine. Takes requests from the generic
2039 * block layer request queue, creates ccw requests, enqueues them on
2040 * a dasd_device and processes ccw requests that have been returned.
2042 static void dasd_block_tasklet(struct dasd_block *block)
2044 struct list_head final_queue;
2045 struct list_head *l, *n;
2046 struct dasd_ccw_req *cqr;
2048 atomic_set(&block->tasklet_scheduled, 0);
2049 INIT_LIST_HEAD(&final_queue);
2050 spin_lock(&block->queue_lock);
2051 /* Finish off requests on ccw queue */
2052 __dasd_process_block_ccw_queue(block, &final_queue);
2053 spin_unlock(&block->queue_lock);
2054 /* Now call the callback function of requests with final status */
2055 spin_lock_irq(&block->request_queue_lock);
2056 list_for_each_safe(l, n, &final_queue) {
2057 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2058 list_del_init(&cqr->blocklist);
2059 __dasd_cleanup_cqr(cqr);
2061 spin_lock(&block->queue_lock);
2062 /* Get new request from the block device request queue */
2063 __dasd_process_request_queue(block);
2064 /* Now check if the head of the ccw queue needs to be started. */
2065 __dasd_block_start_head(block);
2066 spin_unlock(&block->queue_lock);
2067 spin_unlock_irq(&block->request_queue_lock);
2068 dasd_put_device(block->base);
2071 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2073 wake_up(&dasd_flush_wq);
2077 * Go through all requests on the dasd_block request queue, cancel them
2078 * on the respective dasd_device, and return them to the generic
2081 static int dasd_flush_block_queue(struct dasd_block *block)
2083 struct dasd_ccw_req *cqr, *n;
2085 struct list_head flush_queue;
2087 INIT_LIST_HEAD(&flush_queue);
2088 spin_lock_bh(&block->queue_lock);
2091 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2092 /* if this request is currently owned by a dasd_device, cancel it */
2093 if (cqr->status >= DASD_CQR_QUEUED)
2094 rc = dasd_cancel_req(cqr);
2097 /* Rechain request (including erp chain) so it won't be
2098 * touched by the dasd_block_tasklet anymore.
2099 * Replace the callback so we notice when the request
2100 * is returned from the dasd_device layer.
2102 cqr->callback = _dasd_wake_block_flush_cb;
2103 for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
2104 list_move_tail(&cqr->blocklist, &flush_queue);
2106 /* moved more than one request - need to restart */
2109 spin_unlock_bh(&block->queue_lock);
2110 /* Now call the callback function of flushed requests */
2112 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
2113 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
2114 /* Process finished ERP request. */
2116 spin_lock_bh(&block->queue_lock);
2117 __dasd_process_erp(block->base, cqr);
2118 spin_unlock_bh(&block->queue_lock);
2119 /* restart list_for_xx loop since dasd_process_erp
2120 * might remove multiple elements */
2123 /* call the callback function */
2124 spin_lock_irq(&block->request_queue_lock);
2125 cqr->endclk = get_clock();
2126 list_del_init(&cqr->blocklist);
2127 __dasd_cleanup_cqr(cqr);
2128 spin_unlock_irq(&block->request_queue_lock);
2134 * Schedules a call to dasd_block_tasklet over the block's tasklet.
2136 void dasd_schedule_block_bh(struct dasd_block *block)
2138 /* Protect against rescheduling. */
2139 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
2141 /* life cycle of block is bound to its base device */
2142 dasd_get_device(block->base);
2143 tasklet_hi_schedule(&block->tasklet);
2148 * SECTION: external block device operations
2149 * (request queue handling, open, release, etc.)
2153 * Dasd request queue function. Called from ll_rw_blk.c
2155 static void do_dasd_request(struct request_queue *queue)
2157 struct dasd_block *block;
2159 block = queue->queuedata;
2160 spin_lock(&block->queue_lock);
2161 /* Get new request from the block device request queue */
2162 __dasd_process_request_queue(block);
2163 /* Now check if the head of the ccw queue needs to be started. */
2164 __dasd_block_start_head(block);
2165 spin_unlock(&block->queue_lock);
2169 * Allocate and initialize request queue and default I/O scheduler.
2171 static int dasd_alloc_queue(struct dasd_block *block)
2175 block->request_queue = blk_init_queue(do_dasd_request,
2176 &block->request_queue_lock);
2177 if (block->request_queue == NULL)
2180 block->request_queue->queuedata = block;
2182 elevator_exit(block->request_queue->elevator);
2183 block->request_queue->elevator = NULL;
2184 rc = elevator_init(block->request_queue, "deadline");
2186 blk_cleanup_queue(block->request_queue);
2193 * Set up the request queue limits.
2195 static void dasd_setup_queue(struct dasd_block *block)
2199 blk_queue_logical_block_size(block->request_queue, block->bp_block);
2200 max = block->base->discipline->max_blocks << block->s2b_shift;
2201 blk_queue_max_hw_sectors(block->request_queue, max);
2202 blk_queue_max_segments(block->request_queue, -1L);
2203 /* with page sized segments we can translate each segment into one idaw/tidaw */
2206 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
2207 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
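/*
 * Worked example (illustrative values, not taken from this file): with a
 * 4096-byte device block size, bp_block = 4096 and s2b_shift = 3, so one
 * device block spans 8 sectors of 512 bytes. If a discipline reported
 * max_blocks = 240, the resulting limit would be 240 << 3 = 1920 sectors
 * (960 KiB) per request. The PAGE_SIZE segment size and boundary ensure
 * that every segment can be described by a single (t)idaw.
 */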
2211 * Deactivate and free request queue.
2213 static void dasd_free_queue(struct dasd_block *block)
2215 if (block->request_queue) {
2216 blk_cleanup_queue(block->request_queue);
2217 block->request_queue = NULL;
2222 * Flush requests on the request queue.
2224 static void dasd_flush_request_queue(struct dasd_block *block)
2226 struct request *req;
2228 if (!block->request_queue)
2231 spin_lock_irq(&block->request_queue_lock);
2232 while ((req = blk_fetch_request(block->request_queue)))
2233 __blk_end_request_all(req, -EIO);
2234 spin_unlock_irq(&block->request_queue_lock);
2237 static int dasd_open(struct block_device *bdev, fmode_t mode)
2239 struct dasd_block *block = bdev->bd_disk->private_data;
2240 struct dasd_device *base;
2247 atomic_inc(&block->open_count);
2248 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2253 if (!try_module_get(base->discipline->owner)) {
2258 if (dasd_probeonly) {
2259 dev_info(&base->cdev->dev,
2260 "Accessing the DASD failed because it is in "
2261 "probeonly mode\n");
2266 if (base->state <= DASD_STATE_BASIC) {
2267 DBF_DEV_EVENT(DBF_ERR, base, " %s",
2268 " Cannot open unrecognized device");
2273 if ((mode & FMODE_WRITE) &&
2274 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
2275 (base->features & DASD_FEATURE_READONLY))) {
2283 module_put(base->discipline->owner);
2285 atomic_dec(&block->open_count);
2289 static int dasd_release(struct gendisk *disk, fmode_t mode)
2291 struct dasd_block *block = disk->private_data;
2293 atomic_dec(&block->open_count);
2294 module_put(block->base->discipline->owner);
2299 * Return disk geometry.
2301 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2303 struct dasd_block *block;
2304 struct dasd_device *base;
2306 block = bdev->bd_disk->private_data;
2311 if (!base->discipline ||
2312 !base->discipline->fill_geometry)
2315 base->discipline->fill_geometry(block, geo);
2316 geo->start = get_start_sect(bdev) >> block->s2b_shift;
2320 const struct block_device_operations
2321 dasd_device_operations = {
2322 .owner = THIS_MODULE,
2324 .release = dasd_release,
2325 .ioctl = dasd_ioctl,
2326 .compat_ioctl = dasd_ioctl,
2327 .getgeo = dasd_getgeo,
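/*
 * Illustrative sketch (assumption about dasd_genhd.c, which is not part of
 * this file): the gendisk created for a dasd_block is wired to the
 * operations above roughly as follows.
 */
#if 0
	gdp = alloc_disk(1 << DASD_PARTN_BITS);
	if (!gdp)
		return -ENOMEM;
	gdp->fops = &dasd_device_operations;
	gdp->private_data = block;
	gdp->queue = block->request_queue;
#endif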
2330 /*******************************************************************************
2331 * end of block device operations
2337 #ifdef CONFIG_PROC_FS
2341 if (dasd_page_cache != NULL) {
2342 kmem_cache_destroy(dasd_page_cache);
2343 dasd_page_cache = NULL;
2345 dasd_gendisk_exit();
2347 if (dasd_debug_area != NULL) {
2348 debug_unregister(dasd_debug_area);
2349 dasd_debug_area = NULL;
2354 * SECTION: common functions for ccw_driver use
2358 * Is the device read-only?
2359 * Note that this function does not report the setting of the
2360 * readonly device attribute, but how it is configured in z/VM.
2362 int dasd_device_is_ro(struct dasd_device *device)
2364 struct ccw_dev_id dev_id;
2365 struct diag210 diag_data;
2370 ccw_device_get_id(device->cdev, &dev_id);
2371 memset(&diag_data, 0, sizeof(diag_data));
2372 diag_data.vrdcdvno = dev_id.devno;
2373 diag_data.vrdclen = sizeof(diag_data);
2374 rc = diag210(&diag_data);
2375 if (rc == 0 || rc == 2) {
2376 return diag_data.vrdcvfla & 0x80;
2378 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
2383 EXPORT_SYMBOL_GPL(dasd_device_is_ro);
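/*
 * Illustrative sketch (not part of this driver): a discipline could use
 * dasd_device_is_ro() while bringing a device online to honour a read-only
 * z/VM attachment. my_discipline_check_vm_ro() is a hypothetical name.
 */
#if 0
static void my_discipline_check_vm_ro(struct dasd_device *device)
{
	if (dasd_device_is_ro(device) > 0)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
}
#endif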
2385 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2387 struct ccw_device *cdev = data;
2390 ret = ccw_device_set_online(cdev);
2392 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2393 dev_name(&cdev->dev), ret);
2397 * Initial attempt at a probe function. This can be simplified once
2398 * the other detection code is gone.
2400 int dasd_generic_probe(struct ccw_device *cdev,
2401 struct dasd_discipline *discipline)
2405 ret = dasd_add_sysfs_files(cdev);
2407 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
2408 "dasd_generic_probe: could not add "
2412 cdev->handler = &dasd_int_handler;
2415 * Automatically online either all dasd devices (dasd_autodetect)
2416 * or all devices specified with dasd= parameters during the initial probe.
2419 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
2420 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2421 async_schedule(dasd_generic_auto_online, cdev);
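/*
 * Illustrative sketch (assumption): a discipline module typically forwards
 * its ccw_driver probe callback straight to dasd_generic_probe() with its
 * own discipline structure. my_probe and my_discipline are hypothetical names.
 */
#if 0
static int my_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &my_discipline);
}
#endif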
2426 * This will one day be called from a global not_oper handler.
2427 * It is also used by driver_unregister during module unload.
2429 void dasd_generic_remove(struct ccw_device *cdev)
2431 struct dasd_device *device;
2432 struct dasd_block *block;
2434 cdev->handler = NULL;
2436 dasd_remove_sysfs_files(cdev);
2437 device = dasd_device_from_cdev(cdev);
2440 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2441 /* Already doing offline processing */
2442 dasd_put_device(device);
2446 * This device is removed unconditionally. Set offline
2447 * flag to prevent dasd_open from opening it while it is
2448 * not quite down yet.
2450 dasd_set_target_state(device, DASD_STATE_NEW);
2451 /* dasd_delete_device destroys the device reference. */
2452 block = device->block;
2453 device->block = NULL;
2454 dasd_delete_device(device);
2456 * life cycle of block is bound to device, so delete it after
2457 * the device has been safely removed
2460 dasd_free_block(block);
2464 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
2465 * the device is detected for the first time and is supposed to be used
2466 * or the user has started activation through sysfs.
2468 int dasd_generic_set_online(struct ccw_device *cdev,
2469 struct dasd_discipline *base_discipline)
2471 struct dasd_discipline *discipline;
2472 struct dasd_device *device;
2475 /* first online clears initial online feature flag */
2476 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
2477 device = dasd_create_device(cdev);
2479 return PTR_ERR(device);
2481 discipline = base_discipline;
2482 if (device->features & DASD_FEATURE_USEDIAG) {
2483 if (!dasd_diag_discipline_pointer) {
2484 pr_warning("%s Setting the DASD online failed because "
2485 "of missing DIAG discipline\n",
2486 dev_name(&cdev->dev));
2487 dasd_delete_device(device);
2490 discipline = dasd_diag_discipline_pointer;
2492 if (!try_module_get(base_discipline->owner)) {
2493 dasd_delete_device(device);
2496 if (!try_module_get(discipline->owner)) {
2497 module_put(base_discipline->owner);
2498 dasd_delete_device(device);
2501 device->base_discipline = base_discipline;
2502 device->discipline = discipline;
2504 /* check_device will allocate block device if necessary */
2505 rc = discipline->check_device(device);
2507 pr_warning("%s Setting the DASD online with discipline %s "
2508 "failed with rc=%i\n",
2509 dev_name(&cdev->dev), discipline->name, rc);
2510 module_put(discipline->owner);
2511 module_put(base_discipline->owner);
2512 dasd_delete_device(device);
2516 dasd_set_target_state(device, DASD_STATE_ONLINE);
2517 if (device->state <= DASD_STATE_KNOWN) {
2518 pr_warning("%s Setting the DASD online failed because of a "
2519 "missing discipline\n", dev_name(&cdev->dev));
2521 dasd_set_target_state(device, DASD_STATE_NEW);
2523 dasd_free_block(device->block);
2524 dasd_delete_device(device);
2526 pr_debug("dasd_generic device %s found\n",
2527 dev_name(&cdev->dev));
2529 wait_event(dasd_init_waitq, _wait_for_device(device));
2531 dasd_put_device(device);
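/*
 * Illustrative sketch (assumption): the set_online callback of a discipline
 * usually just supplies its discipline to dasd_generic_set_online(), and the
 * remaining ccw_driver hooks map directly onto the generic helpers in this
 * file. my_set_online, my_ccw_driver and my_ids are hypothetical names; the
 * .owner field and other members are omitted for brevity.
 */
#if 0
static int my_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &my_discipline);
}

static struct ccw_driver my_ccw_driver = {
	.name	     = "dasd-my",
	.ids	     = my_ids,
	.probe	     = my_probe,
	.remove	     = dasd_generic_remove,
	.set_online  = my_set_online,
	.set_offline = dasd_generic_set_offline,
	.notify	     = dasd_generic_notify,
};
#endif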
2535 int dasd_generic_set_offline(struct ccw_device *cdev)
2537 struct dasd_device *device;
2538 struct dasd_block *block;
2539 int max_count, open_count;
2541 device = dasd_device_from_cdev(cdev);
2543 return PTR_ERR(device);
2544 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2545 /* Already doing offline processing */
2546 dasd_put_device(device);
2550 * We must make sure that this device is currently not in use.
2551 * The open_count is increased for every opener; that includes
2552 * the blkdev_get in dasd_scan_partitions. We are only interested
2553 * in the other openers.
2555 if (device->block) {
2556 max_count = device->block->bdev ? 0 : -1;
2557 open_count = atomic_read(&device->block->open_count);
2558 if (open_count > max_count) {
2560 pr_warning("%s: The DASD cannot be set offline "
2561 "with open count %i\n",
2562 dev_name(&cdev->dev), open_count);
2564 pr_warning("%s: The DASD cannot be set offline "
2565 "while it is in use\n",
2566 dev_name(&cdev->dev));
2567 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2568 dasd_put_device(device);
2572 dasd_set_target_state(device, DASD_STATE_NEW);
2573 /* dasd_delete_device destroys the device reference. */
2574 block = device->block;
2575 device->block = NULL;
2576 dasd_delete_device(device);
2578 * life cycle of block is bound to device, so delete it after
2579 * the device has been safely removed
2582 dasd_free_block(block);
2586 int dasd_generic_notify(struct ccw_device *cdev, int event)
2588 struct dasd_device *device;
2589 struct dasd_ccw_req *cqr;
2592 device = dasd_device_from_cdev_locked(cdev);
2600 /* First of all call extended error reporting. */
2601 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2603 if (device->state < DASD_STATE_BASIC)
2605 /* Device is active. We want to keep it. */
2606 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2607 if (cqr->status == DASD_CQR_IN_IO) {
2608 cqr->status = DASD_CQR_QUEUED;
2611 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
2612 dasd_device_clear_timer(device);
2613 dasd_schedule_device_bh(device);
2617 /* FIXME: add a sanity check. */
2618 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
2619 if (device->stopped & DASD_UNRESUMED_PM) {
2620 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
2621 dasd_restore_device(device);
2625 dasd_schedule_device_bh(device);
2627 dasd_schedule_block_bh(device->block);
2631 dasd_put_device(device);
2635 int dasd_generic_pm_freeze(struct ccw_device *cdev)
2637 struct dasd_ccw_req *cqr, *n;
2639 struct list_head freeze_queue;
2640 struct dasd_device *device = dasd_device_from_cdev(cdev);
2643 return PTR_ERR(device);
2644 /* disallow new I/O */
2645 dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
2646 /* clear active requests */
2647 INIT_LIST_HEAD(&freeze_queue);
2648 spin_lock_irq(get_ccwdev_lock(cdev));
2650 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2651 /* Check status and move request to freeze_queue */
2652 if (cqr->status == DASD_CQR_IN_IO) {
2653 rc = device->discipline->term_IO(cqr);
2655 /* unable to terminate request */
2656 dev_err(&device->cdev->dev,
2657 "Unable to terminate request %p "
2658 "on suspend\n", cqr);
2659 spin_unlock_irq(get_ccwdev_lock(cdev));
2660 dasd_put_device(device);
2664 list_move_tail(&cqr->devlist, &freeze_queue);
2667 spin_unlock_irq(get_ccwdev_lock(cdev));
2669 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
2670 wait_event(dasd_flush_wq,
2671 (cqr->status != DASD_CQR_CLEAR_PENDING));
2672 if (cqr->status == DASD_CQR_CLEARED)
2673 cqr->status = DASD_CQR_QUEUED;
2675 /* re-queue the requests from freeze_queue onto the ccw_queue */
2676 spin_lock_irq(get_ccwdev_lock(cdev));
2677 list_splice_tail(&freeze_queue, &device->ccw_queue);
2678 spin_unlock_irq(get_ccwdev_lock(cdev));
2680 if (device->discipline->freeze)
2681 rc = device->discipline->freeze(device);
2683 dasd_put_device(device);
2686 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
2688 int dasd_generic_restore_device(struct ccw_device *cdev)
2690 struct dasd_device *device = dasd_device_from_cdev(cdev);
2694 return PTR_ERR(device);
2696 /* allow new IO again */
2697 dasd_device_remove_stop_bits(device,
2698 (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
2700 dasd_schedule_device_bh(device);
2703 * call discipline restore function
2704 * if the device is stopped, do nothing, e.g. for disconnected devices
2706 if (device->discipline->restore && !(device->stopped))
2707 rc = device->discipline->restore(device);
2708 if (rc || device->stopped)
2710 * if the resume failed for the DASD we put it in
2711 * an UNRESUMED stop state
2713 device->stopped |= DASD_UNRESUMED_PM;
2716 dasd_schedule_block_bh(device->block);
2718 dasd_put_device(device);
2721 EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
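/*
 * Illustrative sketch (assumption): discipline drivers can hook the two
 * helpers above into the power-management callbacks of their ccw_driver,
 * e.g. in the hypothetical my_ccw_driver shown earlier.
 */
#if 0
	.freeze	 = dasd_generic_pm_freeze,
	.thaw	 = dasd_generic_restore_device,
	.restore = dasd_generic_restore_device,
#endif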
2723 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2725 int rdc_buffer_size,
2728 struct dasd_ccw_req *cqr;
2730 unsigned long *idaw;
2732 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2735 /* internal error 13 - Allocating the RDC request failed */
2736 dev_err(&device->cdev->dev,
2737 "An error occurred in the DASD device driver, "
2738 "reason=%s\n", "13");
2743 ccw->cmd_code = CCW_CMD_RDC;
2744 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
2745 idaw = (unsigned long *) (cqr->data);
2746 ccw->cda = (__u32)(addr_t) idaw;
2747 ccw->flags = CCW_FLAG_IDA;
2748 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
2750 ccw->cda = (__u32)(addr_t) rdc_buffer;
2754 ccw->count = rdc_buffer_size;
2755 cqr->startdev = device;
2756 cqr->memdev = device;
2757 cqr->expires = 10*HZ;
2759 cqr->buildclk = get_clock();
2760 cqr->status = DASD_CQR_FILLED;
2765 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
2766 void *rdc_buffer, int rdc_buffer_size)
2769 struct dasd_ccw_req *cqr;
2771 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
2774 return PTR_ERR(cqr);
2776 ret = dasd_sleep_on(cqr);
2777 dasd_sfree_request(cqr, cqr->memdev);
2780 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
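/*
 * Illustrative sketch (not part of this driver): reading the device
 * characteristics from a discipline's check_device() callback. MY_MAGIC and
 * struct my_rdc_data are hypothetical; real disciplines use their own magic
 * value and RDC layout.
 */
#if 0
	struct my_rdc_data rdc;
	int rc;

	rc = dasd_generic_read_dev_chars(device, MY_MAGIC, &rdc, sizeof(rdc));
	if (rc) {
		dev_warn(&device->cdev->dev,
			 "Reading device characteristics failed, rc=%d\n", rc);
		return rc;
	}
#endif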
2783 * In command mode and transport mode we need to look for sense
2784 * data in different places. The sense data itself is always
2785 * an array of 32 bytes, so we can unify the sense data access for both modes.
2788 char *dasd_get_sense(struct irb *irb)
2790 struct tsb *tsb = NULL;
2793 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2794 if (irb->scsw.tm.tcw)
2795 tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2797 if (tsb && tsb->length == 64 && tsb->flags)
2798 switch (tsb->flags & 0x07) {
2799 case 1: /* tsa_iostat */
2800 sense = tsb->tsa.iostat.sense;
2802 case 2: /* tsa_ddpc */
2803 sense = tsb->tsa.ddpc.sense;
2806 /* currently we don't use interrogate data */
2809 } else if (irb->esw.esw0.erw.cons) {
2814 EXPORT_SYMBOL_GPL(dasd_get_sense);
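/*
 * Illustrative sketch (not part of this driver): a discipline examining an
 * interrupt can use dasd_get_sense() without caring whether the I/O ran in
 * command or transport mode. handle_command_reject() is a hypothetical
 * helper; 0x80 is the command-reject bit in byte 0 of the sense data.
 */
#if 0
	char *sense = dasd_get_sense(irb);

	if (!sense)
		return;				/* no sense data available */
	if (sense[0] & 0x80)			/* command reject */
		handle_command_reject(device);	/* hypothetical helper */
#endif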
2816 static int __init dasd_init(void)
2820 init_waitqueue_head(&dasd_init_waitq);
2821 init_waitqueue_head(&dasd_flush_wq);
2822 init_waitqueue_head(&generic_waitq);
2824 /* register 'common' DASD debug area, used for all DBF_XXX calls */
2825 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2826 if (dasd_debug_area == NULL) {
2830 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2831 debug_set_level(dasd_debug_area, DBF_WARNING);
2833 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2835 dasd_diag_discipline_pointer = NULL;
2837 rc = dasd_devmap_init();
2840 rc = dasd_gendisk_init();
2846 rc = dasd_eer_init();
2849 #ifdef CONFIG_PROC_FS
2850 rc = dasd_proc_init();
2857 pr_info("The DASD device driver could not be initialized\n");
2862 module_init(dasd_init);
2863 module_exit(dasd_exit);
2865 EXPORT_SYMBOL(dasd_debug_area);
2866 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2868 EXPORT_SYMBOL(dasd_add_request_head);
2869 EXPORT_SYMBOL(dasd_add_request_tail);
2870 EXPORT_SYMBOL(dasd_cancel_req);
2871 EXPORT_SYMBOL(dasd_device_clear_timer);
2872 EXPORT_SYMBOL(dasd_block_clear_timer);
2873 EXPORT_SYMBOL(dasd_enable_device);
2874 EXPORT_SYMBOL(dasd_int_handler);
2875 EXPORT_SYMBOL(dasd_kfree_request);
2876 EXPORT_SYMBOL(dasd_kick_device);
2877 EXPORT_SYMBOL(dasd_kmalloc_request);
2878 EXPORT_SYMBOL(dasd_schedule_device_bh);
2879 EXPORT_SYMBOL(dasd_schedule_block_bh);
2880 EXPORT_SYMBOL(dasd_set_target_state);
2881 EXPORT_SYMBOL(dasd_device_set_timer);
2882 EXPORT_SYMBOL(dasd_block_set_timer);
2883 EXPORT_SYMBOL(dasd_sfree_request);
2884 EXPORT_SYMBOL(dasd_sleep_on);
2885 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2886 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2887 EXPORT_SYMBOL(dasd_smalloc_request);
2888 EXPORT_SYMBOL(dasd_start_IO);
2889 EXPORT_SYMBOL(dasd_term_IO);
2891 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2892 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2893 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2894 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2895 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2896 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2897 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2898 EXPORT_SYMBOL_GPL(dasd_alloc_block);
2899 EXPORT_SYMBOL_GPL(dasd_free_block);