/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_SLEEPON_START_TAG	(void *) 1
#define DASD_SLEEPON_END_TAG	(void *) 2
/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int  dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int  dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);

	return device;
}
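
/*
 * Note on the pools set up above: the two ccw_mem pages and the single
 * erp_mem page feed the dasd chunk allocator and are later served out by
 * dasd_smalloc_request().  This keeps channel programs in GFP_DMA memory,
 * which the channel subsystem requires, even under memory pressure and
 * inside the error-recovery path.
 */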
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}
/*
 * Allocate memory for a new dasd_block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;

	return block;
}
/*
 * Free memory of a dasd_block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}
/*
 * Register the gendisk and the per-device debug area.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Free the gendisk and debug area again. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}
/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
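
/*
 * The states walked by the two functions below form a ladder:
 *
 *	NEW -> KNOWN -> BASIC -> (UNFMT) -> READY -> ONLINE
 *
 * dasd_increase_state() and dasd_decrease_state() move device->state one
 * rung at a time toward device->target; dasd_change_state() picks the
 * direction.
 */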
/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}
/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}
void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);
/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
/*
 * Enable a device and wait until the state change is finished.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}
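
/*
 * The for loop in the macro above computes a logarithmic histogram
 * bucket: index is the smallest value (capped at 31) for which
 * value >> (2 + index) becomes zero.  For example, value == 100 yields
 * index == 5, because 100 >> 7 == 0 while 100 >> 6 == 1.
 */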
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}
/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
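
/*
 * The ">> 12" shifts above convert TOD-clock deltas to microseconds:
 * bit 51 of the s390 TOD clock increments once per microsecond, so
 * discarding the low 12 bits of a difference leaves a microsecond count.
 */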
#else				/* not CONFIG_DASD_PROFILE */

#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

#endif				/* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
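
/*
 * Memory layout of a chunk returned by dasd_smalloc_request():
 *
 *	| struct dasd_ccw_req, padded to 8 bytes | ccw1 array | data |
 *
 * The expression "(size + 7L) & -8L" rounds up to the next multiple of
 * eight (e.g. 42 -> 48), so the channel-command words that follow the
 * request structure stay doubleword aligned as the channel subsystem
 * requires.
 */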
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0: /* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}
void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		/* log sense for every failed I/O to s390 debugfeature */
		dasd_log_sense_dbf(cqr, irb);
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}

		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == LPM_ANYPATH)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
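
/*
 * Note the "fast_io" path in the handler above: when a request completes
 * without error, the next queued request is started directly from
 * interrupt context rather than from the tasklet, saving one bottom-half
 * round trip per request.
 */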
enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	   device->state != device->target ||
	   !device->discipline->handle_unsolicited_interrupt) {
		dasd_put_device(device);
		goto out;
	}

	dasd_device_clear_timer(device);
	device->discipline->handle_unsolicited_interrupt(device, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}
/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}
/*
 * The cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function.
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status */
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
				 cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}
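
/*
 * The tasklet above deliberately drops the ccwdev lock before calling
 * __dasd_device_process_final_queue(): the callbacks invoked there may
 * take block layer locks of their own, so only ccw queue manipulation is
 * done under get_ccwdev_lock().
 */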
/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	wake_up(&generic_waitq);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * Checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;

		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}

		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}
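
/*
 * Typical synchronous use by discipline code (an illustrative sketch
 * only; the magic value and channel program contents are placeholders,
 * not taken from this file):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... build the channel program in cqr->cpaddr and cqr->data ...
 *	cqr->startdev = device;
 *	cqr->expires = 5 * HZ;
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, device);
 */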
/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *	   0 if there was no need to terminate the request (not started yet)
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}
/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}
/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;
	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	__blk_end_request_all(req, error);
}
/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first requests on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}
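
/*
 * Lock nesting in the tasklet above: block->request_queue_lock (irq-safe,
 * protects the Linux request queue) is taken first and block->queue_lock
 * (protects the ccw queue) is nested inside it.  do_dasd_request() below
 * relies on the same ordering, since the block layer calls it with
 * request_queue_lock already held.
 */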
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}
/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}
/*
 * Schedules a call to dasd_block_tasklet over the block tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}
/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}
/*
 * Set up the request queue limits.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_logical_block_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_hw_sectors(block->request_queue, max);
	blk_queue_max_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
}
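
/*
 * Worked example for the max_hw_sectors limit above: with a 4 KiB
 * bp_block, s2b_shift is 3 (4096 / 512 sectors per block), so a
 * discipline max_blocks of 128 allows 128 << 3 = 1024 sectors, i.e.
 * 512 KiB per request.
 */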
2203 * Deactivate and free request queue.
2205 static void dasd_free_queue(struct dasd_block *block)
2207 if (block->request_queue) {
2208 blk_cleanup_queue(block->request_queue);
2209 block->request_queue = NULL;
2214 * Flush requests on the request queue.
2216 static void dasd_flush_request_queue(struct dasd_block *block)
2218 struct request *req;
2220 if (!block->request_queue)
2223 spin_lock_irq(&block->request_queue_lock);
2224 while ((req = blk_fetch_request(block->request_queue)))
2225 __blk_end_request_all(req, -EIO);
2226 spin_unlock_irq(&block->request_queue_lock);
2229 static int dasd_open(struct block_device *bdev, fmode_t mode)
2231 struct dasd_block *block = bdev->bd_disk->private_data;
2232 struct dasd_device *base;
2239 atomic_inc(&block->open_count);
2240 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2245 if (!try_module_get(base->discipline->owner)) {
2250 if (dasd_probeonly) {
2251 dev_info(&base->cdev->dev,
2252 "Accessing the DASD failed because it is in "
2253 "probeonly mode\n");
2258 if (base->state <= DASD_STATE_BASIC) {
2259 DBF_DEV_EVENT(DBF_ERR, base, " %s",
2260 " Cannot open unrecognized device");
2265 if ((mode & FMODE_WRITE) &&
2266 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
2267 (base->features & DASD_FEATURE_READONLY))) {
2275 module_put(base->discipline->owner);
2277 atomic_dec(&block->open_count);
2281 static int dasd_release(struct gendisk *disk, fmode_t mode)
2283 struct dasd_block *block = disk->private_data;
2285 atomic_dec(&block->open_count);
2286 module_put(block->base->discipline->owner);
2291 * Return disk geometry.
2293 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2295 struct dasd_block *block;
2296 struct dasd_device *base;
2298 block = bdev->bd_disk->private_data;
2303 if (!base->discipline ||
2304 !base->discipline->fill_geometry)
2307 base->discipline->fill_geometry(block, geo);
2308 geo->start = get_start_sect(bdev) >> block->s2b_shift;
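/*
 * Illustrative sketch, not part of the original file: the geometry
 * filled in above is what user space retrieves through the HDIO_GETGEO
 * ioctl. A minimal caller (the device node name is an assumption):
 */
#if 0	/* user space, illustration only */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/dasda", O_RDONLY);

	if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &geo) == 0)
		printf("cyls=%u heads=%u sectors=%u start=%lu\n",
		       geo.cylinders, geo.heads, geo.sectors, geo.start);
	return 0;
}
#endif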
2312 const struct block_device_operations
2313 dasd_device_operations = {
2314 .owner = THIS_MODULE,
2316 .release = dasd_release,
2317 .ioctl = dasd_ioctl,
2318 .compat_ioctl = dasd_ioctl,
2319 .getgeo = dasd_getgeo,
2322 /*******************************************************************************
2323 * end of block device operations
2329 #ifdef CONFIG_PROC_FS
2333 if (dasd_page_cache != NULL) {
2334 kmem_cache_destroy(dasd_page_cache);
2335 dasd_page_cache = NULL;
2337 dasd_gendisk_exit();
2339 if (dasd_debug_area != NULL) {
2340 debug_unregister(dasd_debug_area);
2341 dasd_debug_area = NULL;
2346 * SECTION: common functions for ccw_driver use
2350 * Is the device read-only?
2351 * Note that this function does not report the setting of the
2352 * readonly device attribute, but how it is configured in z/VM.
2354 int dasd_device_is_ro(struct dasd_device *device)
2356 struct ccw_dev_id dev_id;
2357 struct diag210 diag_data;
2362 ccw_device_get_id(device->cdev, &dev_id);
2363 memset(&diag_data, 0, sizeof(diag_data));
2364 diag_data.vrdcdvno = dev_id.devno;
2365 diag_data.vrdclen = sizeof(diag_data);
2366 rc = diag210(&diag_data);
2367 if (rc == 0 || rc == 2) {
2368 return diag_data.vrdcvfla & 0x80;
2370 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
2375 EXPORT_SYMBOL_GPL(dasd_device_is_ro);
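/*
 * Illustrative sketch of a typical caller (modeled on how the
 * disciplines use this helper during device setup; not taken verbatim
 * from this file):
 */
#if 0	/* inside a discipline's check_device path */
	if (dasd_device_is_ro(device) > 0)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
#endif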
2377 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2379 struct ccw_device *cdev = data;
2382 ret = ccw_device_set_online(cdev);
2384 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2385 dev_name(&cdev->dev), ret);
2389 * Initial attempt at a probe function. This can be simplified once
2390 * the other detection code is gone.
2392 int dasd_generic_probe(struct ccw_device *cdev,
2393 struct dasd_discipline *discipline)
2397 ret = dasd_add_sysfs_files(cdev);
2399 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
2400 "dasd_generic_probe: could not add "
2404 cdev->handler = &dasd_int_handler;
2407 * Automatically online either all dasd devices (dasd_autodetect)
2408 * or all devices specified with dasd= parameters during the
2409 * initial probe.
2411 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
2412 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2413 async_schedule(dasd_generic_auto_online, cdev);
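/*
 * Illustrative sketch, not part of this file: a discipline's probe
 * routine is typically a thin wrapper around dasd_generic_probe().
 * dasd_eckd_discipline is assumed here, and the real ECKD probe also
 * sets ccw-device options first; simplified:
 */
#if 0
static int dasd_eckd_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &dasd_eckd_discipline);
}
#endif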
2418 * This will one day be called from a global not_oper handler.
2419 * It is also used by driver_unregister during module unload.
2421 void dasd_generic_remove(struct ccw_device *cdev)
2423 struct dasd_device *device;
2424 struct dasd_block *block;
2426 cdev->handler = NULL;
2428 dasd_remove_sysfs_files(cdev);
2429 device = dasd_device_from_cdev(cdev);
2432 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2433 /* Already doing offline processing */
2434 dasd_put_device(device);
2438 * This device is removed unconditionally. Set offline
2439 * flag to prevent dasd_open from opening it while it is
2440 * not quite down yet.
2442 dasd_set_target_state(device, DASD_STATE_NEW);
2443 /* dasd_delete_device destroys the device reference. */
2444 block = device->block;
2445 device->block = NULL;
2446 dasd_delete_device(device);
2448 * life cycle of block is bound to the device, so delete it only
2449 * after the device has been safely removed
2452 dasd_free_block(block);
2456 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
2457 * the device is detected for the first time and is supposed to be used
2458 * or the user has started activation through sysfs.
2460 int dasd_generic_set_online(struct ccw_device *cdev,
2461 struct dasd_discipline *base_discipline)
2463 struct dasd_discipline *discipline;
2464 struct dasd_device *device;
2467 /* first online clears initial online feature flag */
2468 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
2469 device = dasd_create_device(cdev);
2471 return PTR_ERR(device);
2473 discipline = base_discipline;
2474 if (device->features & DASD_FEATURE_USEDIAG) {
2475 if (!dasd_diag_discipline_pointer) {
2476 pr_warning("%s Setting the DASD online failed because "
2477 "of missing DIAG discipline\n",
2478 dev_name(&cdev->dev));
2479 dasd_delete_device(device);
2482 discipline = dasd_diag_discipline_pointer;
2484 if (!try_module_get(base_discipline->owner)) {
2485 dasd_delete_device(device);
2488 if (!try_module_get(discipline->owner)) {
2489 module_put(base_discipline->owner);
2490 dasd_delete_device(device);
2493 device->base_discipline = base_discipline;
2494 device->discipline = discipline;
2496 /* check_device will allocate block device if necessary */
2497 rc = discipline->check_device(device);
2499 pr_warning("%s Setting the DASD online with discipline %s "
2500 "failed with rc=%i\n",
2501 dev_name(&cdev->dev), discipline->name, rc);
2502 module_put(discipline->owner);
2503 module_put(base_discipline->owner);
2504 dasd_delete_device(device);
2508 dasd_set_target_state(device, DASD_STATE_ONLINE);
2509 if (device->state <= DASD_STATE_KNOWN) {
2510 pr_warning("%s Setting the DASD online failed because of a "
2511 "missing discipline\n", dev_name(&cdev->dev));
2513 dasd_set_target_state(device, DASD_STATE_NEW);
2515 dasd_free_block(device->block);
2516 dasd_delete_device(device);
2518 pr_debug("dasd_generic device %s found\n",
2519 dev_name(&cdev->dev));
2521 wait_event(dasd_init_waitq, _wait_for_device(device));
2523 dasd_put_device(device);
2527 int dasd_generic_set_offline(struct ccw_device *cdev)
2529 struct dasd_device *device;
2530 struct dasd_block *block;
2531 int max_count, open_count;
2533 device = dasd_device_from_cdev(cdev);
2535 return PTR_ERR(device);
2536 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2537 /* Already doing offline processing */
2538 dasd_put_device(device);
2542 * We must make sure that this device is currently not in use.
2543 * The open_count is increased for every opener, including
2544 * the blkdev_get in dasd_scan_partitions. We are only interested
2545 * in the other openers.
2547 if (device->block) {
2548 max_count = device->block->bdev ? 0 : -1;
2549 open_count = atomic_read(&device->block->open_count);
2550 if (open_count > max_count) {
2552 pr_warning("%s: The DASD cannot be set offline "
2553 "with open count %i\n",
2554 dev_name(&cdev->dev), open_count);
2556 pr_warning("%s: The DASD cannot be set offline "
2557 "while it is in use\n",
2558 dev_name(&cdev->dev));
2559 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2560 dasd_put_device(device);
2564 dasd_set_target_state(device, DASD_STATE_NEW);
2565 /* dasd_delete_device destroys the device reference. */
2566 block = device->block;
2567 device->block = NULL;
2568 dasd_delete_device(device);
2570 * life cycle of block is bound to the device, so delete it only
2571 * after the device has been safely removed
2574 dasd_free_block(block);
2578 int dasd_generic_notify(struct ccw_device *cdev, int event)
2580 struct dasd_device *device;
2581 struct dasd_ccw_req *cqr;
2584 device = dasd_device_from_cdev_locked(cdev);
2592 /* First of all call extended error reporting. */
2593 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2595 if (device->state < DASD_STATE_BASIC)
2597 /* Device is active. We want to keep it. */
2598 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2599 if (cqr->status == DASD_CQR_IN_IO) {
2600 cqr->status = DASD_CQR_QUEUED;
2603 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
2604 dasd_device_clear_timer(device);
2605 dasd_schedule_device_bh(device);
2609 /* FIXME: add a sanity check. */
2610 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
2611 if (device->stopped & DASD_UNRESUMED_PM) {
2612 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
2613 dasd_restore_device(device);
2617 dasd_schedule_device_bh(device);
2619 dasd_schedule_block_bh(device->block);
2623 dasd_put_device(device);
2627 int dasd_generic_pm_freeze(struct ccw_device *cdev)
2629 struct dasd_ccw_req *cqr, *n;
2631 struct list_head freeze_queue;
2632 struct dasd_device *device = dasd_device_from_cdev(cdev);
2635 return PTR_ERR(device);
2636 /* disallow new I/O */
2637 dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
2638 /* clear active requests */
2639 INIT_LIST_HEAD(&freeze_queue);
2640 spin_lock_irq(get_ccwdev_lock(cdev));
2642 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2643 /* Check status and move request to freeze_queue */
2644 if (cqr->status == DASD_CQR_IN_IO) {
2645 rc = device->discipline->term_IO(cqr);
2647 /* unable to terminate request */
2648 dev_err(&device->cdev->dev,
2649 "Unable to terminate request %p "
2650 "on suspend\n", cqr);
2651 spin_unlock_irq(get_ccwdev_lock(cdev));
2652 dasd_put_device(device);
2656 list_move_tail(&cqr->devlist, &freeze_queue);
2659 spin_unlock_irq(get_ccwdev_lock(cdev));
2661 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
2662 wait_event(dasd_flush_wq,
2663 (cqr->status != DASD_CQR_CLEAR_PENDING));
2664 if (cqr->status == DASD_CQR_CLEARED)
2665 cqr->status = DASD_CQR_QUEUED;
2667 /* splice the frozen requests back onto the ccw_queue */
2668 spin_lock_irq(get_ccwdev_lock(cdev));
2669 list_splice_tail(&freeze_queue, &device->ccw_queue);
2670 spin_unlock_irq(get_ccwdev_lock(cdev));
2672 if (device->discipline->freeze)
2673 rc = device->discipline->freeze(device);
2675 dasd_put_device(device);
2678 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
2680 int dasd_generic_restore_device(struct ccw_device *cdev)
2682 struct dasd_device *device = dasd_device_from_cdev(cdev);
2686 return PTR_ERR(device);
2688 /* allow new IO again */
2689 dasd_device_remove_stop_bits(device,
2690 (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
2692 dasd_schedule_device_bh(device);
2695 * call the discipline restore function; if the device is still
2696 * stopped, do nothing (e.g. for disconnected devices)
2698 if (device->discipline->restore && !(device->stopped))
2699 rc = device->discipline->restore(device);
2700 if (rc || device->stopped)
2702 * if the resume failed for the DASD we put it in
2703 * an UNRESUMED stop state
2705 device->stopped |= DASD_UNRESUMED_PM;
2708 dasd_schedule_block_bh(device->block);
2710 dasd_put_device(device);
2713 EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
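/*
 * Illustrative sketch, not part of this file: how the generic helpers
 * above are typically wired into a discipline's ccw_driver (field
 * values modeled on the ECKD discipline; details may differ):
 */
#if 0
static struct ccw_driver dasd_eckd_driver = {
	.name	     = "dasd-eckd",
	.owner	     = THIS_MODULE,
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove	     = dasd_generic_remove,
	.set_online  = dasd_eckd_set_online,
	.set_offline = dasd_generic_set_offline,
	.notify	     = dasd_generic_notify,
	.freeze	     = dasd_generic_pm_freeze,
	.thaw	     = dasd_generic_restore_device,
	.restore     = dasd_generic_restore_device,
};
#endif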
2715 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2717 int rdc_buffer_size,
2720 struct dasd_ccw_req *cqr;
2722 unsigned long *idaw;
2724 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2727 /* internal error 13 - Allocating the RDC request failed */
2728 dev_err(&device->cdev->dev,
2729 "An error occurred in the DASD device driver, "
2730 "reason=%s\n", "13");
2735 ccw->cmd_code = CCW_CMD_RDC;
2736 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
2737 idaw = (unsigned long *) (cqr->data);
2738 ccw->cda = (__u32)(addr_t) idaw;
2739 ccw->flags = CCW_FLAG_IDA;
2740 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
2742 ccw->cda = (__u32)(addr_t) rdc_buffer;
2746 ccw->count = rdc_buffer_size;
2747 cqr->startdev = device;
2748 cqr->memdev = device;
2749 cqr->expires = 10*HZ;
2751 cqr->buildclk = get_clock();
2752 cqr->status = DASD_CQR_FILLED;
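/*
 * Editorial note on the IDAL branch above: idal_is_needed() reports,
 * roughly, whether the buffer can still be addressed directly by the
 * 31-bit CCW data address. If not, the CCW is flagged CCW_FLAG_IDA and
 * cda points at a list of indirect-address words built by
 * idal_create_words(), each word describing one chunk of the buffer.
 */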
2757 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
2758 void *rdc_buffer, int rdc_buffer_size)
2761 struct dasd_ccw_req *cqr;
2763 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
2766 return PTR_ERR(cqr);
2768 ret = dasd_sleep_on(cqr);
2769 dasd_sfree_request(cqr, cqr->memdev);
2772 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
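/*
 * Illustrative sketch of a typical use, with an assumed buffer and
 * magic (modeled on the ECKD discipline):
 */
#if 0	/* inside a discipline's device setup */
	struct dasd_eckd_characteristics rdc_data;
	int rc;

	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &rdc_data, 64);
	if (rc)
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristics failed, rc=%d",
				rc);
#endif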
2775 * In command mode and transport mode we need to look for sense
2776 * data in different places. The sense data itself is always
2777 * an array of 32 bytes, so we can unify the sense data access
2780 char *dasd_get_sense(struct irb *irb)
2782 struct tsb *tsb = NULL;
2785 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2786 if (irb->scsw.tm.tcw)
2787 tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2789 if (tsb && tsb->length == 64 && tsb->flags)
2790 switch (tsb->flags & 0x07) {
2791 case 1: /* tsa_iostat */
2792 sense = tsb->tsa.iostat.sense;
2794 case 2: /* tsa_ddpc */
2795 sense = tsb->tsa.ddpc.sense;
2798 /* currently we don't use interrogate data */
2801 } else if (irb->esw.esw0.erw.cons) {
2806 EXPORT_SYMBOL_GPL(dasd_get_sense);
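/*
 * Illustrative sketch, not from this file: a typical caller in an
 * interrupt or ERP path (the sense bit name is assumed from the ECKD
 * headers):
 */
#if 0
	char *sense = dasd_get_sense(irb);

	if (sense && (sense[0] & SNS0_CMD_REJECT)) {
		/* handle a command reject ... */
	}
#endif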
2808 static int __init dasd_init(void)
2812 init_waitqueue_head(&dasd_init_waitq);
2813 init_waitqueue_head(&dasd_flush_wq);
2814 init_waitqueue_head(&generic_waitq);
2816 /* register 'common' DASD debug area, used for all DBF_XXX calls */
2817 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2818 if (dasd_debug_area == NULL) {
2822 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2823 debug_set_level(dasd_debug_area, DBF_WARNING);
2825 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2827 dasd_diag_discipline_pointer = NULL;
2829 rc = dasd_devmap_init();
2832 rc = dasd_gendisk_init();
2838 rc = dasd_eer_init();
2841 #ifdef CONFIG_PROC_FS
2842 rc = dasd_proc_init();
2849 pr_info("The DASD device driver could not be initialized\n");
2854 module_init(dasd_init);
2855 module_exit(dasd_exit);
2857 EXPORT_SYMBOL(dasd_debug_area);
2858 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2860 EXPORT_SYMBOL(dasd_add_request_head);
2861 EXPORT_SYMBOL(dasd_add_request_tail);
2862 EXPORT_SYMBOL(dasd_cancel_req);
2863 EXPORT_SYMBOL(dasd_device_clear_timer);
2864 EXPORT_SYMBOL(dasd_block_clear_timer);
2865 EXPORT_SYMBOL(dasd_enable_device);
2866 EXPORT_SYMBOL(dasd_int_handler);
2867 EXPORT_SYMBOL(dasd_kfree_request);
2868 EXPORT_SYMBOL(dasd_kick_device);
2869 EXPORT_SYMBOL(dasd_kmalloc_request);
2870 EXPORT_SYMBOL(dasd_schedule_device_bh);
2871 EXPORT_SYMBOL(dasd_schedule_block_bh);
2872 EXPORT_SYMBOL(dasd_set_target_state);
2873 EXPORT_SYMBOL(dasd_device_set_timer);
2874 EXPORT_SYMBOL(dasd_block_set_timer);
2875 EXPORT_SYMBOL(dasd_sfree_request);
2876 EXPORT_SYMBOL(dasd_sleep_on);
2877 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2878 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2879 EXPORT_SYMBOL(dasd_smalloc_request);
2880 EXPORT_SYMBOL(dasd_start_IO);
2881 EXPORT_SYMBOL(dasd_term_IO);
2883 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2884 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2885 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2886 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2887 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2888 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2889 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2890 EXPORT_SYMBOL_GPL(dasd_alloc_block);
2891 EXPORT_SYMBOL_GPL(dasd_free_block);