#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>
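
/*
 * Suspend side of the sequence: snapshot the port's ACPI timing (_GTM)
 * once per channel, push an ATA_PRIV_PM_SUSPEND request through the
 * block layer, then drop the port to ACPI D3 (_PS3) once both drives
 * hanging off the channel have been suspended.
 */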
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
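
/*
 * end_io handler for requests issued via ide_pm_execute_rq() below: all
 * it has to do is wake up the task sleeping on the on-stack completion.
 */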
static void ide_end_sync_rq(struct request *rq, int error)
{
	complete(rq->end_io_data);
}
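
/*
 * Execute a PM request synchronously. blk_execute_rq() is avoided here
 * because the queue may still be stopped (it is stopped when a suspend
 * request completes); inserting at the head and running the queue
 * unconditionally lets the resume request make progress regardless.
 */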
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		__blk_end_request_all(rq, 0);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return scsi_req(rq)->result ? -EIO : 0;
}
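
/*
 * Resume side of the sequence: restore ACPI state (_PS0/_STM) once per
 * channel and replay the saved taskfiles, then push an
 * ATA_PRIV_PM_RESUME request, marked RQF_PREEMPT so it is accepted while
 * the drive is still flagged IDE_DFLAG_BLOCKED, and finally invoke the
 * media driver's ->resume() method.
 */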
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	rq->rq_flags |= RQF_PREEMPT;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
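
/*
 * Advance pm->pm_step to the next step of the state machine:
 *   suspend: flush cache -> standby -> completed
 *            (PM_EVENT_FREEZE skips the standby step)
 *   resume:  restore PIO -> idle -> restore DMA
 * Non-disk media have no disk-specific steps, so this is a no-op there.
 */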
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
		drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}
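
/*
 * Emit the ATA command for the current PM step (cache flush, standby,
 * idle immediate) or perform the step directly (PIO/DMA
 * reconfiguration), skipping steps that do not apply to the device.
 * Returns ide_stopped when no taskfile needs to be sent to the drive.
 */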
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;
	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, 0, 0))
		BUG();
}
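
/*
 * Called as a PM request starts executing: on suspend, mark the drive
 * blocked so that only RQF_PREEMPT requests are accepted from now on;
 * on resume, wait for the hardware to clear BSY before touching it and
 * restart the queue that was stopped at suspend time.
 */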
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
	         ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
	         pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;

#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}