/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"
/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
	while (lpm && ((lpm & mask) == 0))
		lpm >>= 1;

	return lpm;
}
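/*
 * Worked example, for illustration only: lpm_adjust(0x80, 0x0c)
 * shifts 0x80 -> 0x40 -> 0x20 -> 0x10 -> 0x08 and returns 0x08, the
 * first available path at or below the starting bit position, while
 * lpm_adjust(0x01, 0xf0) returns 0 because no common bit can be
 * reached by shifting right.
 */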
/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	req->retries	= req->maxretries;
	req->mask	= lpm_adjust(req->mask >> 1, req->lpm);

	return req->mask;
}
/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	if (req->done)
		return;
	req->done = 1;
	ccw_device_set_timeout(cdev, 0);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	if (rc && rc != -ENODEV && req->drc)
		rc = req->drc;
	req->callback(cdev, req->data, rc);
}
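/*
 * Note on the code above: req->drc allows an earlier handler (e.g.
 * ccw_request_timeout() setting -ETIME) to override the generic
 * result code; success and -ENODEV are always reported as-is.
 */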
/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted, try next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Perform start function. */
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* I/O started successfully. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status. */
		rc = cio_clear(sch);
		if (rc)
			break;
		return;
	}
	ccwreq_stop(cdev, rc);
}
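/*
 * For orientation (derived from the loop above): each path in
 * req->mask gets up to req->maxretries start attempts (the count
 * persists across restarts from the interrupt handler) before
 * ccwreq_next_path() advances to the next path; ccwreq_stop() is
 * reached once the mask runs empty, cio_start() reports -ENODEV, or
 * the cleanup cio_clear() fails.
 */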
/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Try all paths twice to counter link flapping. */
	req->mask	= 0x8080;
	req->retries	= req->maxretries;
	req->mask	= lpm_adjust(req->mask, req->lpm);
	req->drc	= 0;
	req->done	= 0;
	req->cancel	= 0;
	if (!req->mask)
		goto out_nopath;
	ccwreq_do(cdev);
	return;

out_nopath:
	ccwreq_stop(cdev, -EACCES);
}
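/*
 * Usage sketch, assuming the struct ccw_request layout from io_sch.h;
 * names marked hypothetical are illustrative, not part of this file:
 *
 *	struct ccw_request *req = &cdev->private->req;
 *
 *	req->cp		= cp;		// prepared channel program
 *	req->timeout	= 10 * HZ;	// hypothetical timeout
 *	req->maxretries	= 5;		// hypothetical retry limit
 *	req->lpm	= sch->schib.pmcw.pam;
 *	req->callback	= my_done_fn;	// hypothetical completion hook
 *	ccw_request_start(cdev);
 *
 * Completion is reported asynchronously via req->callback with the
 * final return code as last argument.
 */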
/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (req->done)
		return 1;
	req->cancel = 1;
	rc = cio_clear(sch);
	if (rc)
		ccwreq_stop(cdev, rc);
	return 0;
}
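/*
 * Note: a successful cancel completes asynchronously; the clear
 * interrupt reaches ccw_request_handler() as IO_KILLED and, because
 * req->cancel is set, the callback is invoked with -EIO.
 */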
/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
	struct irb *irb = &cdev->private->irb;
	struct cmd_scsw *scsw = &irb->scsw.cmd;

	/* Perform BASIC SENSE if needed. */
	if (ccw_device_accumulate_and_sense(cdev, lcirb))
		return IO_RUNNING;
	/* Check for halt/clear interrupt. */
	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
		return IO_KILLED;
	/* Check for path error. */
	if (scsw->cc == 3 || scsw->pno)
		return IO_PATH_ERROR;
	/* Handle BASIC SENSE data. */
	if (irb->esw.esw0.erw.cons) {
		CIO_TRACE_EVENT(2, "sensedata");
		CIO_HEX_EVENT(2, &cdev->private->dev_id,
			      sizeof(struct ccw_dev_id));
		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
		/* Check for command reject. */
		if (irb->ecw[0] & SNS0_CMD_REJECT)
			return IO_REJECTED;
		/* Assume that unexpected SENSE data implies an error. */
		return IO_STATUS_ERROR;
	}
	/* Check for channel errors. */
	if (scsw->cstat != 0)
		return IO_STATUS_ERROR;
	/* Check for device errors. */
	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return IO_STATUS_ERROR;
	/* Check for final state. */
	if (!(scsw->dstat & DEV_STAT_DEV_END))
		return IO_RUNNING;
	/* Check for other improper status. */
	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
		return IO_STATUS_ERROR;
	return IO_DONE;
}
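/*
 * Summary of the checks above, in evaluation order:
 *
 *	condition                               result
 *	--------------------------------------------------------
 *	BASIC SENSE still in progress           IO_RUNNING
 *	halt/clear function indicated           IO_KILLED
 *	cc == 3 or path-not-operational         IO_PATH_ERROR
 *	sense data with command reject          IO_REJECTED
 *	other sense data, cstat or dstat error  IO_STATUS_ERROR
 *	device end not yet received             IO_RUNNING
 *	cc == 1 with alert status               IO_STATUS_ERROR
 *	otherwise                               IO_DONE
 */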
/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
	struct ccw_request *req = &cdev->private->req;
	struct {
		struct ccw_dev_id dev_id;
		u16 retries;
		u8 lpm;
		u8 status;
	} __attribute__ ((packed)) data;

	data.dev_id	= cdev->private->dev_id;
	data.retries	= req->retries;
	data.lpm	= (u8) req->mask;
	data.status	= (u8) status;
	CIO_TRACE_EVENT(2, "reqstat");
	CIO_HEX_EVENT(2, &data, sizeof(data));
}
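/*
 * The packed layout keeps the trace record compact and stable so the
 * hex dump can be decoded field by field (dev_id, retries, lpm,
 * status) when reading the s390dbf trace.
 */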
/**
 * ccw_request_handler - interrupt handler for I/O request procedure.
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
	struct irb *irb = (struct irb *) &S390_lowcore.irb;
	struct ccw_request *req = &cdev->private->req;
	enum io_status status;
	int rc = -EOPNOTSUPP;

	/* Check status of I/O request. */
	status = ccwreq_status(cdev, irb);
	if (req->filter)
		status = req->filter(cdev, req->data, irb, status);
	if (status != IO_RUNNING)
		ccw_device_set_timeout(cdev, 0);
	if (status != IO_DONE && status != IO_RUNNING)
		ccwreq_log_status(cdev, status);
	switch (status) {
	case IO_DONE:
		break;
	case IO_RUNNING:
		return;
	case IO_REJECTED:
		goto err;
	case IO_PATH_ERROR:
		goto err_path;
	case IO_STATUS_ERROR:
		goto err_restart;
	case IO_KILLED:
		/* Check if request was cancelled on purpose. */
		if (req->cancel) {
			rc = -EIO;
			goto err;
		}
		goto err_restart;
	}
	/* Check back with request initiator. */
	if (!req->check)
		goto out;
	switch (req->check(cdev, req->data)) {
	case 0:
		break;
	case -EAGAIN:
		goto err_restart;
	case -EACCES:
		goto err_path;
	default:
		goto err;
	}
out:
	ccwreq_stop(cdev, 0);
	return;

err_path:
	/* Try next path and restart I/O. */
	if (!ccwreq_next_path(cdev)) {
		rc = -EACCES;
		goto err;
	}
err_restart:
	ccwreq_do(cdev);
	return;

err:
	ccwreq_stop(cdev, rc);
}
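/*
 * Control flow summary for the handler above: IO_DONE falls through
 * to the initiator's optional check callback; IO_PATH_ERROR advances
 * the path mask before restarting; IO_STATUS_ERROR and a kill that
 * was not requested via ccw_request_cancel() restart on the current
 * path; all remaining cases stop the request with an error code.
 */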
/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (!ccwreq_next_path(cdev)) {
		/* set the final return code for this request */
		req->drc = -ETIME;
	}
	rc = cio_clear(sch);
	if (rc)
		goto err;
	return;

err:
	ccwreq_stop(cdev, rc);
}
/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle device not operational during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
	ccwreq_stop(cdev, -ENODEV);
}