2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 #include "scu_completion_codes.h"
61 #include "scu_event_codes.h"
64 static struct scu_sgl_element_pair *to_sgl_element_pair(struct scic_sds_request *sci_req,
68 return &sci_req->tc->sgl_pair_ab;
70 return &sci_req->tc->sgl_pair_cd;
74 return &sci_req->sg_table[idx - 2];
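/* Translate an SGL pair index into the bus address the hardware should use.
 * Pairs 0 and 1 reside inside the DMA-mapped task context table, so their
 * addresses are computed as an offset from task_context_dma; all remaining
 * pairs are resolved through scic_io_request_get_dma_addr() against the
 * request's sg_table.
 */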
77 static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic,
78 struct scic_sds_request *sci_req, u32 idx)
83 offset = (void *) &sci_req->tc->sgl_pair_ab -
84 (void *) &scic->task_context_table[0];
85 return scic->task_context_dma + offset;
86 } else if (idx == 1) {
87 offset = (void *) &sci_req->tc->sgl_pair_cd -
88 (void *) &scic->task_context_table[0];
89 return scic->task_context_dma + offset;
92 return scic_io_request_get_dma_addr(sci_req, &sci_req->sg_table[idx - 2]);
95 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
97 e->length = sg_dma_len(sg);
98 e->address_upper = upper_32_bits(sg_dma_address(sg));
99 e->address_lower = lower_32_bits(sg_dma_address(sg));
100 e->address_modifier = 0;
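/* Build the hardware scatter-gather list for this request: walk the sas_task
 * scatterlist two entries at a time filling elements A and B of each pair,
 * zeroing B when there is no second entry, and chain the pairs together by
 * writing the next pair's DMA address into next_pair_upper/lower.  Requests
 * without a scatterlist map the single data buffer with dma_map_single() and
 * record it in zero_scatter_daddr.  The final pair terminates the chain with
 * zero next_pair values.
 */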
103 static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
105 struct isci_request *isci_request = sci_req_to_ireq(sds_request);
106 struct isci_host *isci_host = isci_request->isci_host;
107 struct scic_sds_controller *scic = &isci_host->sci;
108 struct sas_task *task = isci_request_access_task(isci_request);
109 struct scatterlist *sg = NULL;
112 struct scu_sgl_element_pair *scu_sg = NULL;
113 struct scu_sgl_element_pair *prev_sg = NULL;
115 if (task->num_scatter > 0) {
119 scu_sg = to_sgl_element_pair(sds_request, sg_idx);
120 init_sgl_element(&scu_sg->A, sg);
123 init_sgl_element(&scu_sg->B, sg);
126 memset(&scu_sg->B, 0, sizeof(scu_sg->B));
129 dma_addr = to_sgl_element_pair_dma(scic,
133 prev_sg->next_pair_upper =
134 upper_32_bits(dma_addr);
135 prev_sg->next_pair_lower =
136 lower_32_bits(dma_addr);
142 } else { /* handle when no sg */
143 scu_sg = to_sgl_element_pair(sds_request, sg_idx);
145 dma_addr = dma_map_single(&isci_host->pdev->dev,
147 task->total_xfer_len,
150 isci_request->zero_scatter_daddr = dma_addr;
152 scu_sg->A.length = task->total_xfer_len;
153 scu_sg->A.address_upper = upper_32_bits(dma_addr);
154 scu_sg->A.address_lower = lower_32_bits(dma_addr);
158 scu_sg->next_pair_upper = 0;
159 scu_sg->next_pair_lower = 0;
163 static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
165 struct ssp_cmd_iu *cmd_iu;
166 struct isci_request *ireq = sci_req_to_ireq(sci_req);
167 struct sas_task *task = isci_request_access_task(ireq);
169 cmd_iu = &sci_req->ssp.cmd;
171 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
172 cmd_iu->add_cdb_len = 0;
175 cmd_iu->en_fburst = 0; /* unsupported */
176 cmd_iu->task_prio = task->ssp_task.task_prio;
177 cmd_iu->task_attr = task->ssp_task.task_attr;
180 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
181 sizeof(task->ssp_task.cdb) / sizeof(u32));
184 static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
186 struct ssp_task_iu *task_iu;
187 struct isci_request *ireq = sci_req_to_ireq(sci_req);
188 struct sas_task *task = isci_request_access_task(ireq);
189 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
191 task_iu = &sci_req->ssp.tmf;
193 memset(task_iu, 0, sizeof(struct ssp_task_iu));
195 memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
197 task_iu->task_func = isci_tmf->tmf_code;
199 (ireq->ttype == tmf_task) ?
201 SCI_CONTROLLER_INVALID_IO_TAG;
205 * This method will fill in the SCU Task Context for any type of SSP request.
210 static void scu_ssp_reqeust_construct_task_context(
211 struct scic_sds_request *sds_request,
212 struct scu_task_context *task_context)
215 struct scic_sds_remote_device *target_device;
216 struct scic_sds_port *target_port;
218 target_device = scic_sds_request_get_device(sds_request);
219 target_port = scic_sds_request_get_port(sds_request);
221 /* Fill in the TC with its required data */
222 task_context->abort = 0;
223 task_context->priority = 0;
224 task_context->initiator_request = 1;
225 task_context->connection_rate = target_device->connection_rate;
226 task_context->protocol_engine_index =
227 scic_sds_controller_get_protocol_engine_group(controller);
228 task_context->logical_port_index =
229 scic_sds_port_get_index(target_port);
230 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
231 task_context->valid = SCU_TASK_CONTEXT_VALID;
232 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
234 task_context->remote_node_index =
235 scic_sds_remote_device_get_index(sds_request->target_device);
236 task_context->command_code = 0;
238 task_context->link_layer_control = 0;
239 task_context->do_not_dma_ssp_good_response = 1;
240 task_context->strict_ordering = 0;
241 task_context->control_frame = 0;
242 task_context->timeout_enable = 0;
243 task_context->block_guard_enable = 0;
245 task_context->address_modifier = 0;
247 /* task_context->type.ssp.tag = sci_req->io_tag; */
248 task_context->task_phase = 0x01;
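/* The post_context value programs the SCU post queue: it combines the
 * post-TC request type with the protocol engine group, the logical port
 * index and the task context index (TCI) for this request.
 */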
250 sds_request->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
251 (scic_sds_controller_get_protocol_engine_group(controller) <<
252 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
253 (scic_sds_port_get_index(target_port) <<
254 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
255 ISCI_TAG_TCI(sds_request->io_tag));
258 * Copy the physical address for the command buffer to the
261 dma_addr = scic_io_request_get_dma_addr(sds_request,
262 &sds_request->ssp.cmd);
264 task_context->command_iu_upper = upper_32_bits(dma_addr);
265 task_context->command_iu_lower = lower_32_bits(dma_addr);
268 * Copy the physical address for the response buffer to the
271 dma_addr = scic_io_request_get_dma_addr(sds_request,
272 &sds_request->ssp.rsp);
274 task_context->response_iu_upper = upper_32_bits(dma_addr);
275 task_context->response_iu_lower = lower_32_bits(dma_addr);
279 * This method will fill in the SCU Task Context for an SSP IO request.
283 static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *sci_req,
284 enum dma_data_direction dir,
287 struct scu_task_context *task_context = sci_req->tc;
289 scu_ssp_reqeust_construct_task_context(sci_req, task_context);
291 task_context->ssp_command_iu_length =
292 sizeof(struct ssp_cmd_iu) / sizeof(u32);
293 task_context->type.ssp.frame_type = SSP_COMMAND;
296 case DMA_FROM_DEVICE:
299 task_context->task_type = SCU_TASK_TYPE_IOREAD;
302 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
306 task_context->transfer_length_bytes = len;
308 if (task_context->transfer_length_bytes > 0)
309 scic_sds_request_build_sgl(sci_req);
313 * This method will fill in the SCU Task Context for an SSP Task request. The
314 * following important settings are utilized: -# priority ==
315 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
316 * ahead of other tasks destined for the same Remote Node. -# task_type ==
317 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
318 * (i.e. non-raw frame) is being utilized to perform task management. -#
319 * control_frame == 1. This ensures that the proper endianness is set so
320 * that the bytes are transmitted in the right order for a task frame.
321 * @sci_req: This parameter specifies the task request object being
325 static void scu_ssp_task_request_construct_task_context(struct scic_sds_request *sci_req)
327 struct scu_task_context *task_context = sci_req->tc;
329 scu_ssp_reqeust_construct_task_context(sci_req, task_context);
331 task_context->control_frame = 1;
332 task_context->priority = SCU_TASK_PRIORITY_HIGH;
333 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
334 task_context->transfer_length_bytes = 0;
335 task_context->type.ssp.frame_type = SSP_TASK;
336 task_context->ssp_command_iu_length =
337 sizeof(struct ssp_task_iu) / sizeof(u32);
341 * This method will fill in the SCU Task Context for any type of SATA
342 * request. This is called from the various SATA constructors.
343 * @sci_req: The general IO request object which is to be used in
344 * constructing the SCU task context.
345 * @task_context: The buffer pointer for the SCU task context which is being
348 * The general io request construction is complete. The buffer assignment for
349 * the command buffer is complete. Revisit task context construction to
350 * determine what is common for SSP/SMP/STP task context structures.
352 static void scu_sata_reqeust_construct_task_context(
353 struct scic_sds_request *sci_req,
354 struct scu_task_context *task_context)
357 struct scic_sds_remote_device *target_device;
358 struct scic_sds_port *target_port;
360 target_device = scic_sds_request_get_device(sci_req);
361 target_port = scic_sds_request_get_port(sci_req);
363 /* Fill in the TC with its required data */
364 task_context->abort = 0;
365 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
366 task_context->initiator_request = 1;
367 task_context->connection_rate = target_device->connection_rate;
368 task_context->protocol_engine_index =
369 scic_sds_controller_get_protocol_engine_group(controller);
370 task_context->logical_port_index =
371 scic_sds_port_get_index(target_port);
372 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
373 task_context->valid = SCU_TASK_CONTEXT_VALID;
374 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
376 task_context->remote_node_index =
377 scic_sds_remote_device_get_index(sci_req->target_device);
378 task_context->command_code = 0;
380 task_context->link_layer_control = 0;
381 task_context->do_not_dma_ssp_good_response = 1;
382 task_context->strict_ordering = 0;
383 task_context->control_frame = 0;
384 task_context->timeout_enable = 0;
385 task_context->block_guard_enable = 0;
387 task_context->address_modifier = 0;
388 task_context->task_phase = 0x01;
390 task_context->ssp_command_iu_length =
391 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
393 /* Set the first word of the H2D REG FIS */
394 task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
396 sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
397 (scic_sds_controller_get_protocol_engine_group(controller) <<
398 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
399 (scic_sds_port_get_index(target_port) <<
400 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
401 ISCI_TAG_TCI(sci_req->io_tag));
403 * Copy the physical address for the command buffer to the SCU Task
404 * Context. We must offset the command buffer by 4 bytes because the
405 * first 4 bytes are transferred in the body of the TC.
407 dma_addr = scic_io_request_get_dma_addr(sci_req,
408 ((char *) &sci_req->stp.cmd) +
411 task_context->command_iu_upper = upper_32_bits(dma_addr);
412 task_context->command_iu_lower = lower_32_bits(dma_addr);
414 /* SATA Requests do not have a response buffer */
415 task_context->response_iu_upper = 0;
416 task_context->response_iu_lower = 0;
419 static void scu_stp_raw_request_construct_task_context(struct scic_sds_request *sci_req)
421 struct scu_task_context *task_context = sci_req->tc;
423 scu_sata_reqeust_construct_task_context(sci_req, task_context);
425 task_context->control_frame = 0;
426 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
427 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
428 task_context->type.stp.fis_type = FIS_REGH2D;
429 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
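/* PIO request construction starts from a raw H2D register FIS task context,
 * clears the PIO progress counters, and positions the SGL cursor at element A
 * of the first pair.  When the caller wants the data copied through the SGL,
 * the SGL is built and the cursor index starts at zero; otherwise sgl_index
 * is set to -1 to indicate that no SGL walking is required.
 */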
432 static enum sci_status
433 scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
436 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
437 struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;
439 scu_stp_raw_request_construct_task_context(sci_req);
441 pio->current_transfer_bytes = 0;
442 pio->ending_error = 0;
443 pio->ending_status = 0;
445 pio->request_current.sgl_offset = 0;
446 pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;
449 scic_sds_request_build_sgl(sci_req);
450 pio->request_current.sgl_index = 0;
452 /* The user does not want the data copied to the SGL buffer location */
453 pio->request_current.sgl_index = -1;
461 * @sci_req: This parameter specifies the request to be constructed as an
463 * @optimized_task_type: This parameter specifies whether the request is to be
464 * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
465 * value of 1 indicates NCQ.
467 * This method will perform request construction common to all types of STP
468 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
471 static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
472 u8 optimized_task_type,
474 enum dma_data_direction dir)
476 struct scu_task_context *task_context = sci_req->tc;
478 /* Build the STP task context structure */
479 scu_sata_reqeust_construct_task_context(sci_req, task_context);
481 /* Copy over the SGL elements */
482 scic_sds_request_build_sgl(sci_req);
484 /* Copy over the number of bytes to be transfered */
485 task_context->transfer_length_bytes = len;
487 if (dir == DMA_TO_DEVICE) {
489 * The difference between the DMA IN and DMA OUT request task type
490 * values is consistent with the difference between FPDMA READ
491 * and FPDMA WRITE values. Add the supplied task type parameter
492 * to this difference to set the task type properly for this
493 * DATA OUT (WRITE) case. */
494 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
495 - SCU_TASK_TYPE_DMA_IN);
498 * For the DATA IN (READ) case, simply save the supplied
499 * optimized task type. */
500 task_context->task_type = optimized_task_type;
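/* Choose the STP/SATA construction path for a request: SATA soft-reset task
 * management uses the raw H2D frame constructor, non-ATA protocols are
 * rejected, DMA_NONE requests likewise use the raw frame constructor, NCQ and
 * DMA transfers use the silicon-optimized constructor, and everything else
 * falls back to PIO construction.
 */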
506 static enum sci_status
507 scic_io_request_construct_sata(struct scic_sds_request *sci_req,
509 enum dma_data_direction dir,
512 enum sci_status status = SCI_SUCCESS;
513 struct isci_request *ireq = sci_req_to_ireq(sci_req);
514 struct sas_task *task = isci_request_access_task(ireq);
516 /* check for management protocols */
517 if (ireq->ttype == tmf_task) {
518 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
520 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
521 tmf->tmf_code == isci_tmf_sata_srst_low) {
522 scu_stp_raw_request_construct_task_context(sci_req);
525 dev_err(scic_to_dev(sci_req->owning_controller),
526 "%s: Request 0x%p received un-handled SAT "
527 "management protocol 0x%x.\n",
528 __func__, sci_req, tmf->tmf_code);
534 if (!sas_protocol_ata(task->task_proto)) {
535 dev_err(scic_to_dev(sci_req->owning_controller),
536 "%s: Non-ATA protocol in SATA path: 0x%x\n",
544 if (task->data_dir == DMA_NONE) {
545 scu_stp_raw_request_construct_task_context(sci_req);
550 if (task->ata_task.use_ncq) {
551 scic_sds_stp_optimized_request_construct(sci_req,
552 SCU_TASK_TYPE_FPDMAQ_READ,
558 if (task->ata_task.dma_xfer) {
559 scic_sds_stp_optimized_request_construct(sci_req,
560 SCU_TASK_TYPE_DMA_IN,
564 return scic_sds_stp_pio_request_construct(sci_req, copy);
569 static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
571 struct isci_request *ireq = sci_req_to_ireq(sci_req);
572 struct sas_task *task = isci_request_access_task(ireq);
574 sci_req->protocol = SCIC_SSP_PROTOCOL;
576 scu_ssp_io_request_construct_task_context(sci_req,
578 task->total_xfer_len);
580 scic_sds_io_request_build_ssp_command_iu(sci_req);
582 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
587 enum sci_status scic_task_request_construct_ssp(
588 struct scic_sds_request *sci_req)
590 /* Construct the SSP Task SCU Task Context */
591 scu_ssp_task_request_construct_task_context(sci_req);
593 /* Fill in the SSP Task IU */
594 scic_sds_task_request_build_ssp_task_iu(sci_req);
596 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
601 static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
603 enum sci_status status;
605 struct isci_request *isci_request = sci_req_to_ireq(sci_req);
606 struct sas_task *task = isci_request_access_task(isci_request);
608 sci_req->protocol = SCIC_STP_PROTOCOL;
610 copy = (task->data_dir != DMA_NONE);
612 status = scic_io_request_construct_sata(sci_req,
613 task->total_xfer_len,
617 if (status == SCI_SUCCESS)
618 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
623 enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
625 enum sci_status status = SCI_SUCCESS;
626 struct isci_request *ireq = sci_req_to_ireq(sci_req);
628 /* check for management protocols */
629 if (ireq->ttype == tmf_task) {
630 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
632 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
633 tmf->tmf_code == isci_tmf_sata_srst_low) {
634 scu_stp_raw_request_construct_task_context(sci_req);
636 dev_err(scic_to_dev(sci_req->owning_controller),
637 "%s: Request 0x%p received un-handled SAT "
639 __func__, sci_req, tmf->tmf_code);
645 if (status != SCI_SUCCESS)
647 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
653 * sci_req_tx_bytes - bytes transferred when reply underruns request
654 * @sci_req: request that was terminated early
656 #define SCU_TASK_CONTEXT_SRAM 0x200000
657 static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
659 struct scic_sds_controller *scic = sci_req->owning_controller;
662 if (readl(&scic->smu_registers->address_modifier) == 0) {
663 void __iomem *scu_reg_base = scic->scu_registers;
665 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
666 * BAR1 is the scu_registers
667 * 0x20002C = 0x200000 + 0x2c
668 * = start of task context SRAM + offset of (type.ssp.data_offset)
669 * TCi is the io_tag of struct scic_sds_request
671 ret_val = readl(scu_reg_base +
672 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
673 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(sci_req->io_tag)));
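/* Starting a request is only legal from the constructed state: the task
 * context index is written into the TC, the per-protocol fields are filled in
 * (the io_tag and target port transfer tag for SSP/SMP), the TCI is folded
 * into post_context, and the state machine moves to SCI_REQ_STARTED.
 */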
679 enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
681 enum sci_base_request_states state;
682 struct scu_task_context *tc = sci_req->tc;
683 struct scic_sds_controller *scic = sci_req->owning_controller;
685 state = sci_req->sm.current_state_id;
686 if (state != SCI_REQ_CONSTRUCTED) {
687 dev_warn(scic_to_dev(scic),
688 "%s: SCIC IO Request requested to start while in wrong "
689 "state %d\n", __func__, state);
690 return SCI_FAILURE_INVALID_STATE;
693 tc->task_index = ISCI_TAG_TCI(sci_req->io_tag);
695 switch (tc->protocol_type) {
696 case SCU_TASK_CONTEXT_PROTOCOL_SMP:
697 case SCU_TASK_CONTEXT_PROTOCOL_SSP:
699 tc->type.ssp.tag = sci_req->io_tag;
700 tc->type.ssp.target_port_transfer_tag = 0xFFFF;
703 case SCU_TASK_CONTEXT_PROTOCOL_STP:
705 * tc->type.stp.ncq_tag = sci_req->ncq_tag;
709 case SCU_TASK_CONTEXT_PROTOCOL_NONE:
710 /* @todo When do we set no protocol type? */
714 /* This should never happen since we build the IO
719 /* Add the io tag value to the post_context */
720 sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag);
722 /* Everything is good; go ahead and change state */
723 sci_change_state(&sci_req->sm, SCI_REQ_STARTED);
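/* Termination depends on how far the request has progressed: a request that
 * is still only constructed is completed immediately with
 * SCU_TASK_DONE_TASK_ABORT/SCI_FAILURE_IO_TERMINATED, an in-flight request
 * transitions to SCI_REQ_ABORTING so the hardware abort can run to
 * completion, and a request that has already completed is reported as an
 * invalid-state failure.
 */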
729 scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
731 enum sci_base_request_states state;
733 state = sci_req->sm.current_state_id;
736 case SCI_REQ_CONSTRUCTED:
737 scic_sds_request_set_status(sci_req,
738 SCU_TASK_DONE_TASK_ABORT,
739 SCI_FAILURE_IO_TERMINATED);
741 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
743 case SCI_REQ_STARTED:
744 case SCI_REQ_TASK_WAIT_TC_COMP:
745 case SCI_REQ_SMP_WAIT_RESP:
746 case SCI_REQ_SMP_WAIT_TC_COMP:
747 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
748 case SCI_REQ_STP_UDMA_WAIT_D2H:
749 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
750 case SCI_REQ_STP_NON_DATA_WAIT_D2H:
751 case SCI_REQ_STP_PIO_WAIT_H2D:
752 case SCI_REQ_STP_PIO_WAIT_FRAME:
753 case SCI_REQ_STP_PIO_DATA_IN:
754 case SCI_REQ_STP_PIO_DATA_OUT:
755 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
756 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
757 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
758 sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
760 case SCI_REQ_TASK_WAIT_TC_RESP:
761 sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
762 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
764 case SCI_REQ_ABORTING:
765 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
767 case SCI_REQ_COMPLETED:
769 dev_warn(scic_to_dev(sci_req->owning_controller),
770 "%s: SCIC IO Request requested to abort while in wrong "
773 sci_req->sm.current_state_id);
777 return SCI_FAILURE_INVALID_STATE;
780 enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req)
782 enum sci_base_request_states state;
783 struct scic_sds_controller *scic = sci_req->owning_controller;
785 state = sci_req->sm.current_state_id;
786 if (WARN_ONCE(state != SCI_REQ_COMPLETED,
787 "isci: request completion from wrong state (%d)\n", state))
788 return SCI_FAILURE_INVALID_STATE;
790 if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
791 scic_sds_controller_release_frame(scic,
792 sci_req->saved_rx_frame_index);
794 /* XXX can we just stop the machine and remove the 'final' state? */
795 sci_change_state(&sci_req->sm, SCI_REQ_FINAL);
799 enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
802 enum sci_base_request_states state;
803 struct scic_sds_controller *scic = sci_req->owning_controller;
805 state = sci_req->sm.current_state_id;
807 if (state != SCI_REQ_STP_PIO_DATA_IN) {
808 dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
809 __func__, event_code, state);
811 return SCI_FAILURE_INVALID_STATE;
814 switch (scu_get_event_specifier(event_code)) {
815 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
816 /* We are waiting for data and the SCU has R_ERR'd the data frame.
817 * Go back to waiting for the D2H Register FIS
819 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
822 dev_err(scic_to_dev(scic),
823 "%s: pio request unexpected event %#x\n",
824 __func__, event_code);
826 /* TODO Should we fail the PIO request when we get an
834 * This function copies response data for requests returning response data
835 * instead of sense data.
836 * @sci_req: This parameter specifies the request object for which to copy
839 static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
843 struct ssp_response_iu *ssp_response;
844 struct isci_request *ireq = sci_req_to_ireq(sci_req);
845 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
847 ssp_response = &sci_req->ssp.rsp;
849 resp_buf = &isci_tmf->resp.resp_iu;
852 SSP_RESP_IU_MAX_SIZE,
853 be32_to_cpu(ssp_response->response_data_len));
855 memcpy(resp_buf, ssp_response->resp_data, len);
858 static enum sci_status
859 request_started_state_tc_event(struct scic_sds_request *sci_req,
862 struct ssp_response_iu *resp_iu;
865 /* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
866 * to determine SDMA status
868 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
869 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
870 scic_sds_request_set_status(sci_req,
874 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
875 /* There are times when the SCU hardware will return an early
876 * response because the io request specified more data than is
877 * returned by the target device (mode pages, inquiry data,
878 * etc.). We must check the response stats to see if this is
879 * truly a failed request or a good request that just got
882 struct ssp_response_iu *resp = &sci_req->ssp.rsp;
883 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
885 sci_swab32_cpy(&sci_req->ssp.rsp,
889 if (resp->status == 0) {
890 scic_sds_request_set_status(sci_req,
892 SCI_SUCCESS_IO_DONE_EARLY);
894 scic_sds_request_set_status(sci_req,
895 SCU_TASK_DONE_CHECK_RESPONSE,
896 SCI_FAILURE_IO_RESPONSE_VALID);
900 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
901 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
903 sci_swab32_cpy(&sci_req->ssp.rsp,
907 scic_sds_request_set_status(sci_req,
908 SCU_TASK_DONE_CHECK_RESPONSE,
909 SCI_FAILURE_IO_RESPONSE_VALID);
913 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
914 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
915 * guaranteed to be received before this completion status is
918 resp_iu = &sci_req->ssp.rsp;
919 datapres = resp_iu->datapres;
921 if (datapres == 1 || datapres == 2) {
922 scic_sds_request_set_status(sci_req,
923 SCU_TASK_DONE_CHECK_RESPONSE,
924 SCI_FAILURE_IO_RESPONSE_VALID);
926 scic_sds_request_set_status(sci_req,
930 /* only stp device gets suspended. */
931 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
932 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
933 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
934 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
935 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
936 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
937 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
938 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
939 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
940 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
941 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
942 if (sci_req->protocol == SCIC_STP_PROTOCOL) {
943 scic_sds_request_set_status(sci_req,
944 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
945 SCU_COMPLETION_TL_STATUS_SHIFT,
946 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
948 scic_sds_request_set_status(sci_req,
949 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
950 SCU_COMPLETION_TL_STATUS_SHIFT,
951 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
955 /* both stp/ssp devices get suspended */
956 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
957 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
958 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
959 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
960 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
961 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
962 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
963 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
964 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
965 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
966 scic_sds_request_set_status(sci_req,
967 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
968 SCU_COMPLETION_TL_STATUS_SHIFT,
969 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
972 /* neither ssp nor stp gets suspended. */
973 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
974 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
975 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
976 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
977 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
978 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
979 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
980 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
981 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
982 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
983 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
984 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
985 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
986 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
987 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
989 scic_sds_request_set_status(
991 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
992 SCU_COMPLETION_TL_STATUS_SHIFT,
993 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
998 * TODO: This is probably wrong for ACK/NAK timeout conditions
1001 /* In all cases we will treat this as the completion of the IO req. */
1002 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1006 static enum sci_status
1007 request_aborting_state_tc_event(struct scic_sds_request *sci_req,
1008 u32 completion_code)
1010 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1011 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
1012 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
1013 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT,
1014 SCI_FAILURE_IO_TERMINATED);
1016 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1020 /* Unless we get some strange error wait for the task abort to complete
1021 * TODO: Should there be a state change for this completion?
1029 static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req,
1030 u32 completion_code)
1032 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1033 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1034 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1037 sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1039 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1040 /* Currently, the decision is to simply allow the task request
1041 * to timeout if the task IU wasn't received successfully.
1042 * There is a potential for receiving multiple task responses if
1043 * we decide to send the task IU again.
1045 dev_warn(scic_to_dev(sci_req->owning_controller),
1046 "%s: TaskRequest:0x%p CompletionCode:%x - "
1047 "ACK/NAK timeout\n", __func__, sci_req,
1050 sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1054 * All other completion status cause the IO to be complete.
1055 * If a NAK was received, then it is up to the user to retry
1058 scic_sds_request_set_status(sci_req,
1059 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1060 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1062 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1069 static enum sci_status
1070 smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
1071 u32 completion_code)
1073 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1074 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1075 /* In the AWAIT RESPONSE state, any TC completion is
1076 * unexpected, but if the TC has success status, we
1077 * complete the IO anyway.
1079 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1082 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1085 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1086 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1087 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1088 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1089 /* These statuses have been seen in a specific LSI
1090 * expander, which sometimes is not able to send an smp
1091 * response within 2 ms. This causes our hardware to break
1092 * the connection and set the TC completion with one of
1093 * these SMP_XXX_XX_ERR statuses. For this type of error,
1094 * we ask the scic user to retry the request.
1096 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
1097 SCI_FAILURE_RETRY_REQUIRED);
1099 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1103 /* All other completion status cause the IO to be complete. If a NAK
1104 * was received, then it is up to the user to retry the request
1106 scic_sds_request_set_status(sci_req,
1107 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1108 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1110 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1117 static enum sci_status
1118 smp_request_await_tc_event(struct scic_sds_request *sci_req,
1119 u32 completion_code)
1121 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1122 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1123 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1126 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1129 /* All other completion status cause the IO to be
1130 * complete. If a NAK was received, then it is up to
1131 * the user to retry the request.
1133 scic_sds_request_set_status(sci_req,
1134 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1135 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1137 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1144 void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
1148 * @note This could be made to return an error to the user if the user
1149 * attempts to set the NCQ tag in the wrong state.
1151 req->tc->type.stp.ncq_tag = ncq_tag;
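/* Advance the PIO SGL cursor: move from element A to element B of the current
 * pair when B holds a valid address, otherwise step to element A of the next
 * pair; a zero next-pair address means the chain is exhausted.
 */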
1154 static struct scu_sgl_element *pio_sgl_next(struct scic_sds_stp_request *stp_req)
1156 struct scu_sgl_element *sgl;
1157 struct scu_sgl_element_pair *sgl_pair;
1158 struct scic_sds_request *sci_req = to_sci_req(stp_req);
1159 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
1161 sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index);
1164 else if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1165 if (sgl_pair->B.address_lower == 0 &&
1166 sgl_pair->B.address_upper == 0) {
1169 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
1173 if (sgl_pair->next_pair_lower == 0 &&
1174 sgl_pair->next_pair_upper == 0) {
1177 pio_sgl->sgl_index++;
1178 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
1179 sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index);
1187 static enum sci_status
1188 stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
1189 u32 completion_code)
1191 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1192 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1193 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1196 sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1200 /* All other completion status cause the IO to be
1201 * complete. If a NAK was received, then it is up to
1202 * the user to retry the request.
1204 scic_sds_request_set_status(sci_req,
1205 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1206 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1208 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1215 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1217 /* transmit DATA_FIS from (current sgl + offset) for input
1218 * parameter length. The current sgl and offset are already stored in the IO request
1220 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
1221 struct scic_sds_request *sci_req,
1224 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1225 struct scu_task_context *task_context = sci_req->tc;
1226 struct scu_sgl_element_pair *sgl_pair;
1227 struct scu_sgl_element *current_sgl;
1229 /* Recycle the TC and reconstruct it for sending out a DATA FIS containing
1230 * the data from current_sgl+offset for the input length
1232 sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index);
1233 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
1234 current_sgl = &sgl_pair->A;
1236 current_sgl = &sgl_pair->B;
1239 task_context->command_iu_upper = current_sgl->address_upper;
1240 task_context->command_iu_lower = current_sgl->address_lower;
1241 task_context->transfer_length_bytes = length;
1242 task_context->type.stp.fis_type = FIS_DATA;
1244 /* send the new TC out. */
1245 return scic_controller_continue_io(sci_req);
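/* Push the next chunk of a PIO data-out transfer: figure out how many bytes
 * remain in the current SGL element and transmit either that amount or the
 * remaining pio_transfer_bytes, whichever is smaller, as a DATA FIS.  The SGL
 * cursor is then advanced (or the intra-element offset bumped) so that the
 * next call resumes where this one stopped.
 */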
1248 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
1251 struct scu_sgl_element *current_sgl;
1253 u32 remaining_bytes_in_current_sgl = 0;
1254 enum sci_status status = SCI_SUCCESS;
1255 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1256 struct scu_sgl_element_pair *sgl_pair;
1258 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
1259 sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index);
1260 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1263 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1264 current_sgl = &sgl_pair->A;
1265 remaining_bytes_in_current_sgl = sgl_pair->A.length - sgl_offset;
1267 current_sgl = &sgl_pair->B;
1268 remaining_bytes_in_current_sgl = sgl_pair->B.length - sgl_offset;
1271 if (stp_req->type.pio.pio_transfer_bytes > 0) {
1272 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
1273 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
1274 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
1275 if (status == SCI_SUCCESS) {
1276 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
1278 /* update the current sgl, sgl_offset and save for future */
1279 current_sgl = pio_sgl_next(stp_req);
1282 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
1283 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
1284 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
1286 if (status == SCI_SUCCESS) {
1287 /* Sgl offset will be adjusted and saved for future */
1288 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
1289 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
1290 stp_req->type.pio.pio_transfer_bytes = 0;
1295 if (status == SCI_SUCCESS) {
1296 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
1304 * @stp_request: The request that is used for the SGL processing.
1305 * @data_buffer: The buffer of data to be copied.
1306 * @length: The length of the data transfer.
1308 * Copy the data from the buffer for the length specified to the IO request SGL
1309 * specified data region. enum sci_status
1311 static enum sci_status
1312 scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
1313 u8 *data_buf, u32 len)
1315 struct scic_sds_request *sci_req;
1316 struct isci_request *ireq;
1319 struct sas_task *task;
1320 struct scatterlist *sg;
1322 int total_len = len;
1324 sci_req = to_sci_req(stp_req);
1325 ireq = sci_req_to_ireq(sci_req);
1326 task = isci_request_access_task(ireq);
1327 src_addr = data_buf;
1329 if (task->num_scatter > 0) {
1332 while (total_len > 0) {
1333 struct page *page = sg_page(sg);
1335 copy_len = min_t(int, total_len, sg_dma_len(sg));
1336 kaddr = kmap_atomic(page, KM_IRQ0);
1337 memcpy(kaddr + sg->offset, src_addr, copy_len);
1338 kunmap_atomic(kaddr, KM_IRQ0);
1339 total_len -= copy_len;
1340 src_addr += copy_len;
1344 BUG_ON(task->total_xfer_len < total_len);
1345 memcpy(task->scatter, src_addr, total_len);
1353 * @sci_req: The PIO DATA IN request that is to receive the data.
1354 * @data_buffer: The buffer to copy from.
1356 * Copy the data buffer to the io request data region. enum sci_status
1358 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1359 struct scic_sds_stp_request *sci_req,
1362 enum sci_status status;
1365 * If there is less than 1K remaining in the transfer request,
1366 * copy just the data for the transfer */
1367 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
1368 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1369 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
1371 if (status == SCI_SUCCESS)
1372 sci_req->type.pio.pio_transfer_bytes = 0;
1374 /* We are transferring the whole frame so copy */
1375 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1376 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1378 if (status == SCI_SUCCESS)
1379 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
1385 static enum sci_status
1386 stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
1387 u32 completion_code)
1389 enum sci_status status = SCI_SUCCESS;
1391 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1392 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1393 scic_sds_request_set_status(sci_req,
1397 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1401 /* All other completion status cause the IO to be
1402 * complete. If a NAK was received, then it is up to
1403 * the user to retry the request.
1405 scic_sds_request_set_status(sci_req,
1406 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1407 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1409 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1416 static enum sci_status
1417 pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
1418 u32 completion_code)
1420 enum sci_status status = SCI_SUCCESS;
1421 bool all_frames_transferred = false;
1422 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1424 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1425 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1427 if (stp_req->type.pio.pio_transfer_bytes != 0) {
1428 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1429 if (status == SCI_SUCCESS) {
1430 if (stp_req->type.pio.pio_transfer_bytes == 0)
1431 all_frames_transferred = true;
1433 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
1435 * this will happen if all the data is written the
1436 * first time after the pio setup fis is received
1438 all_frames_transferred = true;
1441 /* all data transferred. */
1442 if (all_frames_transferred) {
1444 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
1445 * and wait for a PIO_SETUP fis or D2H Reg fis. */
1446 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1452 * All other completion status cause the IO to be complete.
1453 * If a NAK was received, then it is up to the user to retry
1456 scic_sds_request_set_status(
1458 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1459 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1461 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1468 static void scic_sds_stp_request_udma_complete_request(
1469 struct scic_sds_request *request,
1471 enum sci_status sci_status)
1473 scic_sds_request_set_status(request, scu_status, sci_status);
1474 sci_change_state(&request->sm, SCI_REQ_COMPLETED);
1477 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1480 struct scic_sds_controller *scic = sci_req->owning_controller;
1481 struct dev_to_host_fis *frame_header;
1482 enum sci_status status;
1485 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1487 (void **)&frame_header);
1489 if ((status == SCI_SUCCESS) &&
1490 (frame_header->fis_type == FIS_REGD2H)) {
1491 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1493 (void **)&frame_buffer);
1495 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1500 scic_sds_controller_release_frame(scic, frame_index);
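/* Unsolicited frame dispatch: how a received frame is handled depends
 * entirely on the request's current state (SSP response, task response, SMP
 * response, or one of the STP UDMA/non-data/PIO/soft-reset states waiting for
 * a D2H FIS).  In every path the frame is released back to the controller
 * once it has been decoded.
 */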
1506 scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1509 struct scic_sds_controller *scic = sci_req->owning_controller;
1510 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1511 enum sci_base_request_states state;
1512 enum sci_status status;
1515 state = sci_req->sm.current_state_id;
1517 case SCI_REQ_STARTED: {
1518 struct ssp_frame_hdr ssp_hdr;
1521 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1525 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1526 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1528 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1529 struct ssp_response_iu *resp_iu;
1530 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1532 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1536 sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt);
1538 resp_iu = &sci_req->ssp.rsp;
1540 if (resp_iu->datapres == 0x01 ||
1541 resp_iu->datapres == 0x02) {
1542 scic_sds_request_set_status(sci_req,
1543 SCU_TASK_DONE_CHECK_RESPONSE,
1544 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1546 scic_sds_request_set_status(sci_req,
1550 /* not a response frame, why did it get forwarded? */
1551 dev_err(scic_to_dev(scic),
1552 "%s: SCIC IO Request 0x%p received unexpected "
1553 "frame %d type 0x%02x\n", __func__, sci_req,
1554 frame_index, ssp_hdr.frame_type);
1558 * In any case we are done with this frame buffer; return it to
1561 scic_sds_controller_release_frame(scic, frame_index);
1566 case SCI_REQ_TASK_WAIT_TC_RESP:
1567 scic_sds_io_request_copy_response(sci_req);
1568 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1569 scic_sds_controller_release_frame(scic, frame_index);
1572 case SCI_REQ_SMP_WAIT_RESP: {
1573 struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
1576 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1580 /* byte swap the header. */
1581 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1582 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1584 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1587 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1591 word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
1594 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1595 smp_resp, word_cnt);
1597 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1600 sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1603 * This was not a response frame why did it get
1606 dev_err(scic_to_dev(scic),
1607 "%s: SCIC SMP Request 0x%p received unexpected "
1608 "frame %d type 0x%02x\n",
1612 rsp_hdr->frame_type);
1614 scic_sds_request_set_status(sci_req,
1615 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1616 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1618 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1621 scic_sds_controller_release_frame(scic, frame_index);
1626 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1627 return scic_sds_stp_request_udma_general_frame_handler(sci_req,
1630 case SCI_REQ_STP_UDMA_WAIT_D2H:
1631 /* Use the general frame handler to copy the response data */
1632 status = scic_sds_stp_request_udma_general_frame_handler(sci_req,
1635 if (status != SCI_SUCCESS)
1638 scic_sds_stp_request_udma_complete_request(sci_req,
1639 SCU_TASK_DONE_CHECK_RESPONSE,
1640 SCI_FAILURE_IO_RESPONSE_VALID);
1644 case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1645 struct dev_to_host_fis *frame_header;
1648 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1650 (void **)&frame_header);
1652 if (status != SCI_SUCCESS) {
1653 dev_err(scic_to_dev(scic),
1654 "%s: SCIC IO Request 0x%p could not get frame "
1655 "header for frame index %d, status %x\n",
1664 switch (frame_header->fis_type) {
1666 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1668 (void **)&frame_buffer);
1670 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1674 /* The command has completed with error */
1675 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1676 SCI_FAILURE_IO_RESPONSE_VALID);
1680 dev_warn(scic_to_dev(scic),
1681 "%s: IO Request:0x%p Frame Id:%d protocol "
1682 "violation occurred\n", __func__, stp_req,
1685 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1686 SCI_FAILURE_PROTOCOL_VIOLATION);
1690 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1692 /* Frame has been decoded return it to the controller */
1693 scic_sds_controller_release_frame(scic, frame_index);
1698 case SCI_REQ_STP_PIO_WAIT_FRAME: {
1699 struct isci_request *ireq = sci_req_to_ireq(sci_req);
1700 struct sas_task *task = isci_request_access_task(ireq);
1701 struct dev_to_host_fis *frame_header;
1704 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1706 (void **)&frame_header);
1708 if (status != SCI_SUCCESS) {
1709 dev_err(scic_to_dev(scic),
1710 "%s: SCIC IO Request 0x%p could not get frame "
1711 "header for frame index %d, status %x\n",
1712 __func__, stp_req, frame_index, status);
1716 switch (frame_header->fis_type) {
1718 /* Get from the frame buffer the PIO Setup Data */
1719 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1721 (void **)&frame_buffer);
1723 /* Get the data from the PIO Setup. The SCU Hardware
1724 * returns the first word in the frame_header and the rest
1725 * of the data is in the frame buffer, so we need to
1729 /* transfer_count: first 16 bits in the 4th dword */
1730 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
1732 /* ending_status: 4th byte in the 3rd dword */
1733 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
1735 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1739 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
1741 /* The next state is dependent on whether the
1742 * request was PIO Data-in or Data out
1744 if (task->data_dir == DMA_FROM_DEVICE) {
1745 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN);
1746 } else if (task->data_dir == DMA_TO_DEVICE) {
1748 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1749 if (status != SCI_SUCCESS)
1751 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT);
1755 case FIS_SETDEVBITS:
1756 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1760 if (frame_header->status & ATA_BUSY) {
1762 * Now why is the drive sending a D2H Register
1763 * FIS when it is still busy? Do nothing since
1764 * we are still in the right state.
1766 dev_dbg(scic_to_dev(scic),
1767 "%s: SCIC PIO Request 0x%p received "
1768 "D2H Register FIS with BSY status "
1772 frame_header->status);
1776 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1778 (void **)&frame_buffer);
1780 scic_sds_controller_copy_sata_response(&sci_req->stp.req,
1784 scic_sds_request_set_status(sci_req,
1785 SCU_TASK_DONE_CHECK_RESPONSE,
1786 SCI_FAILURE_IO_RESPONSE_VALID);
1788 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1792 /* FIXME: what do we do here? */
1796 /* Frame is decoded return it to the controller */
1797 scic_sds_controller_release_frame(scic, frame_index);
1802 case SCI_REQ_STP_PIO_DATA_IN: {
1803 struct dev_to_host_fis *frame_header;
1804 struct sata_fis_data *frame_buffer;
1806 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1808 (void **)&frame_header);
1810 if (status != SCI_SUCCESS) {
1811 dev_err(scic_to_dev(scic),
1812 "%s: SCIC IO Request 0x%p could not get frame "
1813 "header for frame index %d, status %x\n",
1821 if (frame_header->fis_type != FIS_DATA) {
1822 dev_err(scic_to_dev(scic),
1823 "%s: SCIC PIO Request 0x%p received frame %d "
1824 "with fis type 0x%02x when expecting a data "
1829 frame_header->fis_type);
1831 scic_sds_request_set_status(sci_req,
1833 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1835 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1837 /* Frame is decoded return it to the controller */
1838 scic_sds_controller_release_frame(scic, frame_index);
1842 if (stp_req->type.pio.request_current.sgl_index < 0) {
1843 sci_req->saved_rx_frame_index = frame_index;
1844 stp_req->type.pio.pio_transfer_bytes = 0;
1846 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1848 (void **)&frame_buffer);
1850 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
1851 (u8 *)frame_buffer);
1853 /* Frame is decoded return it to the controller */
1854 scic_sds_controller_release_frame(scic, frame_index);
1857 /* Check for the end of the transfer: are there more
1858 * bytes remaining for this data transfer
1860 if (status != SCI_SUCCESS ||
1861 stp_req->type.pio.pio_transfer_bytes != 0)
1864 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
1865 scic_sds_request_set_status(sci_req,
1866 SCU_TASK_DONE_CHECK_RESPONSE,
1867 SCI_FAILURE_IO_RESPONSE_VALID);
1869 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1871 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1876 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1877 struct dev_to_host_fis *frame_header;
1880 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1882 (void **)&frame_header);
1883 if (status != SCI_SUCCESS) {
1884 dev_err(scic_to_dev(scic),
1885 "%s: SCIC IO Request 0x%p could not get frame "
1886 "header for frame index %d, status %x\n",
1894 switch (frame_header->fis_type) {
1896 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1898 (void **)&frame_buffer);
1900 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1904 /* The command has completed with error */
1905 scic_sds_request_set_status(sci_req,
1906 SCU_TASK_DONE_CHECK_RESPONSE,
1907 SCI_FAILURE_IO_RESPONSE_VALID);
1911 dev_warn(scic_to_dev(scic),
1912 "%s: IO Request:0x%p Frame Id:%d protocol "
1913 "violation occurred\n",
1918 scic_sds_request_set_status(sci_req,
1919 SCU_TASK_DONE_UNEXP_FIS,
1920 SCI_FAILURE_PROTOCOL_VIOLATION);
1924 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1926 /* Frame has been decoded return it to the controller */
1927 scic_sds_controller_release_frame(scic, frame_index);
1931 case SCI_REQ_ABORTING:
1933 * TODO: Is it even possible to get an unsolicited frame in the
1936 scic_sds_controller_release_frame(scic, frame_index);
1940 dev_warn(scic_to_dev(scic),
1941 "%s: SCIC IO Request given unexpected frame %x while "
1947 scic_sds_controller_release_frame(scic, frame_index);
1948 return SCI_FAILURE_INVALID_STATE;
1952 static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req,
1953 u32 completion_code)
1955 enum sci_status status = SCI_SUCCESS;
1957 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1958 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1959 scic_sds_stp_request_udma_complete_request(sci_req,
1963 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1964 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1965 /* We must check the response buffer to see if the D2H
1966 * Register FIS was received before we got the TC
1969 if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
1970 scic_sds_remote_device_suspend(sci_req->target_device,
1971 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1973 scic_sds_stp_request_udma_complete_request(sci_req,
1974 SCU_TASK_DONE_CHECK_RESPONSE,
1975 SCI_FAILURE_IO_RESPONSE_VALID);
1977 /* If we have an error completion status for the
1978 * TC then we can expect a D2H register FIS from
1979 * the device so we must change state to wait
1982 sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
1986 /* TODO Check to see if any of these completion statuses need to
1987 * wait for the device to host register fis.
1989 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
1990 * - this comes only for B0
1992 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
1993 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1994 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1995 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1996 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1997 scic_sds_remote_device_suspend(sci_req->target_device,
1998 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1999 /* Fall through to the default case */
2001 /* All other completion statuses cause the IO to be complete. */
2002 scic_sds_stp_request_udma_complete_request(sci_req,
2003 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2004 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
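/**
 * stp_request_soft_reset_await_h2d_asserted_tc_event() - TC completion
 *    handler for the H2D Register FIS that asserts SRST during a SATA
 *    soft reset.
 *
 * On SCU_TASK_DONE_GOOD the request advances to
 * SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG; any other completion status
 * completes the request with a controller-specific I/O error.
 */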
2011 static enum sci_status
2012 stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req,
2013 u32 completion_code)
2015 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2016 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2017 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2020 sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2025 * All other completion statuses cause the IO to be complete.
2026 * If a NAK was received, then it is up to the user to retry
2029 scic_sds_request_set_status(sci_req,
2030 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2031 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2033 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
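/**
 * stp_request_soft_reset_await_h2d_diagnostic_tc_event() - TC completion
 *    handler for the H2D Register FIS that de-asserts SRST.
 *
 * On SCU_TASK_DONE_GOOD the request moves to
 * SCI_REQ_STP_SOFT_RESET_WAIT_D2H to wait for the device's D2H Register
 * FIS; any other completion status completes the request with a
 * controller-specific I/O error.
 */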
2040 static enum sci_status
2041 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req,
2042 u32 completion_code)
2044 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2045 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2046 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2049 sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2053 /* All other completion statuses cause the IO to be complete. If
2054 * a NAK was received, then it is up to the user to retry the
2057 scic_sds_request_set_status(sci_req,
2058 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2059 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2061 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
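/**
 * scic_sds_io_request_tc_completion() - Dispatch a task context completion
 *    notification to the handler for the request's current state.
 *
 * Returns SCI_FAILURE_INVALID_STATE (after a warning) if the request is
 * not in a state that expects a TC completion.
 */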
2069 scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req,
2070 u32 completion_code)
2072 enum sci_base_request_states state;
2073 struct scic_sds_controller *scic = sci_req->owning_controller;
2075 state = sci_req->sm.current_state_id;
2078 case SCI_REQ_STARTED:
2079 return request_started_state_tc_event(sci_req, completion_code);
2081 case SCI_REQ_TASK_WAIT_TC_COMP:
2082 return ssp_task_request_await_tc_event(sci_req,
2085 case SCI_REQ_SMP_WAIT_RESP:
2086 return smp_request_await_response_tc_event(sci_req,
2089 case SCI_REQ_SMP_WAIT_TC_COMP:
2090 return smp_request_await_tc_event(sci_req, completion_code);
2092 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2093 return stp_request_udma_await_tc_event(sci_req,
2096 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2097 return stp_request_non_data_await_h2d_tc_event(sci_req,
2100 case SCI_REQ_STP_PIO_WAIT_H2D:
2101 return stp_request_pio_await_h2d_completion_tc_event(sci_req,
2104 case SCI_REQ_STP_PIO_DATA_OUT:
2105 return pio_data_out_tx_done_tc_event(sci_req, completion_code);
2107 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2108 return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req,
2111 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2112 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req,
2115 case SCI_REQ_ABORTING:
2116 return request_aborting_state_tc_event(sci_req,
2120 dev_warn(scic_to_dev(scic),
2121 "%s: SCIC IO Request given task completion "
2122 "notification %x while in wrong state %d\n",
2126 return SCI_FAILURE_INVALID_STATE;
2131 * isci_request_process_response_iu() - This function sets the status and
2132 * response iu, in the task struct, from the request object for the upper
2134 * @sas_task: This parameter is the task struct from the upper layer driver.
2135 * @resp_iu: This parameter points to the response iu of the completed request.
2136 * @dev: This parameter specifies the linux device struct.
2140 static void isci_request_process_response_iu(
2141 struct sas_task *task,
2142 struct ssp_response_iu *resp_iu,
2147 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2148 "resp_iu->response_data_len = %x, "
2149 "resp_iu->sense_data_len = %x\nrepsonse data: ",
2154 resp_iu->response_data_len,
2155 resp_iu->sense_data_len);
2157 task->task_status.stat = resp_iu->status;
2159 /* libsas updates the task status fields based on the response iu. */
2160 sas_ssp_task_response(dev, task, resp_iu);
2164 * isci_request_set_open_reject_status() - This function prepares the I/O
2165 * completion for OPEN_REJECT conditions.
2166 * @request: This parameter is the completed isci_request object.
2167 * @response_ptr: This parameter specifies the service response for the I/O.
2168 * @status_ptr: This parameter specifies the exec status for the I/O.
2169 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2170 * the LLDD with respect to completing this request or forcing an abort
2171 * condition on the I/O.
2172 * @open_rej_reason: This parameter specifies the encoded reason for the
2173 * abandon-class reject.
2177 static void isci_request_set_open_reject_status(
2178 struct isci_request *request,
2179 struct sas_task *task,
2180 enum service_response *response_ptr,
2181 enum exec_status *status_ptr,
2182 enum isci_completion_selection *complete_to_host_ptr,
2183 enum sas_open_rej_reason open_rej_reason)
2185 /* Task in the target is done. */
2186 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2187 *response_ptr = SAS_TASK_UNDELIVERED;
2188 *status_ptr = SAS_OPEN_REJECT;
2189 *complete_to_host_ptr = isci_perform_normal_io_completion;
2190 task->task_status.open_rej_reason = open_rej_reason;
2194 * isci_request_handle_controller_specific_errors() - This function decodes
2195 * controller-specific I/O completion error conditions.
2196 * @request: This parameter is the completed isci_request object.
2197 * @response_ptr: This parameter specifies the service response for the I/O.
2198 * @status_ptr: This parameter specifies the exec status for the I/O.
2199 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2200 * the LLDD with respect to completing this request or forcing an abort
2201 * condition on the I/O.
2205 static void isci_request_handle_controller_specific_errors(
2206 struct isci_remote_device *idev,
2207 struct isci_request *request,
2208 struct sas_task *task,
2209 enum service_response *response_ptr,
2210 enum exec_status *status_ptr,
2211 enum isci_completion_selection *complete_to_host_ptr)
2213 unsigned int cstatus;
2215 cstatus = request->sci.scu_status;
2217 dev_dbg(&request->isci_host->pdev->dev,
2218 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2219 "- controller status = 0x%x\n",
2220 __func__, request, cstatus);
2222 /* Decode the controller-specific errors; most
2223 * important is to recognize those conditions in which
2224 * the target may still have a task outstanding that
2227 * Note that there are SCU completion codes being
2228 * named in the decode below for which SCIC has already
2229 * done work to handle them in a way other than as
2230 * a controller-specific completion code; these are left
2231 * in the decode below for completeness' sake.
2234 case SCU_TASK_DONE_DMASETUP_DIRERR:
2235 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2236 case SCU_TASK_DONE_XFERCNT_ERR:
2237 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2238 if (task->task_proto == SAS_PROTOCOL_SMP) {
2239 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2240 *response_ptr = SAS_TASK_COMPLETE;
2242 /* See if the device has been/is being stopped. Note
2243 * that we ignore the quiesce state, since we are
2244 * concerned about the actual device state.
2247 *status_ptr = SAS_DEVICE_UNKNOWN;
2249 *status_ptr = SAS_ABORTED_TASK;
2251 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2253 *complete_to_host_ptr =
2254 isci_perform_normal_io_completion;
2256 /* Task in the target is not done. */
2257 *response_ptr = SAS_TASK_UNDELIVERED;
2260 *status_ptr = SAS_DEVICE_UNKNOWN;
2262 *status_ptr = SAM_STAT_TASK_ABORTED;
2264 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2266 *complete_to_host_ptr =
2267 isci_perform_error_io_completion;
2272 case SCU_TASK_DONE_CRC_ERR:
2273 case SCU_TASK_DONE_NAK_CMD_ERR:
2274 case SCU_TASK_DONE_EXCESS_DATA:
2275 case SCU_TASK_DONE_UNEXP_FIS:
2276 /* Also SCU_TASK_DONE_UNEXP_RESP: */
2277 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2278 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2279 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2280 /* These are conditions in which the target
2281 * has completed the task, so that no cleanup
2284 *response_ptr = SAS_TASK_COMPLETE;
2286 /* See if the device has been/is being stopped. Note
2287 * that we ignore the quiesce state, since we are
2288 * concerned about the actual device state.
2291 *status_ptr = SAS_DEVICE_UNKNOWN;
2293 *status_ptr = SAS_ABORTED_TASK;
2295 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2297 *complete_to_host_ptr = isci_perform_normal_io_completion;
2301 /* Note that the only open reject completion codes seen here will be
2302 * abandon-class codes; all others are automatically retried in the SCU.
2304 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2306 isci_request_set_open_reject_status(
2307 request, task, response_ptr, status_ptr,
2308 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2311 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2313 /* Note - the return of AB0 will change when
2314 * libsas implements detection of zone violations.
2316 isci_request_set_open_reject_status(
2317 request, task, response_ptr, status_ptr,
2318 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2321 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2323 isci_request_set_open_reject_status(
2324 request, task, response_ptr, status_ptr,
2325 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2328 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2330 isci_request_set_open_reject_status(
2331 request, task, response_ptr, status_ptr,
2332 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2335 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2337 isci_request_set_open_reject_status(
2338 request, task, response_ptr, status_ptr,
2339 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2342 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2344 isci_request_set_open_reject_status(
2345 request, task, response_ptr, status_ptr,
2346 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2349 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2351 isci_request_set_open_reject_status(
2352 request, task, response_ptr, status_ptr,
2353 complete_to_host_ptr, SAS_OREJ_STP_NORES);
2356 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2358 isci_request_set_open_reject_status(
2359 request, task, response_ptr, status_ptr,
2360 complete_to_host_ptr, SAS_OREJ_EPROTO);
2363 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2365 isci_request_set_open_reject_status(
2366 request, task, response_ptr, status_ptr,
2367 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2370 case SCU_TASK_DONE_LL_R_ERR:
2371 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2372 case SCU_TASK_DONE_LL_PERR:
2373 case SCU_TASK_DONE_LL_SY_TERM:
2374 /* Also SCU_TASK_DONE_NAK_ERR:*/
2375 case SCU_TASK_DONE_LL_LF_TERM:
2376 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2377 case SCU_TASK_DONE_LL_ABORT_ERR:
2378 case SCU_TASK_DONE_SEQ_INV_TYPE:
2379 /* Also SCU_TASK_DONE_UNEXP_XR: */
2380 case SCU_TASK_DONE_XR_IU_LEN_ERR:
2381 case SCU_TASK_DONE_INV_FIS_LEN:
2382 /* Also SCU_TASK_DONE_XR_WD_LEN: */
2383 case SCU_TASK_DONE_SDMA_ERR:
2384 case SCU_TASK_DONE_OFFSET_ERR:
2385 case SCU_TASK_DONE_MAX_PLD_ERR:
2386 case SCU_TASK_DONE_LF_ERR:
2387 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2388 case SCU_TASK_DONE_SMP_LL_RX_ERR:
2389 case SCU_TASK_DONE_UNEXP_DATA:
2390 case SCU_TASK_DONE_UNEXP_SDBFIS:
2391 case SCU_TASK_DONE_REG_ERR:
2392 case SCU_TASK_DONE_SDB_ERR:
2393 case SCU_TASK_DONE_TASK_ABORT:
2395 /* Task in the target is not done. */
2396 *response_ptr = SAS_TASK_UNDELIVERED;
2397 *status_ptr = SAM_STAT_TASK_ABORTED;
2399 if (task->task_proto == SAS_PROTOCOL_SMP) {
2400 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2402 *complete_to_host_ptr = isci_perform_normal_io_completion;
2404 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2406 *complete_to_host_ptr = isci_perform_error_io_completion;
2413 * isci_task_save_for_upper_layer_completion() - This function saves the
2414 * request for later completion to the upper layer driver.
2415 * @host: This parameter is a pointer to the host on which the request
2416 * should be queued (either as an error or success).
2417 * @request: This parameter is the completed request.
2418 * @response: This parameter is the response code for the completed task.
2419 * @status: This parameter is the status code for the completed task.
2423 static void isci_task_save_for_upper_layer_completion(
2424 struct isci_host *host,
2425 struct isci_request *request,
2426 enum service_response response,
2427 enum exec_status status,
2428 enum isci_completion_selection task_notification_selection)
2430 struct sas_task *task = isci_request_access_task(request);
2432 task_notification_selection
2433 = isci_task_set_completion_status(task, response, status,
2434 task_notification_selection);
2436 /* Tasks aborted specifically by a call to the lldd_abort_task
2437 * function should not be completed to the host in the regular path.
2439 switch (task_notification_selection) {
2441 case isci_perform_normal_io_completion:
2443 /* Normal notification (task_done) */
2444 dev_dbg(&host->pdev->dev,
2445 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2448 task->task_status.resp, response,
2449 task->task_status.stat, status);
2450 /* Add to the completed list. */
2451 list_add(&request->completed_node,
2452 &host->requests_to_complete);
2454 /* Take the request off the device's pending request list. */
2455 list_del_init(&request->dev_node);
2458 case isci_perform_aborted_io_completion:
2459 /* No notification to libsas because this request is
2460 * already in the abort path.
2462 dev_warn(&host->pdev->dev,
2463 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2466 task->task_status.resp, response,
2467 task->task_status.stat, status);
2469 /* Wake up whatever process was waiting for this
2470 * request to complete.
2472 WARN_ON(request->io_request_completion == NULL);
2474 if (request->io_request_completion != NULL) {
2476 /* Signal whoever is waiting that this
2477 * request is complete.
2479 complete(request->io_request_completion);
2483 case isci_perform_error_io_completion:
2484 /* Use sas_task_abort */
2485 dev_warn(&host->pdev->dev,
2486 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2489 task->task_status.resp, response,
2490 task->task_status.stat, status);
2491 /* Add to the aborted list. */
2492 list_add(&request->completed_node,
2493 &host->requests_to_errorback);
2497 dev_warn(&host->pdev->dev,
2498 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2501 task->task_status.resp, response,
2502 task->task_status.stat, status);
2504 /* Add to the error to libsas list. */
2505 list_add(&request->completed_node,
2506 &host->requests_to_errorback);
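/**
 * isci_request_io_request_complete() - Translate a core completion into a
 *    libsas completion.
 *
 * Decodes the request state and the core completion_status into a libsas
 * service response / exec status, unmaps any DMA mappings made for the
 * request, queues the request for completion to the upper layer via
 * isci_task_save_for_upper_layer_completion(), and finally completes the
 * I/O back to the core with scic_controller_complete_io().
 */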
2511 static void isci_request_io_request_complete(struct isci_host *isci_host,
2512 struct isci_request *request,
2513 enum sci_io_status completion_status)
2515 struct sas_task *task = isci_request_access_task(request);
2516 struct ssp_response_iu *resp_iu;
2518 unsigned long task_flags;
2519 struct isci_remote_device *idev = isci_lookup_device(task->dev);
2520 enum service_response response = SAS_TASK_UNDELIVERED;
2521 enum exec_status status = SAS_ABORTED_TASK;
2522 enum isci_request_status request_status;
2523 enum isci_completion_selection complete_to_host
2524 = isci_perform_normal_io_completion;
2526 dev_dbg(&isci_host->pdev->dev,
2527 "%s: request = %p, task = %p,\n"
2528 "task->data_dir = %d completion_status = 0x%x\n",
2535 spin_lock(&request->state_lock);
2536 request_status = isci_request_get_state(request);
2538 /* Decode the request status. Note that if the request has been
2539 * aborted by a task management function, we don't care
2540 * what the status is.
2542 switch (request_status) {
2545 /* "aborted" indicates that the request was aborted by a task
2546 * management function, since once a task management request is
2547 * performed by the device, the request only completes because
2548 * of the subsequent driver terminate.
2550 * Aborted also means an external thread is explicitly managing
2551 * this request, so that we do not complete it up the stack.
2553 * The target is still there (since the TMF was successful).
2555 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2556 response = SAS_TASK_COMPLETE;
2558 /* See if the device has been/is being stopped. Note
2559 * that we ignore the quiesce state, since we are
2560 * concerned about the actual device state.
2563 status = SAS_DEVICE_UNKNOWN;
2565 status = SAS_ABORTED_TASK;
2567 complete_to_host = isci_perform_aborted_io_completion;
2568 /* This was an aborted request. */
2570 spin_unlock(&request->state_lock);
2574 /* aborting means that the task management function tried and
2575 * failed to abort the request. We need to note the request
2576 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2579 * Aborting also means an external thread is explicitly managing
2580 * this request, so that we do not complete it up the stack.
2582 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2583 response = SAS_TASK_UNDELIVERED;
2586 /* The device has been /is being stopped. Note that
2587 * we ignore the quiesce state, since we are
2588 * concerned about the actual device state.
2590 status = SAS_DEVICE_UNKNOWN;
2592 status = SAS_PHY_DOWN;
2594 complete_to_host = isci_perform_aborted_io_completion;
2596 /* This was an aborted request. */
2598 spin_unlock(&request->state_lock);
2603 /* This was a terminated request. This happens when
2604 * the I/O is being terminated because of an action on
2605 * the device (reset, tear down, etc.), and the I/O needs
2606 * to be completed up the stack.
2608 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2609 response = SAS_TASK_UNDELIVERED;
2611 /* See if the device has been/is being stopped. Note
2612 * that we ignore the quiesce state, since we are
2613 * concerned about the actual device state.
2616 status = SAS_DEVICE_UNKNOWN;
2618 status = SAS_ABORTED_TASK;
2620 complete_to_host = isci_perform_aborted_io_completion;
2622 /* This was a terminated request. */
2624 spin_unlock(&request->state_lock);
2628 /* This was a terminated request that timed out during the
2629 * termination process. There is no task to complete to
2632 complete_to_host = isci_perform_normal_io_completion;
2633 spin_unlock(&request->state_lock);
2638 /* The request is done from an SCU HW perspective. */
2639 request->status = completed;
2641 spin_unlock(&request->state_lock);
2643 /* This is an active request being completed from the core. */
2644 switch (completion_status) {
2646 case SCI_IO_FAILURE_RESPONSE_VALID:
2647 dev_dbg(&isci_host->pdev->dev,
2648 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2653 if (sas_protocol_ata(task->task_proto)) {
2654 resp_buf = &request->sci.stp.rsp;
2655 isci_request_process_stp_response(task,
2657 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2659 /* crack the iu response buffer. */
2660 resp_iu = &request->sci.ssp.rsp;
2661 isci_request_process_response_iu(task, resp_iu,
2662 &isci_host->pdev->dev);
2664 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2666 dev_err(&isci_host->pdev->dev,
2667 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2668 "SAS_PROTOCOL_SMP protocol\n",
2672 dev_err(&isci_host->pdev->dev,
2673 "%s: unknown protocol\n", __func__);
2675 /* use the task status set in the task struct by the
2676 * isci_request_process_response_iu call.
2678 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2679 response = task->task_status.resp;
2680 status = task->task_status.stat;
2683 case SCI_IO_SUCCESS:
2684 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2686 response = SAS_TASK_COMPLETE;
2687 status = SAM_STAT_GOOD;
2688 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2690 if (task->task_proto == SAS_PROTOCOL_SMP) {
2691 void *rsp = &request->sci.smp.rsp;
2693 dev_dbg(&isci_host->pdev->dev,
2694 "%s: SMP protocol completion\n",
2697 sg_copy_from_buffer(
2698 &task->smp_task.smp_resp, 1,
2699 rsp, sizeof(struct smp_resp));
2700 } else if (completion_status
2701 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2703 /* This was an SSP / STP / SATA transfer.
2704 * There is a possibility that less data than
2705 * the maximum was transferred.
2707 u32 transferred_length = sci_req_tx_bytes(&request->sci);
2709 task->task_status.residual
2710 = task->total_xfer_len - transferred_length;
2712 /* If there were residual bytes, call this an
2715 if (task->task_status.residual != 0)
2716 status = SAS_DATA_UNDERRUN;
2718 dev_dbg(&isci_host->pdev->dev,
2719 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2724 dev_dbg(&isci_host->pdev->dev,
2725 "%s: SCI_IO_SUCCESS\n",
2730 case SCI_IO_FAILURE_TERMINATED:
2731 dev_dbg(&isci_host->pdev->dev,
2732 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2737 /* The request was terminated explicitly. No handling
2738 * is needed in the SCSI error handler path.
2740 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2741 response = SAS_TASK_UNDELIVERED;
2743 /* See if the device has been/is being stopped. Note
2744 * that we ignore the quiesce state, since we are
2745 * concerned about the actual device state.
2748 status = SAS_DEVICE_UNKNOWN;
2750 status = SAS_ABORTED_TASK;
2752 complete_to_host = isci_perform_normal_io_completion;
2755 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2757 isci_request_handle_controller_specific_errors(
2758 idev, request, task, &response, &status,
2763 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2764 /* This is a special case, in that the I/O completion
2765 * is telling us that the device needs a reset.
2766 * In order for the device reset condition to be
2767 * noticed, the I/O has to be handled in the error
2768 * handler. Set the reset flag and cause the
2769 * SCSI error thread to be scheduled.
2771 spin_lock_irqsave(&task->task_state_lock, task_flags);
2772 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2773 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2776 response = SAS_TASK_UNDELIVERED;
2777 status = SAM_STAT_TASK_ABORTED;
2779 complete_to_host = isci_perform_error_io_completion;
2780 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2783 case SCI_FAILURE_RETRY_REQUIRED:
2785 /* Fail the I/O so it can be retried. */
2786 response = SAS_TASK_UNDELIVERED;
2788 status = SAS_DEVICE_UNKNOWN;
2790 status = SAS_ABORTED_TASK;
2792 complete_to_host = isci_perform_normal_io_completion;
2793 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2798 /* Catch any otherwise unhandled error codes here. */
2799 dev_warn(&isci_host->pdev->dev,
2800 "%s: invalid completion code: 0x%x - "
2801 "isci_request = %p\n",
2802 __func__, completion_status, request);
2804 response = SAS_TASK_UNDELIVERED;
2806 /* See if the device has been/is being stopped. Note
2807 * that we ignore the quiesce state, since we are
2808 * concerned about the actual device state.
2811 status = SAS_DEVICE_UNKNOWN;
2813 status = SAS_ABORTED_TASK;
2815 if (SAS_PROTOCOL_SMP == task->task_proto) {
2816 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2817 complete_to_host = isci_perform_normal_io_completion;
2819 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2820 complete_to_host = isci_perform_error_io_completion;
2827 switch (task->task_proto) {
2828 case SAS_PROTOCOL_SSP:
2829 if (task->data_dir == DMA_NONE)
2831 if (task->num_scatter == 0)
2832 /* 0 indicates a single dma address */
2833 dma_unmap_single(&isci_host->pdev->dev,
2834 request->zero_scatter_daddr,
2835 task->total_xfer_len, task->data_dir);
2836 else /* unmap the sgl dma addresses */
2837 dma_unmap_sg(&isci_host->pdev->dev, task->scatter,
2838 request->num_sg_entries, task->data_dir);
2840 case SAS_PROTOCOL_SMP: {
2841 struct scatterlist *sg = &task->smp_task.smp_req;
2842 struct smp_req *smp_req;
2845 dma_unmap_sg(&isci_host->pdev->dev, sg, 1, DMA_TO_DEVICE);
2847 /* need to swab it back in case the command buffer is re-used */
2848 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
2849 smp_req = kaddr + sg->offset;
2850 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2851 kunmap_atomic(kaddr, KM_IRQ0);
2858 /* Put the completed request on the correct list */
2859 isci_task_save_for_upper_layer_completion(isci_host, request, response,
2860 status, complete_to_host
2863 /* complete the io request to the core. */
2864 scic_controller_complete_io(&isci_host->sci,
2865 request->sci.target_device,
2867 isci_put_device(idev);
2869 /* set terminated handle so it cannot be completed or
2870 * terminated again, and to cause any calls into abort
2871 * task to recognize the already completed case.
2873 set_bit(IREQ_TERMINATED, &request->flags);
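/**
 * scic_sds_request_started_state_enter() - On entry to SCI_REQ_STARTED,
 *    pick the protocol-specific wait sub-state for the request.
 *
 * SSP task management requests wait for the TC, SATA soft resets wait for
 * the SRST-asserted H2D FIS, SMP requests wait for the response frame, and
 * non-NCQ STP/SATA requests wait in the non-data, UDMA, or PIO H2D state
 * depending on the data direction and transfer mode.
 */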
2876 static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
2878 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2879 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2880 struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
2881 struct sas_task *task;
2883 /* XXX as hch said always creating an internal sas_task for tmf
2884 * requests would simplify the driver
2886 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2888 /* all unaccelerated request types (non ssp or ncq) handled with
2891 if (!task && dev->dev_type == SAS_END_DEV) {
2892 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
2894 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2895 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
2896 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
2897 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2898 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
2899 } else if (task && sas_protocol_ata(task->task_proto) &&
2900 !task->ata_task.use_ncq) {
2903 if (task->data_dir == DMA_NONE)
2904 state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2905 else if (task->ata_task.dma_xfer)
2906 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2908 state = SCI_REQ_STP_PIO_WAIT_H2D;
2910 sci_change_state(sm, state);
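/**
 * scic_sds_request_completed_state_enter() - On entry to SCI_REQ_COMPLETED,
 *    notify the LLDD of the completion: regular I/O requests go through
 *    isci_request_io_request_complete(), task management requests through
 *    isci_task_request_complete().
 */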
2914 static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
2916 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2917 struct scic_sds_controller *scic = sci_req->owning_controller;
2918 struct isci_host *ihost = scic_to_ihost(scic);
2919 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2921 /* Tell the SCI_USER that the IO request is complete */
2922 if (!test_bit(IREQ_TMF, &ireq->flags))
2923 isci_request_io_request_complete(ihost, ireq,
2924 sci_req->sci_status);
2926 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
2929 static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
2931 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2933 /* Setting the abort bit in the Task Context is required by the silicon. */
2934 sci_req->tc->abort = 1;
2937 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2939 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2941 scic_sds_remote_device_set_working_request(sci_req->target_device,
2945 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2947 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2949 scic_sds_remote_device_set_working_request(sci_req->target_device,
2953 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2955 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2957 scic_sds_remote_device_set_working_request(sci_req->target_device,
2961 static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
2963 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2964 struct scu_task_context *tc = sci_req->tc;
2965 struct host_to_dev_fis *h2d_fis;
2966 enum sci_status status;
2968 /* Clear the SRST bit */
2969 h2d_fis = &sci_req->stp.cmd;
2970 h2d_fis->control = 0;
2972 /* Clear the TC control bit */
2973 tc->control_frame = 0;
2975 status = scic_controller_continue_io(sci_req);
2976 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
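/* The request state table maps each SCI_REQ_* state to its optional
 * state-entry action; states with no entry action use an empty initializer.
 */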
2979 static const struct sci_base_state scic_sds_request_state_table[] = {
2980 [SCI_REQ_INIT] = { },
2981 [SCI_REQ_CONSTRUCTED] = { },
2982 [SCI_REQ_STARTED] = {
2983 .enter_state = scic_sds_request_started_state_enter,
2985 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
2986 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
2988 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
2989 [SCI_REQ_STP_PIO_WAIT_H2D] = {
2990 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
2992 [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
2993 [SCI_REQ_STP_PIO_DATA_IN] = { },
2994 [SCI_REQ_STP_PIO_DATA_OUT] = { },
2995 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
2996 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
2997 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
2998 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3000 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
3001 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3003 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
3004 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3005 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3006 [SCI_REQ_SMP_WAIT_RESP] = { },
3007 [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3008 [SCI_REQ_COMPLETED] = {
3009 .enter_state = scic_sds_request_completed_state_enter,
3011 [SCI_REQ_ABORTING] = {
3012 .enter_state = scic_sds_request_aborting_state_enter,
3014 [SCI_REQ_FINAL] = { },
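/**
 * scic_sds_general_request_construct() - Initialize the fields common to
 *    all request types: the state machine (starting in SCI_REQ_INIT), the
 *    io_tag, the owning controller and target device, the default status
 *    values, and the task context slot selected by the tag.
 */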
3018 scic_sds_general_request_construct(struct scic_sds_controller *scic,
3019 struct scic_sds_remote_device *sci_dev,
3021 struct scic_sds_request *sci_req)
3023 sci_init_sm(&sci_req->sm, scic_sds_request_state_table, SCI_REQ_INIT);
3025 sci_req->io_tag = io_tag;
3026 sci_req->owning_controller = scic;
3027 sci_req->target_device = sci_dev;
3028 sci_req->protocol = SCIC_NO_PROTOCOL;
3029 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3031 sci_req->sci_status = SCI_SUCCESS;
3032 sci_req->scu_status = 0;
3033 sci_req->post_context = 0xFFFFFFFF;
3034 sci_req->tc = &scic->task_context_table[ISCI_TAG_TCI(io_tag)];
3035 WARN_ONCE(io_tag == SCI_CONTROLLER_INVALID_IO_TAG, "straggling invalid tag usage\n");
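/**
 * scic_io_request_construct() - Build the protocol-agnostic part of an I/O
 *    request and validate the target.
 *
 * Fails if the remote device has no valid remote node context or if the
 * device type is neither an end device, a STP/SATA device, nor an expander;
 * for STP/SATA the host-to-device FIS area is cleared, and the task context
 * is zeroed up to (but not including) the embedded SGL pairs.
 */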
3038 static enum sci_status
3039 scic_io_request_construct(struct scic_sds_controller *scic,
3040 struct scic_sds_remote_device *sci_dev,
3041 u16 io_tag, struct scic_sds_request *sci_req)
3043 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3044 enum sci_status status = SCI_SUCCESS;
3046 /* Build the common part of the request */
3047 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3049 if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3050 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3052 if (dev->dev_type == SAS_END_DEV)
3054 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3055 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
3056 else if (dev_is_expander(dev))
3059 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3061 memset(sci_req->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
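/**
 * scic_task_request_construct() - Build the common part of a task
 *    management request.
 *
 * Only end devices and STP/SATA targets are supported; for those the
 * IREQ_TMF flag is set and the entire task context is cleared, otherwise
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL is returned.
 */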
3066 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3067 struct scic_sds_remote_device *sci_dev,
3068 u16 io_tag, struct scic_sds_request *sci_req)
3070 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3071 enum sci_status status = SCI_SUCCESS;
3073 /* Build the common part of the request */
3074 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3076 if (dev->dev_type == SAS_END_DEV ||
3077 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3078 set_bit(IREQ_TMF, &sci_req_to_ireq(sci_req)->flags);
3079 memset(sci_req->tc, 0, sizeof(struct scu_task_context));
3081 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3086 static enum sci_status isci_request_ssp_request_construct(
3087 struct isci_request *request)
3089 enum sci_status status;
3091 dev_dbg(&request->isci_host->pdev->dev,
3092 "%s: request = %p\n",
3095 status = scic_io_request_construct_basic_ssp(&request->sci);
3099 static enum sci_status isci_request_stp_request_construct(
3100 struct isci_request *request)
3102 struct sas_task *task = isci_request_access_task(request);
3103 enum sci_status status;
3104 struct host_to_dev_fis *register_fis;
3106 dev_dbg(&request->isci_host->pdev->dev,
3107 "%s: request = %p\n",
3111 /* Get the host_to_dev_fis from the core and copy
3112 * the fis from the task into it.
3114 register_fis = isci_sata_task_to_fis_copy(task);
3116 status = scic_io_request_construct_basic_sata(&request->sci);
3118 /* Set the ncq tag in the fis, from the queue
3119 * command in the task.
3121 if (isci_sata_is_task_ncq(task)) {
3123 isci_sata_set_ncq_tag(
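/**
 * scic_io_request_construct_smp() - Build the SCU task context for an SMP
 *    request directly.
 *
 * Fixes up a zero request length for the SAS 1.x functions that imply a
 * default length, byte swaps the request in place, DMA-maps the
 * single-entry request scatterlist, fills in the task context (protocol,
 * port, remote node, command IU address and length), and moves the request
 * to SCI_REQ_CONSTRUCTED.
 */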
3132 static enum sci_status
3133 scic_io_request_construct_smp(struct device *dev,
3134 struct scic_sds_request *sci_req,
3135 struct sas_task *task)
3137 struct scatterlist *sg = &task->smp_task.smp_req;
3138 struct scic_sds_remote_device *sci_dev;
3139 struct scu_task_context *task_context;
3140 struct scic_sds_port *sci_port;
3141 struct smp_req *smp_req;
3146 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3147 smp_req = kaddr + sg->offset;
3149 * Look at the SMP request's header fields; for certain SAS 1.x SMP
3150 * functions under SAS 2.0, a zero request length really indicates
3151 * a non-zero default length.
3153 if (smp_req->req_len == 0) {
3154 switch (smp_req->func) {
3156 case SMP_REPORT_PHY_ERR_LOG:
3157 case SMP_REPORT_PHY_SATA:
3158 case SMP_REPORT_ROUTE_INFO:
3159 smp_req->req_len = 2;
3161 case SMP_CONF_ROUTE_INFO:
3162 case SMP_PHY_CONTROL:
3163 case SMP_PHY_TEST_FUNCTION:
3164 smp_req->req_len = 9;
3166 /* Default - zero is a valid default for 2.0. */
3169 req_len = smp_req->req_len;
3170 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3171 cmd = *(u32 *) smp_req;
3172 kunmap_atomic(kaddr, KM_IRQ0);
3174 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3177 sci_req->protocol = SCIC_SMP_PROTOCOL;
3179 /* byte swap the smp request. */
3181 task_context = sci_req->tc;
3183 sci_dev = scic_sds_request_get_device(sci_req);
3184 sci_port = scic_sds_request_get_port(sci_req);
3187 * Fill in the TC with its required data
3190 task_context->priority = 0;
3191 task_context->initiator_request = 1;
3192 task_context->connection_rate = sci_dev->connection_rate;
3193 task_context->protocol_engine_index =
3194 scic_sds_controller_get_protocol_engine_group(scic);
3195 task_context->logical_port_index = scic_sds_port_get_index(sci_port);
3196 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3197 task_context->abort = 0;
3198 task_context->valid = SCU_TASK_CONTEXT_VALID;
3199 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3202 task_context->remote_node_index = sci_dev->rnc.remote_node_index;
3203 task_context->command_code = 0;
3204 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3207 task_context->link_layer_control = 0;
3208 task_context->do_not_dma_ssp_good_response = 1;
3209 task_context->strict_ordering = 0;
3210 task_context->control_frame = 1;
3211 task_context->timeout_enable = 0;
3212 task_context->block_guard_enable = 0;
3215 task_context->address_modifier = 0;
3218 task_context->ssp_command_iu_length = req_len;
3221 task_context->transfer_length_bytes = 0;
3224 * 18h ~ 30h, protocol specific
3225 * since the command IU has been built by the framework at this point, we just
3226 * copy the first DWord from the command IU to this location. */
3227 memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3231 * "For SMP you could program it to zero. We would prefer that way
3232 * so that done code will be consistent." - Venki
3234 task_context->task_phase = 0;
3236 sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3237 (scic_sds_controller_get_protocol_engine_group(scic) <<
3238 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3239 (scic_sds_port_get_index(sci_port) <<
3240 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3241 ISCI_TAG_TCI(sci_req->io_tag));
3243 * Copy the physical address of the command buffer to the SCU Task
3244 * Context; the command buffer should not contain the command header.
3246 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3247 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3249 /* SMP response comes as UF, so no need to set response IU address. */
3250 task_context->response_iu_upper = 0;
3251 task_context->response_iu_lower = 0;
3253 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
3259 * isci_smp_request_build() - This function builds the smp request.
3260 * @ireq: This parameter points to the isci_request allocated in the
3261 * request construct function.
3263 * SCI_SUCCESS on successful completion, or specific failure code.
3265 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3267 struct sas_task *task = isci_request_access_task(ireq);
3268 struct device *dev = &ireq->isci_host->pdev->dev;
3269 struct scic_sds_request *sci_req = &ireq->sci;
3270 enum sci_status status = SCI_FAILURE;
3272 status = scic_io_request_construct_smp(dev, sci_req, task);
3273 if (status != SCI_SUCCESS)
3274 dev_warn(&ireq->isci_host->pdev->dev,
3275 "%s: failed with status = %d\n",
3283 * isci_io_request_build() - This function builds the io request object.
3284 * @isci_host: This parameter specifies the ISCI host object
3285 * @request: This parameter points to the isci_request object allocated in the
3286 * request construct function.
3287 * @sci_device: This parameter is the handle for the sci core's remote device
3288 * object that is the destination for this request.
3290 * SCI_SUCCESS on successful completion, or specific failure code.
3292 static enum sci_status isci_io_request_build(struct isci_host *isci_host,
3293 struct isci_request *request,
3294 struct isci_remote_device *isci_device,
3297 enum sci_status status = SCI_SUCCESS;
3298 struct sas_task *task = isci_request_access_task(request);
3299 struct scic_sds_remote_device *sci_device = &isci_device->sci;
3301 dev_dbg(&isci_host->pdev->dev,
3302 "%s: isci_device = 0x%p; request = %p, "
3303 "num_scatter = %d\n",
3309 /* map the sgl addresses, if present.
3310 * libata does the mapping for sata devices
3311 * before we get the request.
3313 if (task->num_scatter &&
3314 !sas_protocol_ata(task->task_proto) &&
3315 !(SAS_PROTOCOL_SMP & task->task_proto)) {
3317 request->num_sg_entries = dma_map_sg(
3318 &isci_host->pdev->dev,
3324 if (request->num_sg_entries == 0)
3325 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3328 /* build the common request object. For now,
3329 * we will let the core allocate the IO tag.
3331 status = scic_io_request_construct(&isci_host->sci, sci_device,
3332 tag, &request->sci);
3334 if (status != SCI_SUCCESS) {
3335 dev_warn(&isci_host->pdev->dev,
3336 "%s: failed request construct\n",
3341 switch (task->task_proto) {
3342 case SAS_PROTOCOL_SMP:
3343 status = isci_smp_request_build(request);
3345 case SAS_PROTOCOL_SSP:
3346 status = isci_request_ssp_request_construct(request);
3348 case SAS_PROTOCOL_SATA:
3349 case SAS_PROTOCOL_STP:
3350 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3351 status = isci_request_stp_request_construct(request);
3354 dev_warn(&isci_host->pdev->dev,
3355 "%s: unknown protocol\n", __func__);
3362 static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
3366 struct isci_request *ireq;
3368 ireq = dma_pool_alloc(ihost->dma_pool, gfp_flags, &handle);
3370 dev_warn(&ihost->pdev->dev,
3371 "%s: dma_pool_alloc returned NULL\n", __func__);
3375 /* initialize the request object. */
3376 spin_lock_init(&ireq->state_lock);
3377 ireq->request_daddr = handle;
3378 ireq->isci_host = ihost;
3379 ireq->io_request_completion = NULL;
3381 ireq->num_sg_entries = 0;
3382 INIT_LIST_HEAD(&ireq->completed_node);
3383 INIT_LIST_HEAD(&ireq->dev_node);
3385 isci_request_change_state(ireq, allocated);
3390 static struct isci_request *isci_request_alloc_io(struct isci_host *ihost,
3391 struct sas_task *task,
3394 struct isci_request *ireq;
3396 ireq = isci_request_alloc_core(ihost, gfp_flags);
3398 ireq->ttype_ptr.io_task_ptr = task;
3399 ireq->ttype = io_task;
3400 task->lldd_task = ireq;
3405 struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
3406 struct isci_tmf *isci_tmf,
3409 struct isci_request *ireq;
3411 ireq = isci_request_alloc_core(ihost, gfp_flags);
3413 ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3414 ireq->ttype = tmf_task;
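/**
 * isci_request_execute() - Allocate, build, and start an I/O request for
 *    the given sas_task.
 *
 * The request is started in the core under scic_lock; if the target is in
 * NCQ error recovery the request is issued on the task path instead, and on
 * a "remote device reset required" status the task is routed to the SCSI
 * error handler with SAS_TASK_NEED_DEV_RESET set.  Successfully started (or
 * held) requests are tracked on the remote device's reqs_in_process list.
 */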
3419 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3420 struct sas_task *task, u16 tag, gfp_t gfp_flags)
3422 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3423 struct isci_request *ireq;
3424 unsigned long flags;
3427 /* do common allocation and init of request object. */
3428 ireq = isci_request_alloc_io(ihost, task, gfp_flags);
3432 status = isci_io_request_build(ihost, ireq, idev, tag);
3433 if (status != SCI_SUCCESS) {
3434 dev_warn(&ihost->pdev->dev,
3435 "%s: request_construct failed - status = 0x%x\n",
3441 spin_lock_irqsave(&ihost->scic_lock, flags);
3443 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3445 if (isci_task_is_ncq_recovery(task)) {
3447 /* The device is in an NCQ recovery state. Issue the
3448 * request on the task side. Note that it will
3449 * complete on the I/O request side because the
3450 * request was built that way (ie.
3451 * ireq->is_task_management_request is false).
3453 status = scic_controller_start_task(&ihost->sci,
3457 status = SCI_FAILURE;
3460 /* send the request, let the core assign the IO TAG. */
3461 status = scic_controller_start_io(&ihost->sci, &idev->sci,
3465 if (status != SCI_SUCCESS &&
3466 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3467 dev_warn(&ihost->pdev->dev,
3468 "%s: failed request start (0x%x)\n",
3470 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3474 /* Either I/O started OK, or the core has signaled that
3475 * the device needs a target reset.
3477 * In either case, hold onto the I/O for later.
3479 * Update its status and add it to the list in the
3480 * remote device object.
3482 list_add(&ireq->dev_node, &idev->reqs_in_process);
3484 if (status == SCI_SUCCESS) {
3485 /* Save the tag for possible task mgmt later. */
3486 ireq->io_tag = ireq->sci.io_tag;
3487 isci_request_change_state(ireq, started);
3489 /* The request did not really start in the
3490 * hardware, so clear the request handle
3491 * here so no terminations will be done.
3493 set_bit(IREQ_TERMINATED, &ireq->flags);
3494 isci_request_change_state(ireq, completed);
3496 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3499 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3500 /* Signal libsas that we need the SCSI error
3501 * handler thread to work on this I/O and that
3502 * we want a device reset.
3504 spin_lock_irqsave(&task->task_state_lock, flags);
3505 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3506 spin_unlock_irqrestore(&task->task_state_lock, flags);
3508 /* Cause this task to be scheduled in the SCSI error
3511 isci_execpath_callback(ihost, task,
3514 /* Change the status, since we are holding
3515 * the I/O until it is managed by the SCSI
3518 status = SCI_SUCCESS;
3522 if (status != SCI_SUCCESS) {
3523 /* release dma memory on failure. */
3524 isci_request_free(ihost, ireq);