blob: 2d29abf3ce1f3ee8b3e3af2c0c6390afcfefa573 [file] [log] [blame]
Dan Williams6f231dd2011-07-02 22:56:22 -07001/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "isci.h"
Dan Williams6f231dd2011-07-02 22:56:22 -070057#include "task.h"
58#include "request.h"
59#include "sata.h"
60#include "scu_completion_codes.h"
Dan Williams5dec6f42011-05-10 02:28:49 -070061#include "scu_event_codes.h"
Dave Jiang2ec53eb2011-05-04 18:01:22 -070062#include "sas.h"
Dan Williams6f231dd2011-07-02 22:56:22 -070063
Dan Williams5076a1a2011-06-27 14:57:03 -070064static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
Dan Williams312e0c22011-06-28 13:47:09 -070065 int idx)
66{
67 if (idx == 0)
Dan Williams5076a1a2011-06-27 14:57:03 -070068 return &ireq->tc->sgl_pair_ab;
Dan Williams312e0c22011-06-28 13:47:09 -070069 else if (idx == 1)
Dan Williams5076a1a2011-06-27 14:57:03 -070070 return &ireq->tc->sgl_pair_cd;
Dan Williams312e0c22011-06-28 13:47:09 -070071 else if (idx < 0)
72 return NULL;
73 else
Dan Williams5076a1a2011-06-27 14:57:03 -070074 return &ireq->sg_table[idx - 2];
Dan Williams6f231dd2011-07-02 22:56:22 -070075}
76
Dan Williams312e0c22011-06-28 13:47:09 -070077static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic,
Dan Williams5076a1a2011-06-27 14:57:03 -070078 struct isci_request *ireq, u32 idx)
Dan Williams312e0c22011-06-28 13:47:09 -070079{
80 u32 offset;
81
82 if (idx == 0) {
Dan Williams5076a1a2011-06-27 14:57:03 -070083 offset = (void *) &ireq->tc->sgl_pair_ab -
Dan Williams312e0c22011-06-28 13:47:09 -070084 (void *) &scic->task_context_table[0];
85 return scic->task_context_dma + offset;
86 } else if (idx == 1) {
Dan Williams5076a1a2011-06-27 14:57:03 -070087 offset = (void *) &ireq->tc->sgl_pair_cd -
Dan Williams312e0c22011-06-28 13:47:09 -070088 (void *) &scic->task_context_table[0];
89 return scic->task_context_dma + offset;
90 }
91
Dan Williams5076a1a2011-06-27 14:57:03 -070092 return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
Dan Williams312e0c22011-06-28 13:47:09 -070093}
94
95static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
96{
97 e->length = sg_dma_len(sg);
98 e->address_upper = upper_32_bits(sg_dma_address(sg));
99 e->address_lower = lower_32_bits(sg_dma_address(sg));
100 e->address_modifier = 0;
101}
102
/**
 * scic_sds_request_build_sgl() - build the hardware scatter-gather list
 * @ireq: request whose SGL element pairs are to be populated
 *
 * Walks the sas_task's scatterlist, filling SCU SGL element pairs two
 * scatterlist entries at a time and chaining each pair to the next via
 * its DMA address.  When the task has no scatterlist (num_scatter == 0)
 * the flat buffer is DMA-mapped directly and described by a single
 * element.  The final pair's next-pair link is zeroed to terminate the
 * chain.
 */
static void scic_sds_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *isci_host = ireq->isci_host;
	struct scic_sds_controller *scic = &isci_host->sci;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				/* odd entry count: clear the unused B slot */
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				/* chain the previous pair to this one */
				dma_addr = to_sgl_element_pair_dma(scic,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		/* remember the mapping so it can be unmapped on completion */
		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		/* terminate the chain at the last pair written */
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
161
Dan Williams5076a1a2011-06-27 14:57:03 -0700162static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq)
Dan Williamsf1f52e72011-05-10 02:28:45 -0700163{
164 struct ssp_cmd_iu *cmd_iu;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700165 struct sas_task *task = isci_request_access_task(ireq);
166
Dan Williams5076a1a2011-06-27 14:57:03 -0700167 cmd_iu = &ireq->ssp.cmd;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700168
169 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
170 cmd_iu->add_cdb_len = 0;
171 cmd_iu->_r_a = 0;
172 cmd_iu->_r_b = 0;
173 cmd_iu->en_fburst = 0; /* unsupported */
174 cmd_iu->task_prio = task->ssp_task.task_prio;
175 cmd_iu->task_attr = task->ssp_task.task_attr;
176 cmd_iu->_r_c = 0;
177
178 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
179 sizeof(task->ssp_task.cdb) / sizeof(u32));
180}
181
Dan Williams5076a1a2011-06-27 14:57:03 -0700182static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq)
Dan Williamsf1f52e72011-05-10 02:28:45 -0700183{
184 struct ssp_task_iu *task_iu;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700185 struct sas_task *task = isci_request_access_task(ireq);
186 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
187
Dan Williams5076a1a2011-06-27 14:57:03 -0700188 task_iu = &ireq->ssp.tmf;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700189
190 memset(task_iu, 0, sizeof(struct ssp_task_iu));
191
192 memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
193
194 task_iu->task_func = isci_tmf->tmf_code;
195 task_iu->task_tag =
196 (ireq->ttype == tmf_task) ?
197 isci_tmf->io_tag :
198 SCI_CONTROLLER_INVALID_IO_TAG;
199}
200
/**
 * scu_ssp_reqeust_construct_task_context() - populate the SCU task
 * context fields common to every SSP request type
 * @ireq: the SSP request (IO or task management) being constructed
 * @task_context: the hardware task context buffer to fill in
 *
 * Sets protocol/port/remote-node routing, composes the post-context
 * value used to post this TC to the hardware, and records the DMA
 * addresses of the command and response IUs.  Type-specific fields are
 * filled in afterwards by the IO/task constructors.
 */
static void scu_ssp_reqeust_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_remote_device *target_device;
	struct isci_port *iport;

	target_device = scic_sds_request_get_device(ireq);
	iport = scic_sds_request_get_port(ireq);

	/* Fill in the TC with the its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	/* NOTE(review): no local named "controller" exists here -- this
	 * presumably works because the getter is a macro that ignores or
	 * synthesizes its argument; confirm against its definition.
	 */
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index = scic_sds_port_get_index(iport);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(ireq->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

	/* Encode engine group, logical port and TCi into the value that
	 * will be written to the hardware post queue to start this TC.
	 */
	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (scic_sds_controller_get_protocol_engine_group(controller) <<
			       SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (scic_sds_port_get_index(iport) <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
271
272/**
273 * This method is will fill in the SCU Task Context for a SSP IO request.
274 * @sci_req:
275 *
276 */
Dan Williams5076a1a2011-06-27 14:57:03 -0700277static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
Dan Williams312e0c22011-06-28 13:47:09 -0700278 enum dma_data_direction dir,
279 u32 len)
Dan Williamsf1f52e72011-05-10 02:28:45 -0700280{
Dan Williams5076a1a2011-06-27 14:57:03 -0700281 struct scu_task_context *task_context = ireq->tc;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700282
Dan Williams5076a1a2011-06-27 14:57:03 -0700283 scu_ssp_reqeust_construct_task_context(ireq, task_context);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700284
285 task_context->ssp_command_iu_length =
286 sizeof(struct ssp_cmd_iu) / sizeof(u32);
287 task_context->type.ssp.frame_type = SSP_COMMAND;
288
289 switch (dir) {
290 case DMA_FROM_DEVICE:
291 case DMA_NONE:
292 default:
293 task_context->task_type = SCU_TASK_TYPE_IOREAD;
294 break;
295 case DMA_TO_DEVICE:
296 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
297 break;
298 }
299
300 task_context->transfer_length_bytes = len;
301
302 if (task_context->transfer_length_bytes > 0)
Dan Williams5076a1a2011-06-27 14:57:03 -0700303 scic_sds_request_build_sgl(ireq);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700304}
305
Dan Williamsf1f52e72011-05-10 02:28:45 -0700306/**
307 * This method will fill in the SCU Task Context for a SSP Task request. The
308 * following important settings are utilized: -# priority ==
309 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
310 * ahead of other task destined for the same Remote Node. -# task_type ==
311 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
312 * (i.e. non-raw frame) is being utilized to perform task management. -#
313 * control_frame == 1. This ensures that the proper endianess is set so
314 * that the bytes are transmitted in the right order for a task frame.
315 * @sci_req: This parameter specifies the task request object being
316 * constructed.
317 *
318 */
Dan Williams5076a1a2011-06-27 14:57:03 -0700319static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
Dan Williamsf1f52e72011-05-10 02:28:45 -0700320{
Dan Williams5076a1a2011-06-27 14:57:03 -0700321 struct scu_task_context *task_context = ireq->tc;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700322
Dan Williams5076a1a2011-06-27 14:57:03 -0700323 scu_ssp_reqeust_construct_task_context(ireq, task_context);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700324
325 task_context->control_frame = 1;
326 task_context->priority = SCU_TASK_PRIORITY_HIGH;
327 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
328 task_context->transfer_length_bytes = 0;
329 task_context->type.ssp.frame_type = SSP_TASK;
330 task_context->ssp_command_iu_length =
331 sizeof(struct ssp_task_iu) / sizeof(u32);
332}
333
Dan Williamsf1f52e72011-05-10 02:28:45 -0700334/**
Dan Williams5dec6f42011-05-10 02:28:49 -0700335 * This method is will fill in the SCU Task Context for any type of SATA
336 * request. This is called from the various SATA constructors.
337 * @sci_req: The general IO request object which is to be used in
338 * constructing the SCU task context.
339 * @task_context: The buffer pointer for the SCU task context which is being
340 * constructed.
Dan Williamsf1f52e72011-05-10 02:28:45 -0700341 *
Dan Williams5dec6f42011-05-10 02:28:49 -0700342 * The general io request construction is complete. The buffer assignment for
343 * the command buffer is complete. none Revisit task context construction to
344 * determine what is common for SSP/SMP/STP task context structures.
Dan Williamsf1f52e72011-05-10 02:28:45 -0700345 */
Dan Williams5dec6f42011-05-10 02:28:49 -0700346static void scu_sata_reqeust_construct_task_context(
Dan Williams5076a1a2011-06-27 14:57:03 -0700347 struct isci_request *ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -0700348 struct scu_task_context *task_context)
349{
350 dma_addr_t dma_addr;
Dan Williams5dec6f42011-05-10 02:28:49 -0700351 struct scic_sds_remote_device *target_device;
Dan Williamsffe191c2011-06-29 13:09:25 -0700352 struct isci_port *iport;
Dan Williams5dec6f42011-05-10 02:28:49 -0700353
Dan Williams5076a1a2011-06-27 14:57:03 -0700354 target_device = scic_sds_request_get_device(ireq);
Dan Williamsffe191c2011-06-29 13:09:25 -0700355 iport = scic_sds_request_get_port(ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -0700356
357 /* Fill in the TC with the its required data */
358 task_context->abort = 0;
359 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
360 task_context->initiator_request = 1;
361 task_context->connection_rate = target_device->connection_rate;
362 task_context->protocol_engine_index =
363 scic_sds_controller_get_protocol_engine_group(controller);
364 task_context->logical_port_index =
Dan Williamsffe191c2011-06-29 13:09:25 -0700365 scic_sds_port_get_index(iport);
Dan Williams5dec6f42011-05-10 02:28:49 -0700366 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
367 task_context->valid = SCU_TASK_CONTEXT_VALID;
368 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
369
370 task_context->remote_node_index =
Dan Williams5076a1a2011-06-27 14:57:03 -0700371 scic_sds_remote_device_get_index(ireq->target_device);
Dan Williams5dec6f42011-05-10 02:28:49 -0700372 task_context->command_code = 0;
373
374 task_context->link_layer_control = 0;
375 task_context->do_not_dma_ssp_good_response = 1;
376 task_context->strict_ordering = 0;
377 task_context->control_frame = 0;
378 task_context->timeout_enable = 0;
379 task_context->block_guard_enable = 0;
380
381 task_context->address_modifier = 0;
382 task_context->task_phase = 0x01;
383
384 task_context->ssp_command_iu_length =
385 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
386
387 /* Set the first word of the H2D REG FIS */
Dan Williams5076a1a2011-06-27 14:57:03 -0700388 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
Dan Williams5dec6f42011-05-10 02:28:49 -0700389
Dan Williams5076a1a2011-06-27 14:57:03 -0700390 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
Dan Williams312e0c22011-06-28 13:47:09 -0700391 (scic_sds_controller_get_protocol_engine_group(controller) <<
392 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
Dan Williamsffe191c2011-06-29 13:09:25 -0700393 (scic_sds_port_get_index(iport) <<
Dan Williams312e0c22011-06-28 13:47:09 -0700394 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
Dan Williams5076a1a2011-06-27 14:57:03 -0700395 ISCI_TAG_TCI(ireq->io_tag));
Dan Williams5dec6f42011-05-10 02:28:49 -0700396 /*
397 * Copy the physical address for the command buffer to the SCU Task
398 * Context. We must offset the command buffer by 4 bytes because the
399 * first 4 bytes are transfered in the body of the TC.
400 */
Dan Williams5076a1a2011-06-27 14:57:03 -0700401 dma_addr = scic_io_request_get_dma_addr(ireq,
402 ((char *) &ireq->stp.cmd) +
Dan Williams5dec6f42011-05-10 02:28:49 -0700403 sizeof(u32));
404
405 task_context->command_iu_upper = upper_32_bits(dma_addr);
406 task_context->command_iu_lower = lower_32_bits(dma_addr);
407
408 /* SATA Requests do not have a response buffer */
409 task_context->response_iu_upper = 0;
410 task_context->response_iu_lower = 0;
411}
412
Dan Williams5076a1a2011-06-27 14:57:03 -0700413static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
Dan Williams5dec6f42011-05-10 02:28:49 -0700414{
Dan Williams5076a1a2011-06-27 14:57:03 -0700415 struct scu_task_context *task_context = ireq->tc;
Dan Williams5dec6f42011-05-10 02:28:49 -0700416
Dan Williams5076a1a2011-06-27 14:57:03 -0700417 scu_sata_reqeust_construct_task_context(ireq, task_context);
Dan Williams5dec6f42011-05-10 02:28:49 -0700418
419 task_context->control_frame = 0;
420 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
421 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
422 task_context->type.stp.fis_type = FIS_REGH2D;
423 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
424}
425
Dan Williams5076a1a2011-06-27 14:57:03 -0700426static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq,
427 bool copy_rx_frame)
Dan Williams5dec6f42011-05-10 02:28:49 -0700428{
Dan Williams5076a1a2011-06-27 14:57:03 -0700429 struct isci_stp_request *stp_req = &ireq->stp.req;
Dan Williams5dec6f42011-05-10 02:28:49 -0700430
Dan Williams5076a1a2011-06-27 14:57:03 -0700431 scu_stp_raw_request_construct_task_context(ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -0700432
Dan Williamsba7cb222011-06-27 11:56:41 -0700433 stp_req->status = 0;
434 stp_req->sgl.offset = 0;
435 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
Dan Williams5dec6f42011-05-10 02:28:49 -0700436
437 if (copy_rx_frame) {
Dan Williams5076a1a2011-06-27 14:57:03 -0700438 scic_sds_request_build_sgl(ireq);
Dan Williamsba7cb222011-06-27 11:56:41 -0700439 stp_req->sgl.index = 0;
Dan Williams5dec6f42011-05-10 02:28:49 -0700440 } else {
441 /* The user does not want the data copied to the SGL buffer location */
Dan Williamsba7cb222011-06-27 11:56:41 -0700442 stp_req->sgl.index = -1;
Dan Williams5dec6f42011-05-10 02:28:49 -0700443 }
444
445 return SCI_SUCCESS;
446}
447
448/**
449 *
450 * @sci_req: This parameter specifies the request to be constructed as an
451 * optimized request.
452 * @optimized_task_type: This parameter specifies whether the request is to be
453 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
454 * value of 1 indicates NCQ.
455 *
456 * This method will perform request construction common to all types of STP
457 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
458 * returns an indication as to whether the construction was successful.
459 */
Dan Williams5076a1a2011-06-27 14:57:03 -0700460static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -0700461 u8 optimized_task_type,
462 u32 len,
463 enum dma_data_direction dir)
464{
Dan Williams5076a1a2011-06-27 14:57:03 -0700465 struct scu_task_context *task_context = ireq->tc;
Dan Williams5dec6f42011-05-10 02:28:49 -0700466
467 /* Build the STP task context structure */
Dan Williams5076a1a2011-06-27 14:57:03 -0700468 scu_sata_reqeust_construct_task_context(ireq, task_context);
Dan Williams5dec6f42011-05-10 02:28:49 -0700469
470 /* Copy over the SGL elements */
Dan Williams5076a1a2011-06-27 14:57:03 -0700471 scic_sds_request_build_sgl(ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -0700472
473 /* Copy over the number of bytes to be transfered */
474 task_context->transfer_length_bytes = len;
475
476 if (dir == DMA_TO_DEVICE) {
477 /*
478 * The difference between the DMA IN and DMA OUT request task type
479 * values are consistent with the difference between FPDMA READ
480 * and FPDMA WRITE values. Add the supplied task type parameter
481 * to this difference to set the task type properly for this
482 * DATA OUT (WRITE) case. */
483 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
484 - SCU_TASK_TYPE_DMA_IN);
485 } else {
486 /*
487 * For the DATA IN (READ) case, simply save the supplied
488 * optimized task type. */
489 task_context->task_type = optimized_task_type;
490 }
491}
492
493
494
Dan Williamsf1f52e72011-05-10 02:28:45 -0700495static enum sci_status
Dan Williams5076a1a2011-06-27 14:57:03 -0700496scic_io_request_construct_sata(struct isci_request *ireq,
Dan Williamsf1f52e72011-05-10 02:28:45 -0700497 u32 len,
498 enum dma_data_direction dir,
499 bool copy)
Dan Williams6f231dd2011-07-02 22:56:22 -0700500{
Dan Williams6f231dd2011-07-02 22:56:22 -0700501 enum sci_status status = SCI_SUCCESS;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700502 struct sas_task *task = isci_request_access_task(ireq);
Dan Williams6f231dd2011-07-02 22:56:22 -0700503
Dan Williamsf1f52e72011-05-10 02:28:45 -0700504 /* check for management protocols */
505 if (ireq->ttype == tmf_task) {
506 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
Dan Williams6f231dd2011-07-02 22:56:22 -0700507
Dan Williamsf1f52e72011-05-10 02:28:45 -0700508 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
Dan Williams5dec6f42011-05-10 02:28:49 -0700509 tmf->tmf_code == isci_tmf_sata_srst_low) {
Dan Williams5076a1a2011-06-27 14:57:03 -0700510 scu_stp_raw_request_construct_task_context(ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -0700511 return SCI_SUCCESS;
512 } else {
Dan Williams5076a1a2011-06-27 14:57:03 -0700513 dev_err(scic_to_dev(ireq->owning_controller),
Dan Williamsf1f52e72011-05-10 02:28:45 -0700514 "%s: Request 0x%p received un-handled SAT "
515 "management protocol 0x%x.\n",
Dan Williams5076a1a2011-06-27 14:57:03 -0700516 __func__, ireq, tmf->tmf_code);
Dan Williams6f231dd2011-07-02 22:56:22 -0700517
Dan Williamsf1f52e72011-05-10 02:28:45 -0700518 return SCI_FAILURE;
519 }
Dan Williams6f231dd2011-07-02 22:56:22 -0700520 }
521
Dan Williamsf1f52e72011-05-10 02:28:45 -0700522 if (!sas_protocol_ata(task->task_proto)) {
Dan Williams5076a1a2011-06-27 14:57:03 -0700523 dev_err(scic_to_dev(ireq->owning_controller),
Dan Williamsf1f52e72011-05-10 02:28:45 -0700524 "%s: Non-ATA protocol in SATA path: 0x%x\n",
525 __func__,
526 task->task_proto);
Dan Williams6f231dd2011-07-02 22:56:22 -0700527 return SCI_FAILURE;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700528
Dan Williams6f231dd2011-07-02 22:56:22 -0700529 }
530
Dan Williamsf1f52e72011-05-10 02:28:45 -0700531 /* non data */
Dan Williams5dec6f42011-05-10 02:28:49 -0700532 if (task->data_dir == DMA_NONE) {
Dan Williams5076a1a2011-06-27 14:57:03 -0700533 scu_stp_raw_request_construct_task_context(ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -0700534 return SCI_SUCCESS;
535 }
Dan Williamsf1f52e72011-05-10 02:28:45 -0700536
537 /* NCQ */
Dan Williams5dec6f42011-05-10 02:28:49 -0700538 if (task->ata_task.use_ncq) {
Dan Williams5076a1a2011-06-27 14:57:03 -0700539 scic_sds_stp_optimized_request_construct(ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -0700540 SCU_TASK_TYPE_FPDMAQ_READ,
541 len, dir);
542 return SCI_SUCCESS;
543 }
Dan Williamsf1f52e72011-05-10 02:28:45 -0700544
545 /* DMA */
Dan Williams5dec6f42011-05-10 02:28:49 -0700546 if (task->ata_task.dma_xfer) {
Dan Williams5076a1a2011-06-27 14:57:03 -0700547 scic_sds_stp_optimized_request_construct(ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -0700548 SCU_TASK_TYPE_DMA_IN,
549 len, dir);
550 return SCI_SUCCESS;
551 } else /* PIO */
Dan Williams5076a1a2011-06-27 14:57:03 -0700552 return scic_sds_stp_pio_request_construct(ireq, copy);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700553
554 return status;
555}
556
Dan Williams5076a1a2011-06-27 14:57:03 -0700557static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq)
Dan Williamsf1f52e72011-05-10 02:28:45 -0700558{
Dan Williamsf1f52e72011-05-10 02:28:45 -0700559 struct sas_task *task = isci_request_access_task(ireq);
560
Dan Williams5076a1a2011-06-27 14:57:03 -0700561 ireq->protocol = SCIC_SSP_PROTOCOL;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700562
Dan Williams5076a1a2011-06-27 14:57:03 -0700563 scu_ssp_io_request_construct_task_context(ireq,
Dan Williamsf1f52e72011-05-10 02:28:45 -0700564 task->data_dir,
565 task->total_xfer_len);
566
Dan Williams5076a1a2011-06-27 14:57:03 -0700567 scic_sds_io_request_build_ssp_command_iu(ireq);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700568
Dan Williams5076a1a2011-06-27 14:57:03 -0700569 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700570
571 return SCI_SUCCESS;
572}
573
574enum sci_status scic_task_request_construct_ssp(
Dan Williams5076a1a2011-06-27 14:57:03 -0700575 struct isci_request *ireq)
Dan Williamsf1f52e72011-05-10 02:28:45 -0700576{
577 /* Construct the SSP Task SCU Task Context */
Dan Williams5076a1a2011-06-27 14:57:03 -0700578 scu_ssp_task_request_construct_task_context(ireq);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700579
580 /* Fill in the SSP Task IU */
Dan Williams5076a1a2011-06-27 14:57:03 -0700581 scic_sds_task_request_build_ssp_task_iu(ireq);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700582
Dan Williams5076a1a2011-06-27 14:57:03 -0700583 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
Dan Williams6f231dd2011-07-02 22:56:22 -0700584
585 return SCI_SUCCESS;
586}
587
Dan Williams5076a1a2011-06-27 14:57:03 -0700588static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq)
Dan Williams6f231dd2011-07-02 22:56:22 -0700589{
Dan Williamsf1f52e72011-05-10 02:28:45 -0700590 enum sci_status status;
Dan Williamsf1f52e72011-05-10 02:28:45 -0700591 bool copy = false;
Dan Williams5076a1a2011-06-27 14:57:03 -0700592 struct sas_task *task = isci_request_access_task(ireq);
Dan Williams6f231dd2011-07-02 22:56:22 -0700593
Dan Williams5076a1a2011-06-27 14:57:03 -0700594 ireq->protocol = SCIC_STP_PROTOCOL;
Dan Williams6f231dd2011-07-02 22:56:22 -0700595
Dan Williamsf1f52e72011-05-10 02:28:45 -0700596 copy = (task->data_dir == DMA_NONE) ? false : true;
Dan Williams6f231dd2011-07-02 22:56:22 -0700597
Dan Williams5076a1a2011-06-27 14:57:03 -0700598 status = scic_io_request_construct_sata(ireq,
Dan Williamsf1f52e72011-05-10 02:28:45 -0700599 task->total_xfer_len,
600 task->data_dir,
601 copy);
Dan Williams6f231dd2011-07-02 22:56:22 -0700602
Dan Williamsf1f52e72011-05-10 02:28:45 -0700603 if (status == SCI_SUCCESS)
Dan Williams5076a1a2011-06-27 14:57:03 -0700604 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
Dan Williams6f231dd2011-07-02 22:56:22 -0700605
Dan Williamsf1f52e72011-05-10 02:28:45 -0700606 return status;
Dan Williams6f231dd2011-07-02 22:56:22 -0700607}
608
Dan Williams5076a1a2011-06-27 14:57:03 -0700609enum sci_status scic_task_request_construct_sata(struct isci_request *ireq)
Dan Williams6f231dd2011-07-02 22:56:22 -0700610{
Dan Williamsf1f52e72011-05-10 02:28:45 -0700611 enum sci_status status = SCI_SUCCESS;
Dan Williams6f231dd2011-07-02 22:56:22 -0700612
Dan Williamsf1f52e72011-05-10 02:28:45 -0700613 /* check for management protocols */
614 if (ireq->ttype == tmf_task) {
615 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
Dan Williams6f231dd2011-07-02 22:56:22 -0700616
Dan Williamsf1f52e72011-05-10 02:28:45 -0700617 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
618 tmf->tmf_code == isci_tmf_sata_srst_low) {
Dan Williams5076a1a2011-06-27 14:57:03 -0700619 scu_stp_raw_request_construct_task_context(ireq);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700620 } else {
Dan Williams5076a1a2011-06-27 14:57:03 -0700621 dev_err(scic_to_dev(ireq->owning_controller),
Dan Williamsf1f52e72011-05-10 02:28:45 -0700622 "%s: Request 0x%p received un-handled SAT "
623 "Protocol 0x%x.\n",
Dan Williams5076a1a2011-06-27 14:57:03 -0700624 __func__, ireq, tmf->tmf_code);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700625
626 return SCI_FAILURE;
627 }
Dan Williams6f231dd2011-07-02 22:56:22 -0700628 }
Dan Williamsf1f52e72011-05-10 02:28:45 -0700629
Dan Williams5dec6f42011-05-10 02:28:49 -0700630 if (status != SCI_SUCCESS)
631 return status;
Dan Williams5076a1a2011-06-27 14:57:03 -0700632 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700633
634 return status;
Dan Williams6f231dd2011-07-02 22:56:22 -0700635}
636
/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 *
 * Reads the data_offset field of this request's task context image in
 * the SCU's task-context SRAM to learn how many bytes were actually
 * transmitted.  Only read while the SMU address modifier is zero;
 * otherwise 0 is returned.
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct scic_sds_controller *scic = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 * BAR1 is the scu_registers
		 * 0x20002C = 0x200000 + 0x2c
		 * = start of task context SRAM + offset of (type.ssp.data_offset)
		 * TCi is the io_tag of struct scic_sds_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}
663
/**
 * scic_sds_request_start() - post a constructed request to the hardware
 * @ireq: request to start; must be in the SCI_REQ_CONSTRUCTED state
 *
 * Fills the protocol-specific fields of the task context, records the tag
 * in the post context and advances the state machine to SCI_REQ_STARTED.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE if the request is not
 * in the constructed state.
 */
enum sci_status scic_sds_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct scic_sds_controller *scic = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* / @todo When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add to the post_context the io tag value */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}
712
/**
 * scic_sds_io_request_terminate() - begin termination of an in-flight request
 * @ireq: request to terminate
 *
 * Drives the request state machine toward completion/abort depending on how
 * far the request has progressed:
 *  - still constructed: complete immediately with SCU_TASK_DONE_TASK_ABORT /
 *    SCI_FAILURE_IO_TERMINATED,
 *  - started (any wait state): transition to SCI_REQ_ABORTING,
 *  - waiting for a task response: abort then complete,
 *  - already aborting: complete.
 *
 * Return: SCI_SUCCESS when a transition was made, otherwise
 * SCI_FAILURE_INVALID_STATE (e.g. already completed).
 */
enum sci_status
scic_sds_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		/* Never reached the hardware; fail it synchronously. */
		scic_sds_request_set_status(ireq,
			SCU_TASK_DONE_TASK_ABORT,
			SCI_FAILURE_IO_TERMINATED);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		return SCI_SUCCESS;
	case SCI_REQ_TASK_WAIT_TC_RESP:
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_ABORTING:
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(scic_to_dev(ireq->owning_controller),
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}
764
/**
 * scic_sds_request_complete() - finalize a completed request
 * @ireq: request in the SCI_REQ_COMPLETED state
 *
 * Releases any saved unsolicited frame back to the controller and moves the
 * request state machine to its final state.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE (with a WARN_ONCE) if
 * the request is not in the completed state.
 */
enum sci_status scic_sds_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scic_sds_controller *scic = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%d)\n", state))
		return SCI_FAILURE_INVALID_STATE;

	/* Return a frame buffer that was held for this request, if any. */
	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		scic_sds_controller_release_frame(scic,
						  ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}
783
/**
 * scic_sds_io_request_event_handler() - handle an SCU event for a request
 * @ireq: request the event was posted against
 * @event_code: raw SCU event code
 *
 * The only event handled is a CRC error during PIO data-in, which sends the
 * state machine back to waiting for the D2H register FIS so the target can
 * retransmit.  Events in any other state are rejected.
 *
 * Return: SCI_SUCCESS on the handled event, SCI_FAILURE_INVALID_STATE when
 * not in PIO data-in, SCI_FAILURE for unrecognized event specifiers.
 */
enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq,
						  u32 event_code)
{
	enum sci_base_request_states state;
	struct scic_sds_controller *scic = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
			 __func__, event_code, state);

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(scic_to_dev(scic),
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}
817
/*
 * scic_sds_io_request_copy_response() - copy response data for requests
 * returning response data instead of sense data.
 * @ireq: the request object whose SSP response IU is copied into the
 *	associated TMF's response buffer.
 *
 * The copy length is bounded by SSP_RESP_IU_MAX_SIZE and by the response
 * data length advertised in the (big-endian) response IU.
 */
static void scic_sds_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	/* Clamp to the buffer size in case the target reports more data. */
	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}
841
/**
 * request_started_state_tc_event() - task-context completion while the
 *    request is in SCI_REQ_STARTED
 * @ireq: request whose task context completed
 * @completion_code: raw SCU completion code
 *
 * Decodes the transport-layer completion status into an (SCU status,
 * SCI status) pair on the request and, in every case, moves the state
 * machine to SCI_REQ_COMPLETED.
 */
static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
	 * to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(ireq,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.). We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		/* Response IU arrives big-endian; swap it in place. */
		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(ireq,
						    SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(ireq,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(ireq,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		/* datapres 1/2 == response/sense data present (SAS spec). */
		if (datapres == 1 || datapres == 2) {
			scic_sds_request_set_status(ireq,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(ireq,
						    SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS);
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(ireq,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(ireq,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(ireq,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			ireq,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}
989
/**
 * request_aborting_state_tc_event() - task-context completion while the
 *    request is in SCI_REQ_ABORTING
 * @ireq: request being aborted
 * @completion_code: raw SCU completion code
 *
 * GOOD or TASK_ABORT completions finish the abort (status recorded as
 * SCI_FAILURE_IO_TERMINATED); anything else is ignored while waiting for
 * the abort to complete.
 */
static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error wait for the task abort to complete
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}
1012
/**
 * ssp_task_request_await_tc_event() - TC completion for an SSP task request
 *    waiting in SCI_REQ_TASK_WAIT_TC_COMP
 * @ireq: SSP task management request
 * @completion_code: raw SCU completion code
 *
 * On success (and, deliberately, on ACK/NAK timeout) the request advances to
 * waiting for the task response; any other completion finishes the request
 * with a controller-specific error.
 */
static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if
		 * we decide to send the task IU again.
		 */
		dev_warn(scic_to_dev(ireq->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(ireq,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1052
/**
 * smp_request_await_response_tc_event() - TC completion for an SMP request
 *    still waiting for its response frame (SCI_REQ_SMP_WAIT_RESP)
 * @ireq: SMP request
 * @completion_code: raw SCU completion code
 *
 * A TC completion here is unexpected (the response frame should arrive
 * first), but a GOOD status still completes the IO successfully.  Known
 * slow-expander error statuses are mapped to SCI_FAILURE_RETRY_REQUIRED so
 * the caller retries; everything else completes with a controller error.
 */
static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected. but if the TC has success status, we
		 * complete the IO anyway.
		 */
		scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These status has been seen in a specific LSI
		 * expander, which sometimes is not able to send smp
		 * response within 2 ms. This causes our hardware break
		 * the connection and set TC completion with one of
		 * these SMP_XXX_XX_ERR status. For these type of error,
		 * we ask scic user to retry the request.
		 */
		scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
					    SCI_FAILURE_RETRY_REQUIRED);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request
		 */
		scic_sds_request_set_status(ireq,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1100
/**
 * smp_request_await_tc_event() - TC completion for an SMP request in
 *    SCI_REQ_SMP_WAIT_TC_COMP (response frame already received)
 * @ireq: SMP request
 * @completion_code: raw SCU completion code
 *
 * GOOD completes the IO with SCI_SUCCESS; any other status completes it
 * with a controller-specific error.
 */
static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be
		 * complete. If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(ireq,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1127
/**
 * scic_stp_io_request_set_ncq_tag() - record the NCQ tag in the task context
 * @ireq: STP/SATA request
 * @ncq_tag: NCQ tag assigned to this command
 */
void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq,
				     u16 ncq_tag)
{
	/* NOTE: This could be made to return an error to the user if the
	 * user attempts to set the NCQ tag in the wrong state.
	 */
	ireq->tc->type.stp.ncq_tag = ncq_tag;
}
1137
/**
 * pio_sgl_next() - advance a PIO request to its next scatter-gather element
 * @stp_req: STP PIO request whose sgl cursor (pair index + A/B set) is
 *	advanced in place
 *
 * Within a pair, moves from element A to element B; when B (or the next-pair
 * link) is a zero address the list is exhausted.  Moving past B steps to
 * element A of the next pair.
 *
 * Return: pointer to the next element, or NULL at the end of the list.
 */
static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
	struct scu_sgl_element *sgl;
	struct scu_sgl_element_pair *sgl_pair;
	struct isci_request *ireq = to_ireq(stp_req);
	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
	if (!sgl_pair)
		sgl = NULL;
	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
		/* A zero B address marks the end of the list. */
		if (sgl_pair->B.address_lower == 0 &&
		    sgl_pair->B.address_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
			sgl = &sgl_pair->B;
		}
	} else {
		/* A zero next-pair link marks the end of the list. */
		if (sgl_pair->next_pair_lower == 0 &&
		    sgl_pair->next_pair_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->index++;
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
			sgl = &sgl_pair->A;
		}
	}

	return sgl;
}
1170
/**
 * stp_request_non_data_await_h2d_tc_event() - TC completion for a non-data
 *    STP request waiting on the H2D FIS (SCI_REQ_STP_NON_DATA_WAIT_H2D)
 * @ireq: STP request
 * @completion_code: raw SCU completion code
 *
 * GOOD advances the state machine to wait for the D2H FIS; any other status
 * completes the request with a controller-specific error.
 */
static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete. If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(ireq,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1198
#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */

/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length. current sgl and offset is alreay stored in the IO request
 *
 * NOTE(review): function name contains a typo ("trasmit" for "transmit");
 * not renamed here since callers outside this view use the current name.
 */
static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
	struct isci_request *ireq,
	u32 length)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_task_context *task_context = ireq->tc;
	struct scu_sgl_element_pair *sgl_pair;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out DATA FIS containing
	 * for the data from current_sgl+offset for the input length
	 */
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &sgl_pair->A;
	else
		current_sgl = &sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return scic_controller_continue_io(ireq);
}
1231
Dan Williams5076a1a2011-06-27 14:57:03 -07001232static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
Dan Williams5dec6f42011-05-10 02:28:49 -07001233{
Dan Williams5076a1a2011-06-27 14:57:03 -07001234 struct isci_stp_request *stp_req = &ireq->stp.req;
Dan Williams312e0c22011-06-28 13:47:09 -07001235 struct scu_sgl_element_pair *sgl_pair;
Dan Williamsba7cb222011-06-27 11:56:41 -07001236 struct scu_sgl_element *sgl;
1237 enum sci_status status;
1238 u32 offset;
1239 u32 len = 0;
Dan Williams5dec6f42011-05-10 02:28:49 -07001240
Dan Williamsba7cb222011-06-27 11:56:41 -07001241 offset = stp_req->sgl.offset;
Dan Williams5076a1a2011-06-27 14:57:03 -07001242 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
Dan Williams312e0c22011-06-28 13:47:09 -07001243 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1244 return SCI_FAILURE;
Dan Williams5dec6f42011-05-10 02:28:49 -07001245
Dan Williamsba7cb222011-06-27 11:56:41 -07001246 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
1247 sgl = &sgl_pair->A;
1248 len = sgl_pair->A.length - offset;
Dan Williams5dec6f42011-05-10 02:28:49 -07001249 } else {
Dan Williamsba7cb222011-06-27 11:56:41 -07001250 sgl = &sgl_pair->B;
1251 len = sgl_pair->B.length - offset;
Dan Williams5dec6f42011-05-10 02:28:49 -07001252 }
1253
Dan Williamsba7cb222011-06-27 11:56:41 -07001254 if (stp_req->pio_len == 0)
1255 return SCI_SUCCESS;
Dan Williams5dec6f42011-05-10 02:28:49 -07001256
Dan Williamsba7cb222011-06-27 11:56:41 -07001257 if (stp_req->pio_len >= len) {
Dan Williams5076a1a2011-06-27 14:57:03 -07001258 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
Dan Williamsba7cb222011-06-27 11:56:41 -07001259 if (status != SCI_SUCCESS)
1260 return status;
1261 stp_req->pio_len -= len;
Dan Williams5dec6f42011-05-10 02:28:49 -07001262
Dan Williamsba7cb222011-06-27 11:56:41 -07001263 /* update the current sgl, offset and save for future */
1264 sgl = pio_sgl_next(stp_req);
1265 offset = 0;
1266 } else if (stp_req->pio_len < len) {
Dan Williams5076a1a2011-06-27 14:57:03 -07001267 scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
Dan Williamsba7cb222011-06-27 11:56:41 -07001268
1269 /* Sgl offset will be adjusted and saved for future */
1270 offset += stp_req->pio_len;
1271 sgl->address_lower += stp_req->pio_len;
1272 stp_req->pio_len = 0;
Dan Williams5dec6f42011-05-10 02:28:49 -07001273 }
1274
Dan Williamsba7cb222011-06-27 11:56:41 -07001275 stp_req->sgl.offset = offset;
Dan Williams5dec6f42011-05-10 02:28:49 -07001276
1277 return status;
1278}
1279
/**
 * scic_sds_stp_request_pio_data_in_copy_data_buffer() - copy received PIO
 *    data into the IO request's data region
 * @stp_req: the STP request whose SGL/task describes the destination.
 * @data_buf: the (frame) buffer of data to be copied.
 * @len: the number of bytes to copy.
 *
 * Walks the sas_task scatterlist, copying up to sg_dma_len() bytes per
 * entry until @len bytes have been consumed.  Returns SCI_SUCCESS.
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			/* Clamp each copy to the current sg entry.
			 * NOTE(review): bound is sg_dma_len() while the write
			 * lands at sg->offset within the page -- assumes the
			 * mapped DMA length matches the entry length; confirm.
			 */
			copy_len = min_t(int, total_len, sg_dma_len(sg));
			/* Atomic kmap (pre-3.x two-argument API, KM_IRQ0
			 * slot): may run in interrupt context, so no
			 * sleeping map is allowed here.
			 */
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		/* No scatterlist: task->scatter is used directly as a flat
		 * destination buffer (presumably the single-buffer libsas
		 * case -- verify against the task submission path).
		 */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
1326
1327/**
1328 *
1329 * @sci_req: The PIO DATA IN request that is to receive the data.
1330 * @data_buffer: The buffer to copy from.
1331 *
1332 * Copy the data buffer to the io request data region. enum sci_status
1333 */
1334static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
Dan Williamsba7cb222011-06-27 11:56:41 -07001335 struct isci_stp_request *stp_req,
Dan Williams5dec6f42011-05-10 02:28:49 -07001336 u8 *data_buffer)
1337{
1338 enum sci_status status;
1339
1340 /*
1341 * If there is less than 1K remaining in the transfer request
1342 * copy just the data for the transfer */
Dan Williamsba7cb222011-06-27 11:56:41 -07001343 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
Dan Williams5dec6f42011-05-10 02:28:49 -07001344 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
Dan Williamsba7cb222011-06-27 11:56:41 -07001345 stp_req, data_buffer, stp_req->pio_len);
Dan Williams5dec6f42011-05-10 02:28:49 -07001346
1347 if (status == SCI_SUCCESS)
Dan Williamsba7cb222011-06-27 11:56:41 -07001348 stp_req->pio_len = 0;
Dan Williams5dec6f42011-05-10 02:28:49 -07001349 } else {
1350 /* We are transfering the whole frame so copy */
1351 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
Dan Williamsba7cb222011-06-27 11:56:41 -07001352 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
Dan Williams5dec6f42011-05-10 02:28:49 -07001353
1354 if (status == SCI_SUCCESS)
Dan Williamsba7cb222011-06-27 11:56:41 -07001355 stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
Dan Williams5dec6f42011-05-10 02:28:49 -07001356 }
1357
1358 return status;
1359}
1360
Edmund Nadolskie3013702011-06-02 00:10:43 +00001361static enum sci_status
Dan Williams5076a1a2011-06-27 14:57:03 -07001362stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001363 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07001364{
1365 enum sci_status status = SCI_SUCCESS;
1366
1367 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1368 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
Dan Williams5076a1a2011-06-27 14:57:03 -07001369 scic_sds_request_set_status(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001370 SCU_TASK_DONE_GOOD,
1371 SCI_SUCCESS);
Dan Williams5dec6f42011-05-10 02:28:49 -07001372
Dan Williams5076a1a2011-06-27 14:57:03 -07001373 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
Dan Williams5dec6f42011-05-10 02:28:49 -07001374 break;
1375
1376 default:
Dan Williamsa7e255a2011-05-11 08:27:47 -07001377 /* All other completion status cause the IO to be
1378 * complete. If a NAK was received, then it is up to
1379 * the user to retry the request.
1380 */
Dan Williams5076a1a2011-06-27 14:57:03 -07001381 scic_sds_request_set_status(ireq,
Dan Williamsa7e255a2011-05-11 08:27:47 -07001382 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1383 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
Dan Williams5dec6f42011-05-10 02:28:49 -07001384
Dan Williams5076a1a2011-06-27 14:57:03 -07001385 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williams5dec6f42011-05-10 02:28:49 -07001386 break;
1387 }
1388
1389 return status;
1390}
1391
Edmund Nadolskie3013702011-06-02 00:10:43 +00001392static enum sci_status
Dan Williams5076a1a2011-06-27 14:57:03 -07001393pio_data_out_tx_done_tc_event(struct isci_request *ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001394 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07001395{
1396 enum sci_status status = SCI_SUCCESS;
1397 bool all_frames_transferred = false;
Dan Williams5076a1a2011-06-27 14:57:03 -07001398 struct isci_stp_request *stp_req = &ireq->stp.req;
Dan Williams5dec6f42011-05-10 02:28:49 -07001399
1400 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1401 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1402 /* Transmit data */
Dan Williamsba7cb222011-06-27 11:56:41 -07001403 if (stp_req->pio_len != 0) {
Dan Williams5076a1a2011-06-27 14:57:03 -07001404 status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -07001405 if (status == SCI_SUCCESS) {
Dan Williamsba7cb222011-06-27 11:56:41 -07001406 if (stp_req->pio_len == 0)
Dan Williams5dec6f42011-05-10 02:28:49 -07001407 all_frames_transferred = true;
1408 }
Dan Williamsba7cb222011-06-27 11:56:41 -07001409 } else if (stp_req->pio_len == 0) {
Dan Williams5dec6f42011-05-10 02:28:49 -07001410 /*
1411 * this will happen if the all data is written at the
1412 * first time after the pio setup fis is received
1413 */
1414 all_frames_transferred = true;
1415 }
1416
1417 /* all data transferred. */
1418 if (all_frames_transferred) {
1419 /*
Edmund Nadolskie3013702011-06-02 00:10:43 +00001420 * Change the state to SCI_REQ_STP_PIO_DATA_IN
Dan Williams5dec6f42011-05-10 02:28:49 -07001421 * and wait for PIO_SETUP fis / or D2H REg fis. */
Dan Williams5076a1a2011-06-27 14:57:03 -07001422 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
Dan Williams5dec6f42011-05-10 02:28:49 -07001423 }
1424 break;
Edmund Nadolskie3013702011-06-02 00:10:43 +00001425
Dan Williams5dec6f42011-05-10 02:28:49 -07001426 default:
1427 /*
Edmund Nadolskie3013702011-06-02 00:10:43 +00001428 * All other completion status cause the IO to be complete.
1429 * If a NAK was received, then it is up to the user to retry
1430 * the request.
1431 */
Dan Williams5dec6f42011-05-10 02:28:49 -07001432 scic_sds_request_set_status(
Dan Williams5076a1a2011-06-27 14:57:03 -07001433 ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -07001434 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
Edmund Nadolskie3013702011-06-02 00:10:43 +00001435 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
Dan Williams5dec6f42011-05-10 02:28:49 -07001436
Dan Williams5076a1a2011-06-27 14:57:03 -07001437 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williams5dec6f42011-05-10 02:28:49 -07001438 break;
1439 }
1440
1441 return status;
1442}
1443
Dan Williams5dec6f42011-05-10 02:28:49 -07001444static void scic_sds_stp_request_udma_complete_request(
Dan Williams5076a1a2011-06-27 14:57:03 -07001445 struct isci_request *ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -07001446 u32 scu_status,
1447 enum sci_status sci_status)
1448{
Dan Williams5076a1a2011-06-27 14:57:03 -07001449 scic_sds_request_set_status(ireq, scu_status, sci_status);
1450 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williams5dec6f42011-05-10 02:28:49 -07001451}
1452
Dan Williams5076a1a2011-06-27 14:57:03 -07001453static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -07001454 u32 frame_index)
1455{
Dan Williams5076a1a2011-06-27 14:57:03 -07001456 struct scic_sds_controller *scic = ireq->owning_controller;
Dan Williams5dec6f42011-05-10 02:28:49 -07001457 struct dev_to_host_fis *frame_header;
1458 enum sci_status status;
1459 u32 *frame_buffer;
1460
1461 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1462 frame_index,
1463 (void **)&frame_header);
1464
1465 if ((status == SCI_SUCCESS) &&
1466 (frame_header->fis_type == FIS_REGD2H)) {
1467 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1468 frame_index,
1469 (void **)&frame_buffer);
1470
Dan Williams5076a1a2011-06-27 14:57:03 -07001471 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
Dan Williams5dec6f42011-05-10 02:28:49 -07001472 frame_header,
1473 frame_buffer);
1474 }
1475
1476 scic_sds_controller_release_frame(scic, frame_index);
1477
1478 return status;
1479}
1480
Edmund Nadolskie3013702011-06-02 00:10:43 +00001481enum sci_status
Dan Williams5076a1a2011-06-27 14:57:03 -07001482scic_sds_io_request_frame_handler(struct isci_request *ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001483 u32 frame_index)
Dan Williamsd1c637c32011-05-11 08:27:47 -07001484{
Dan Williams5076a1a2011-06-27 14:57:03 -07001485 struct scic_sds_controller *scic = ireq->owning_controller;
1486 struct isci_stp_request *stp_req = &ireq->stp.req;
Dan Williamsd1c637c32011-05-11 08:27:47 -07001487 enum sci_base_request_states state;
1488 enum sci_status status;
1489 ssize_t word_cnt;
1490
Dan Williams5076a1a2011-06-27 14:57:03 -07001491 state = ireq->sm.current_state_id;
Dan Williamsd1c637c32011-05-11 08:27:47 -07001492 switch (state) {
Edmund Nadolskie3013702011-06-02 00:10:43 +00001493 case SCI_REQ_STARTED: {
Dan Williamsd1c637c32011-05-11 08:27:47 -07001494 struct ssp_frame_hdr ssp_hdr;
1495 void *frame_header;
1496
1497 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1498 frame_index,
1499 &frame_header);
1500
1501 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1502 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1503
1504 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1505 struct ssp_response_iu *resp_iu;
1506 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1507
1508 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1509 frame_index,
1510 (void **)&resp_iu);
1511
Dan Williams5076a1a2011-06-27 14:57:03 -07001512 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001513
Dan Williams5076a1a2011-06-27 14:57:03 -07001514 resp_iu = &ireq->ssp.rsp;
Dan Williamsd1c637c32011-05-11 08:27:47 -07001515
1516 if (resp_iu->datapres == 0x01 ||
1517 resp_iu->datapres == 0x02) {
Dan Williams5076a1a2011-06-27 14:57:03 -07001518 scic_sds_request_set_status(ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001519 SCU_TASK_DONE_CHECK_RESPONSE,
1520 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1521 } else
Dan Williams5076a1a2011-06-27 14:57:03 -07001522 scic_sds_request_set_status(ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001523 SCU_TASK_DONE_GOOD,
1524 SCI_SUCCESS);
1525 } else {
1526 /* not a response frame, why did it get forwarded? */
1527 dev_err(scic_to_dev(scic),
1528 "%s: SCIC IO Request 0x%p received unexpected "
Dan Williams5076a1a2011-06-27 14:57:03 -07001529 "frame %d type 0x%02x\n", __func__, ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001530 frame_index, ssp_hdr.frame_type);
1531 }
1532
1533 /*
Edmund Nadolskie3013702011-06-02 00:10:43 +00001534 * In any case we are done with this frame buffer return it to
1535 * the controller
Dan Williamsd1c637c32011-05-11 08:27:47 -07001536 */
1537 scic_sds_controller_release_frame(scic, frame_index);
1538
1539 return SCI_SUCCESS;
1540 }
Edmund Nadolskie3013702011-06-02 00:10:43 +00001541
1542 case SCI_REQ_TASK_WAIT_TC_RESP:
Dan Williams5076a1a2011-06-27 14:57:03 -07001543 scic_sds_io_request_copy_response(ireq);
1544 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001545 scic_sds_controller_release_frame(scic,frame_index);
1546 return SCI_SUCCESS;
Edmund Nadolskie3013702011-06-02 00:10:43 +00001547
1548 case SCI_REQ_SMP_WAIT_RESP: {
Dan Williams5076a1a2011-06-27 14:57:03 -07001549 struct smp_resp *rsp_hdr = &ireq->smp.rsp;
Dan Williamsd1c637c32011-05-11 08:27:47 -07001550 void *frame_header;
1551
1552 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1553 frame_index,
1554 &frame_header);
1555
1556 /* byte swap the header. */
1557 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1558 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1559
1560 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1561 void *smp_resp;
1562
1563 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1564 frame_index,
1565 &smp_resp);
1566
Dan Williams5edc3342011-06-16 17:20:35 -07001567 word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
Dan Williamsd1c637c32011-05-11 08:27:47 -07001568 sizeof(u32);
1569
1570 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1571 smp_resp, word_cnt);
1572
Dan Williams5076a1a2011-06-27 14:57:03 -07001573 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001574 SCI_SUCCESS);
1575
Dan Williams5076a1a2011-06-27 14:57:03 -07001576 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001577 } else {
Edmund Nadolskie3013702011-06-02 00:10:43 +00001578 /*
1579 * This was not a response frame why did it get
1580 * forwarded?
1581 */
Dan Williamsd1c637c32011-05-11 08:27:47 -07001582 dev_err(scic_to_dev(scic),
Edmund Nadolskie3013702011-06-02 00:10:43 +00001583 "%s: SCIC SMP Request 0x%p received unexpected "
1584 "frame %d type 0x%02x\n",
1585 __func__,
Dan Williams5076a1a2011-06-27 14:57:03 -07001586 ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001587 frame_index,
1588 rsp_hdr->frame_type);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001589
Dan Williams5076a1a2011-06-27 14:57:03 -07001590 scic_sds_request_set_status(ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001591 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1592 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1593
Dan Williams5076a1a2011-06-27 14:57:03 -07001594 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001595 }
1596
1597 scic_sds_controller_release_frame(scic, frame_index);
1598
1599 return SCI_SUCCESS;
1600 }
Edmund Nadolskie3013702011-06-02 00:10:43 +00001601
1602 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
Dan Williams5076a1a2011-06-27 14:57:03 -07001603 return scic_sds_stp_request_udma_general_frame_handler(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001604 frame_index);
1605
1606 case SCI_REQ_STP_UDMA_WAIT_D2H:
Dan Williamsd1c637c32011-05-11 08:27:47 -07001607 /* Use the general frame handler to copy the resposne data */
Dan Williams5076a1a2011-06-27 14:57:03 -07001608 status = scic_sds_stp_request_udma_general_frame_handler(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001609 frame_index);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001610
1611 if (status != SCI_SUCCESS)
1612 return status;
1613
Dan Williams5076a1a2011-06-27 14:57:03 -07001614 scic_sds_stp_request_udma_complete_request(ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001615 SCU_TASK_DONE_CHECK_RESPONSE,
1616 SCI_FAILURE_IO_RESPONSE_VALID);
Edmund Nadolskie3013702011-06-02 00:10:43 +00001617
Dan Williamsd1c637c32011-05-11 08:27:47 -07001618 return SCI_SUCCESS;
Edmund Nadolskie3013702011-06-02 00:10:43 +00001619
1620 case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
Dan Williamsd1c637c32011-05-11 08:27:47 -07001621 struct dev_to_host_fis *frame_header;
1622 u32 *frame_buffer;
1623
1624 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1625 frame_index,
1626 (void **)&frame_header);
1627
1628 if (status != SCI_SUCCESS) {
1629 dev_err(scic_to_dev(scic),
Edmund Nadolskie3013702011-06-02 00:10:43 +00001630 "%s: SCIC IO Request 0x%p could not get frame "
1631 "header for frame index %d, status %x\n",
1632 __func__,
1633 stp_req,
1634 frame_index,
1635 status);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001636
1637 return status;
1638 }
1639
1640 switch (frame_header->fis_type) {
1641 case FIS_REGD2H:
1642 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1643 frame_index,
1644 (void **)&frame_buffer);
1645
Dan Williams5076a1a2011-06-27 14:57:03 -07001646 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001647 frame_header,
1648 frame_buffer);
1649
1650 /* The command has completed with error */
Dan Williams5076a1a2011-06-27 14:57:03 -07001651 scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001652 SCI_FAILURE_IO_RESPONSE_VALID);
1653 break;
1654
1655 default:
1656 dev_warn(scic_to_dev(scic),
1657 "%s: IO Request:0x%p Frame Id:%d protocol "
1658 "violation occurred\n", __func__, stp_req,
1659 frame_index);
1660
Dan Williams5076a1a2011-06-27 14:57:03 -07001661 scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001662 SCI_FAILURE_PROTOCOL_VIOLATION);
1663 break;
1664 }
1665
Dan Williams5076a1a2011-06-27 14:57:03 -07001666 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001667
1668 /* Frame has been decoded return it to the controller */
1669 scic_sds_controller_release_frame(scic, frame_index);
1670
1671 return status;
1672 }
Edmund Nadolskie3013702011-06-02 00:10:43 +00001673
1674 case SCI_REQ_STP_PIO_WAIT_FRAME: {
Dan Williamsd1c637c32011-05-11 08:27:47 -07001675 struct sas_task *task = isci_request_access_task(ireq);
1676 struct dev_to_host_fis *frame_header;
1677 u32 *frame_buffer;
1678
1679 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1680 frame_index,
1681 (void **)&frame_header);
1682
1683 if (status != SCI_SUCCESS) {
1684 dev_err(scic_to_dev(scic),
Edmund Nadolskie3013702011-06-02 00:10:43 +00001685 "%s: SCIC IO Request 0x%p could not get frame "
1686 "header for frame index %d, status %x\n",
Dan Williamsd1c637c32011-05-11 08:27:47 -07001687 __func__, stp_req, frame_index, status);
1688 return status;
1689 }
1690
1691 switch (frame_header->fis_type) {
1692 case FIS_PIO_SETUP:
1693 /* Get from the frame buffer the PIO Setup Data */
1694 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1695 frame_index,
1696 (void **)&frame_buffer);
1697
Edmund Nadolskie3013702011-06-02 00:10:43 +00001698 /* Get the data from the PIO Setup The SCU Hardware
1699 * returns first word in the frame_header and the rest
1700 * of the data is in the frame buffer so we need to
1701 * back up one dword
Dan Williamsd1c637c32011-05-11 08:27:47 -07001702 */
1703
1704 /* transfer_count: first 16bits in the 4th dword */
Dan Williamsba7cb222011-06-27 11:56:41 -07001705 stp_req->pio_len = frame_buffer[3] & 0xffff;
Dan Williamsd1c637c32011-05-11 08:27:47 -07001706
Dan Williamsba7cb222011-06-27 11:56:41 -07001707 /* status: 4th byte in the 3rd dword */
1708 stp_req->status = (frame_buffer[2] >> 24) & 0xff;
Dan Williamsd1c637c32011-05-11 08:27:47 -07001709
Dan Williams5076a1a2011-06-27 14:57:03 -07001710 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001711 frame_header,
1712 frame_buffer);
1713
Dan Williams5076a1a2011-06-27 14:57:03 -07001714 ireq->stp.rsp.status = stp_req->status;
Dan Williamsd1c637c32011-05-11 08:27:47 -07001715
1716 /* The next state is dependent on whether the
1717 * request was PIO Data-in or Data out
1718 */
1719 if (task->data_dir == DMA_FROM_DEVICE) {
Dan Williams5076a1a2011-06-27 14:57:03 -07001720 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001721 } else if (task->data_dir == DMA_TO_DEVICE) {
1722 /* Transmit data */
Dan Williams5076a1a2011-06-27 14:57:03 -07001723 status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001724 if (status != SCI_SUCCESS)
1725 break;
Dan Williams5076a1a2011-06-27 14:57:03 -07001726 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001727 }
1728 break;
Edmund Nadolskie3013702011-06-02 00:10:43 +00001729
Dan Williamsd1c637c32011-05-11 08:27:47 -07001730 case FIS_SETDEVBITS:
Dan Williams5076a1a2011-06-27 14:57:03 -07001731 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001732 break;
Edmund Nadolskie3013702011-06-02 00:10:43 +00001733
Dan Williamsd1c637c32011-05-11 08:27:47 -07001734 case FIS_REGD2H:
1735 if (frame_header->status & ATA_BUSY) {
Edmund Nadolskie3013702011-06-02 00:10:43 +00001736 /*
1737 * Now why is the drive sending a D2H Register
1738 * FIS when it is still busy? Do nothing since
1739 * we are still in the right state.
Dan Williamsd1c637c32011-05-11 08:27:47 -07001740 */
1741 dev_dbg(scic_to_dev(scic),
1742 "%s: SCIC PIO Request 0x%p received "
1743 "D2H Register FIS with BSY status "
Edmund Nadolskie3013702011-06-02 00:10:43 +00001744 "0x%x\n",
1745 __func__,
1746 stp_req,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001747 frame_header->status);
1748 break;
1749 }
1750
1751 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1752 frame_index,
1753 (void **)&frame_buffer);
1754
Dan Williams5076a1a2011-06-27 14:57:03 -07001755 scic_sds_controller_copy_sata_response(&ireq->stp.req,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001756 frame_header,
1757 frame_buffer);
1758
Dan Williams5076a1a2011-06-27 14:57:03 -07001759 scic_sds_request_set_status(ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001760 SCU_TASK_DONE_CHECK_RESPONSE,
1761 SCI_FAILURE_IO_RESPONSE_VALID);
1762
Dan Williams5076a1a2011-06-27 14:57:03 -07001763 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001764 break;
Edmund Nadolskie3013702011-06-02 00:10:43 +00001765
Dan Williamsd1c637c32011-05-11 08:27:47 -07001766 default:
1767 /* FIXME: what do we do here? */
1768 break;
1769 }
1770
1771 /* Frame is decoded return it to the controller */
1772 scic_sds_controller_release_frame(scic, frame_index);
1773
1774 return status;
1775 }
Edmund Nadolskie3013702011-06-02 00:10:43 +00001776
1777 case SCI_REQ_STP_PIO_DATA_IN: {
Dan Williamsd1c637c32011-05-11 08:27:47 -07001778 struct dev_to_host_fis *frame_header;
1779 struct sata_fis_data *frame_buffer;
1780
1781 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1782 frame_index,
1783 (void **)&frame_header);
1784
1785 if (status != SCI_SUCCESS) {
1786 dev_err(scic_to_dev(scic),
Edmund Nadolskie3013702011-06-02 00:10:43 +00001787 "%s: SCIC IO Request 0x%p could not get frame "
1788 "header for frame index %d, status %x\n",
1789 __func__,
1790 stp_req,
1791 frame_index,
1792 status);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001793 return status;
1794 }
1795
1796 if (frame_header->fis_type != FIS_DATA) {
1797 dev_err(scic_to_dev(scic),
1798 "%s: SCIC PIO Request 0x%p received frame %d "
1799 "with fis type 0x%02x when expecting a data "
Edmund Nadolskie3013702011-06-02 00:10:43 +00001800 "fis.\n",
1801 __func__,
1802 stp_req,
1803 frame_index,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001804 frame_header->fis_type);
1805
Dan Williams5076a1a2011-06-27 14:57:03 -07001806 scic_sds_request_set_status(ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001807 SCU_TASK_DONE_GOOD,
1808 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1809
Dan Williams5076a1a2011-06-27 14:57:03 -07001810 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001811
1812 /* Frame is decoded return it to the controller */
1813 scic_sds_controller_release_frame(scic, frame_index);
1814 return status;
1815 }
1816
Dan Williamsba7cb222011-06-27 11:56:41 -07001817 if (stp_req->sgl.index < 0) {
Dan Williams5076a1a2011-06-27 14:57:03 -07001818 ireq->saved_rx_frame_index = frame_index;
Dan Williamsba7cb222011-06-27 11:56:41 -07001819 stp_req->pio_len = 0;
Dan Williamsd1c637c32011-05-11 08:27:47 -07001820 } else {
1821 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1822 frame_index,
1823 (void **)&frame_buffer);
1824
1825 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
1826 (u8 *)frame_buffer);
1827
1828 /* Frame is decoded return it to the controller */
1829 scic_sds_controller_release_frame(scic, frame_index);
1830 }
1831
1832 /* Check for the end of the transfer, are there more
1833 * bytes remaining for this data transfer
1834 */
Dan Williamsba7cb222011-06-27 11:56:41 -07001835 if (status != SCI_SUCCESS || stp_req->pio_len != 0)
Dan Williamsd1c637c32011-05-11 08:27:47 -07001836 return status;
1837
Dan Williamsba7cb222011-06-27 11:56:41 -07001838 if ((stp_req->status & ATA_BUSY) == 0) {
Dan Williams5076a1a2011-06-27 14:57:03 -07001839 scic_sds_request_set_status(ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001840 SCU_TASK_DONE_CHECK_RESPONSE,
1841 SCI_FAILURE_IO_RESPONSE_VALID);
1842
Dan Williams5076a1a2011-06-27 14:57:03 -07001843 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001844 } else {
Dan Williams5076a1a2011-06-27 14:57:03 -07001845 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001846 }
1847 return status;
1848 }
Edmund Nadolskie3013702011-06-02 00:10:43 +00001849
1850 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
Dan Williamsd1c637c32011-05-11 08:27:47 -07001851 struct dev_to_host_fis *frame_header;
1852 u32 *frame_buffer;
1853
1854 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1855 frame_index,
1856 (void **)&frame_header);
1857 if (status != SCI_SUCCESS) {
1858 dev_err(scic_to_dev(scic),
Edmund Nadolskie3013702011-06-02 00:10:43 +00001859 "%s: SCIC IO Request 0x%p could not get frame "
1860 "header for frame index %d, status %x\n",
1861 __func__,
1862 stp_req,
1863 frame_index,
1864 status);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001865 return status;
1866 }
1867
1868 switch (frame_header->fis_type) {
1869 case FIS_REGD2H:
1870 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1871 frame_index,
1872 (void **)&frame_buffer);
1873
Dan Williams5076a1a2011-06-27 14:57:03 -07001874 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001875 frame_header,
1876 frame_buffer);
1877
1878 /* The command has completed with error */
Dan Williams5076a1a2011-06-27 14:57:03 -07001879 scic_sds_request_set_status(ireq,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001880 SCU_TASK_DONE_CHECK_RESPONSE,
1881 SCI_FAILURE_IO_RESPONSE_VALID);
1882 break;
Edmund Nadolskie3013702011-06-02 00:10:43 +00001883
Dan Williamsd1c637c32011-05-11 08:27:47 -07001884 default:
1885 dev_warn(scic_to_dev(scic),
1886 "%s: IO Request:0x%p Frame Id:%d protocol "
Edmund Nadolskie3013702011-06-02 00:10:43 +00001887 "violation occurred\n",
1888 __func__,
1889 stp_req,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001890 frame_index);
1891
Dan Williams5076a1a2011-06-27 14:57:03 -07001892 scic_sds_request_set_status(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001893 SCU_TASK_DONE_UNEXP_FIS,
Dan Williamsd1c637c32011-05-11 08:27:47 -07001894 SCI_FAILURE_PROTOCOL_VIOLATION);
1895 break;
1896 }
1897
Dan Williams5076a1a2011-06-27 14:57:03 -07001898 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001899
1900 /* Frame has been decoded return it to the controller */
1901 scic_sds_controller_release_frame(scic, frame_index);
1902
1903 return status;
1904 }
Edmund Nadolskie3013702011-06-02 00:10:43 +00001905 case SCI_REQ_ABORTING:
1906 /*
1907 * TODO: Is it even possible to get an unsolicited frame in the
Dan Williamsd1c637c32011-05-11 08:27:47 -07001908 * aborting state?
1909 */
1910 scic_sds_controller_release_frame(scic, frame_index);
1911 return SCI_SUCCESS;
Edmund Nadolskie3013702011-06-02 00:10:43 +00001912
Dan Williamsd1c637c32011-05-11 08:27:47 -07001913 default:
1914 dev_warn(scic_to_dev(scic),
Edmund Nadolskie3013702011-06-02 00:10:43 +00001915 "%s: SCIC IO Request given unexpected frame %x while "
1916 "in state %d\n",
1917 __func__,
1918 frame_index,
1919 state);
Dan Williamsd1c637c32011-05-11 08:27:47 -07001920
1921 scic_sds_controller_release_frame(scic, frame_index);
1922 return SCI_FAILURE_INVALID_STATE;
1923 }
1924}
1925
Dan Williams5076a1a2011-06-27 14:57:03 -07001926static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
Dan Williamsa7e255a2011-05-11 08:27:47 -07001927 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07001928{
1929 enum sci_status status = SCI_SUCCESS;
1930
1931 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1932 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
Dan Williams5076a1a2011-06-27 14:57:03 -07001933 scic_sds_stp_request_udma_complete_request(ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -07001934 SCU_TASK_DONE_GOOD,
1935 SCI_SUCCESS);
1936 break;
1937 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1938 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
Dan Williamsa7e255a2011-05-11 08:27:47 -07001939 /* We must check ther response buffer to see if the D2H
1940 * Register FIS was received before we got the TC
1941 * completion.
1942 */
Dan Williams5076a1a2011-06-27 14:57:03 -07001943 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
1944 scic_sds_remote_device_suspend(ireq->target_device,
Dan Williams5dec6f42011-05-10 02:28:49 -07001945 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1946
Dan Williams5076a1a2011-06-27 14:57:03 -07001947 scic_sds_stp_request_udma_complete_request(ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -07001948 SCU_TASK_DONE_CHECK_RESPONSE,
1949 SCI_FAILURE_IO_RESPONSE_VALID);
1950 } else {
Dan Williamsa7e255a2011-05-11 08:27:47 -07001951 /* If we have an error completion status for the
1952 * TC then we can expect a D2H register FIS from
1953 * the device so we must change state to wait
1954 * for it
1955 */
Dan Williams5076a1a2011-06-27 14:57:03 -07001956 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
Dan Williams5dec6f42011-05-10 02:28:49 -07001957 }
1958 break;
1959
Dan Williamsa7e255a2011-05-11 08:27:47 -07001960 /* TODO Check to see if any of these completion status need to
1961 * wait for the device to host register fis.
1962 */
1963 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
1964 * - this comes only for B0
1965 */
Dan Williams5dec6f42011-05-10 02:28:49 -07001966 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
1967 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1968 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1969 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1970 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
Dan Williams5076a1a2011-06-27 14:57:03 -07001971 scic_sds_remote_device_suspend(ireq->target_device,
Dan Williams5dec6f42011-05-10 02:28:49 -07001972 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1973 /* Fall through to the default case */
1974 default:
1975 /* All other completion status cause the IO to be complete. */
Dan Williams5076a1a2011-06-27 14:57:03 -07001976 scic_sds_stp_request_udma_complete_request(ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -07001977 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1978 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1979 break;
1980 }
1981
1982 return status;
1983}
1984
Edmund Nadolskie3013702011-06-02 00:10:43 +00001985static enum sci_status
Dan Williams5076a1a2011-06-27 14:57:03 -07001986stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001987 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07001988{
1989 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1990 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
Dan Williams5076a1a2011-06-27 14:57:03 -07001991 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
Dan Williamsa7e255a2011-05-11 08:27:47 -07001992 SCI_SUCCESS);
Dan Williams5dec6f42011-05-10 02:28:49 -07001993
Dan Williams5076a1a2011-06-27 14:57:03 -07001994 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
Dan Williams5dec6f42011-05-10 02:28:49 -07001995 break;
1996
1997 default:
1998 /*
Edmund Nadolskie3013702011-06-02 00:10:43 +00001999 * All other completion status cause the IO to be complete.
2000 * If a NAK was received, then it is up to the user to retry
2001 * the request.
2002 */
Dan Williams5076a1a2011-06-27 14:57:03 -07002003 scic_sds_request_set_status(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002004 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2005 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
Dan Williams5dec6f42011-05-10 02:28:49 -07002006
Dan Williams5076a1a2011-06-27 14:57:03 -07002007 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williams5dec6f42011-05-10 02:28:49 -07002008 break;
2009 }
2010
2011 return SCI_SUCCESS;
2012}
2013
Edmund Nadolskie3013702011-06-02 00:10:43 +00002014static enum sci_status
Dan Williams5076a1a2011-06-27 14:57:03 -07002015stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002016 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07002017{
2018 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2019 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
Dan Williams5076a1a2011-06-27 14:57:03 -07002020 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
Dan Williams5dec6f42011-05-10 02:28:49 -07002021 SCI_SUCCESS);
2022
Dan Williams5076a1a2011-06-27 14:57:03 -07002023 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
Dan Williams5dec6f42011-05-10 02:28:49 -07002024 break;
2025
2026 default:
Dan Williamsa7e255a2011-05-11 08:27:47 -07002027 /* All other completion status cause the IO to be complete. If
2028 * a NAK was received, then it is up to the user to retry the
2029 * request.
2030 */
Dan Williams5076a1a2011-06-27 14:57:03 -07002031 scic_sds_request_set_status(ireq,
Dan Williams5dec6f42011-05-10 02:28:49 -07002032 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
Dan Williamsa7e255a2011-05-11 08:27:47 -07002033 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
Dan Williams5dec6f42011-05-10 02:28:49 -07002034
Dan Williams5076a1a2011-06-27 14:57:03 -07002035 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
Dan Williams5dec6f42011-05-10 02:28:49 -07002036 break;
2037 }
2038
2039 return SCI_SUCCESS;
2040}
2041
Dan Williamsa7e255a2011-05-11 08:27:47 -07002042enum sci_status
Dan Williams5076a1a2011-06-27 14:57:03 -07002043scic_sds_io_request_tc_completion(struct isci_request *ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002044 u32 completion_code)
Dan Williamsa7e255a2011-05-11 08:27:47 -07002045{
2046 enum sci_base_request_states state;
Dan Williams5076a1a2011-06-27 14:57:03 -07002047 struct scic_sds_controller *scic = ireq->owning_controller;
Dan Williamsa7e255a2011-05-11 08:27:47 -07002048
Dan Williams5076a1a2011-06-27 14:57:03 -07002049 state = ireq->sm.current_state_id;
Dan Williamsa7e255a2011-05-11 08:27:47 -07002050
2051 switch (state) {
Edmund Nadolskie3013702011-06-02 00:10:43 +00002052 case SCI_REQ_STARTED:
Dan Williams5076a1a2011-06-27 14:57:03 -07002053 return request_started_state_tc_event(ireq, completion_code);
Edmund Nadolskie3013702011-06-02 00:10:43 +00002054
2055 case SCI_REQ_TASK_WAIT_TC_COMP:
Dan Williams5076a1a2011-06-27 14:57:03 -07002056 return ssp_task_request_await_tc_event(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002057 completion_code);
2058
2059 case SCI_REQ_SMP_WAIT_RESP:
Dan Williams5076a1a2011-06-27 14:57:03 -07002060 return smp_request_await_response_tc_event(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002061 completion_code);
2062
2063 case SCI_REQ_SMP_WAIT_TC_COMP:
Dan Williams5076a1a2011-06-27 14:57:03 -07002064 return smp_request_await_tc_event(ireq, completion_code);
Edmund Nadolskie3013702011-06-02 00:10:43 +00002065
2066 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
Dan Williams5076a1a2011-06-27 14:57:03 -07002067 return stp_request_udma_await_tc_event(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002068 completion_code);
2069
2070 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
Dan Williams5076a1a2011-06-27 14:57:03 -07002071 return stp_request_non_data_await_h2d_tc_event(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002072 completion_code);
2073
2074 case SCI_REQ_STP_PIO_WAIT_H2D:
Dan Williams5076a1a2011-06-27 14:57:03 -07002075 return stp_request_pio_await_h2d_completion_tc_event(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002076 completion_code);
2077
2078 case SCI_REQ_STP_PIO_DATA_OUT:
Dan Williams5076a1a2011-06-27 14:57:03 -07002079 return pio_data_out_tx_done_tc_event(ireq, completion_code);
Edmund Nadolskie3013702011-06-02 00:10:43 +00002080
2081 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
Dan Williams5076a1a2011-06-27 14:57:03 -07002082 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002083 completion_code);
2084
2085 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
Dan Williams5076a1a2011-06-27 14:57:03 -07002086 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002087 completion_code);
2088
2089 case SCI_REQ_ABORTING:
Dan Williams5076a1a2011-06-27 14:57:03 -07002090 return request_aborting_state_tc_event(ireq,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002091 completion_code);
2092
2093 default:
2094 dev_warn(scic_to_dev(scic),
2095 "%s: SCIC IO Request given task completion "
2096 "notification %x while in wrong state %d\n",
2097 __func__,
2098 completion_code,
2099 state);
2100 return SCI_FAILURE_INVALID_STATE;
Dan Williamsa7e255a2011-05-11 08:27:47 -07002101 }
2102}
2103
Dan Williams6f231dd2011-07-02 22:56:22 -07002104/**
2105 * isci_request_process_response_iu() - This function sets the status and
2106 * response iu, in the task struct, from the request object for the upper
2107 * layer driver.
2108 * @sas_task: This parameter is the task struct from the upper layer driver.
2109 * @resp_iu: This parameter points to the response iu of the completed request.
2110 * @dev: This parameter specifies the linux device struct.
2111 *
2112 * none.
2113 */
2114static void isci_request_process_response_iu(
2115 struct sas_task *task,
2116 struct ssp_response_iu *resp_iu,
2117 struct device *dev)
2118{
2119 dev_dbg(dev,
2120 "%s: resp_iu = %p "
2121 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2122 "resp_iu->response_data_len = %x, "
2123 "resp_iu->sense_data_len = %x\nrepsonse data: ",
2124 __func__,
2125 resp_iu,
2126 resp_iu->status,
2127 resp_iu->datapres,
2128 resp_iu->response_data_len,
2129 resp_iu->sense_data_len);
2130
2131 task->task_status.stat = resp_iu->status;
2132
2133 /* libsas updates the task status fields based on the response iu. */
2134 sas_ssp_task_response(dev, task, resp_iu);
2135}
2136
/**
 * isci_request_set_open_reject_status() - This function prepares the I/O
 *    completion for OPEN_REJECT conditions.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the libsas task that receives the encoded
 *    open reject reason.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @complete_to_host_ptr: This parameter specifies the action to be taken by
 *    the LLDD with respect to completing this request or forcing an abort
 *    condition on the I/O.
 * @open_rej_reason: This parameter specifies the encoded reason for the
 *    abandon-class reject.
 *
 * none.
 */
static void isci_request_set_open_reject_status(
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr,
	enum sas_open_rej_reason open_rej_reason)
{
	/* Task in the target is done. */
	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
	*response_ptr = SAS_TASK_UNDELIVERED;
	*status_ptr = SAS_OPEN_REJECT;
	*complete_to_host_ptr = isci_perform_normal_io_completion;
	task->task_status.open_rej_reason = open_rej_reason;
}
2166
/**
 * isci_request_handle_controller_specific_errors() - This function decodes
 *    controller-specific I/O completion error conditions.
 * @idev: This parameter is the remote device for the completed I/O, or NULL
 *    when the device has been / is being stopped.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the libsas task associated with @request.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @complete_to_host_ptr: This parameter specifies the action to be taken by
 *    the LLDD with respect to completing this request or forcing an abort
 *    condition on the I/O.
 *
 * none.
 */
static void isci_request_handle_controller_specific_errors(
	struct isci_remote_device *idev,
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr)
{
	unsigned int cstatus;

	/* The SCU completion code recorded on the request by the core. */
	cstatus = request->scu_status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
		"- controller status = 0x%x\n",
		__func__, request, cstatus);

	/* Decode the controller-specific errors; most
	 * important is to recognize those conditions in which
	 * the target may still have a task outstanding that
	 * must be aborted.
	 *
	 * Note that there are SCU completion codes being
	 * named in the decode below for which SCIC has already
	 * done work to handle them in a way other than as
	 * a controller-specific completion code; these are left
	 * in the decode below for completeness sake.
	 */
	switch (cstatus) {
	case SCU_TASK_DONE_DMASETUP_DIRERR:
	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
	case SCU_TASK_DONE_XFERCNT_ERR:
		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
		if (task->task_proto == SAS_PROTOCOL_SMP) {
			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
			*response_ptr = SAS_TASK_COMPLETE;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAS_ABORTED_TASK;

			/* The target finished the task; no abort needed. */
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr =
				isci_perform_normal_io_completion;
		} else {
			/* Task in the target is not done. */
			*response_ptr = SAS_TASK_UNDELIVERED;

			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAM_STAT_TASK_ABORTED;

			/* The target may still hold the task; force the
			 * error path so it gets cleaned up.
			 */
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr =
				isci_perform_error_io_completion;
		}

		break;

	case SCU_TASK_DONE_CRC_ERR:
	case SCU_TASK_DONE_NAK_CMD_ERR:
	case SCU_TASK_DONE_EXCESS_DATA:
	case SCU_TASK_DONE_UNEXP_FIS:
	/* Also SCU_TASK_DONE_UNEXP_RESP: */
	case SCU_TASK_DONE_VIIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_IIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_RNCNV_OUTBOUND:	/* TODO - conditions? */
		/* These are conditions in which the target
		 * has completed the task, so that no cleanup
		 * is necessary.
		 */
		*response_ptr = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			*status_ptr = SAS_DEVICE_UNKNOWN;
		else
			*status_ptr = SAS_ABORTED_TASK;

		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

		*complete_to_host_ptr = isci_perform_normal_io_completion;
		break;


	/* Note that the only open reject completion codes seen here will be
	 * abandon-class codes; all others are automatically retried in the SCU.
	 */
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:

		/* Note - the return of AB0 will change when
		 * libsas implements detection of zone violations.
		 */
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
		break;

	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_STP_NORES);
		break;

	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_EPROTO);
		break;

	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
		break;

	case SCU_TASK_DONE_LL_R_ERR:
	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
	case SCU_TASK_DONE_LL_PERR:
	case SCU_TASK_DONE_LL_SY_TERM:
	/* Also SCU_TASK_DONE_NAK_ERR:*/
	case SCU_TASK_DONE_LL_LF_TERM:
	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
	case SCU_TASK_DONE_LL_ABORT_ERR:
	case SCU_TASK_DONE_SEQ_INV_TYPE:
	/* Also SCU_TASK_DONE_UNEXP_XR: */
	case SCU_TASK_DONE_XR_IU_LEN_ERR:
	case SCU_TASK_DONE_INV_FIS_LEN:
	/* Also SCU_TASK_DONE_XR_WD_LEN: */
	case SCU_TASK_DONE_SDMA_ERR:
	case SCU_TASK_DONE_OFFSET_ERR:
	case SCU_TASK_DONE_MAX_PLD_ERR:
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_DONE_SMP_RESP_TO_ERR:	/* Escalate to dev reset? */
	case SCU_TASK_DONE_SMP_LL_RX_ERR:
	case SCU_TASK_DONE_UNEXP_DATA:
	case SCU_TASK_DONE_UNEXP_SDBFIS:
	case SCU_TASK_DONE_REG_ERR:
	case SCU_TASK_DONE_SDB_ERR:
	case SCU_TASK_DONE_TASK_ABORT:
	default:
		/* Task in the target is not done. */
		*response_ptr = SAS_TASK_UNDELIVERED;
		*status_ptr = SAM_STAT_TASK_ABORTED;

		/* SMP targets need no cleanup; everything else takes the
		 * error path so the task can be aborted in the target.
		 */
		if (task->task_proto == SAS_PROTOCOL_SMP) {
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr = isci_perform_normal_io_completion;
		} else {
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr = isci_perform_error_io_completion;
		}
		break;
	}
}
2385
/**
 * isci_task_save_for_upper_layer_completion() - This function saves the
 *    request for later completion to the upper layer driver.
 * @host: This parameter is a pointer to the host on which the request
 *    should be queued (either as an error or success).
 * @request: This parameter is the completed request.
 * @response: This parameter is the response code for the completed task.
 * @status: This parameter is the status code for the completed task.
 * @task_notification_selection: This parameter is the caller's suggested
 *    completion path; it may be overridden by
 *    isci_task_set_completion_status() below.
 *
 * none.
 */
static void isci_task_save_for_upper_layer_completion(
	struct isci_host *host,
	struct isci_request *request,
	enum service_response response,
	enum exec_status status,
	enum isci_completion_selection task_notification_selection)
{
	struct sas_task *task = isci_request_access_task(request);

	/* Record response/status on the task and get the final routing
	 * decision for this completion.
	 */
	task_notification_selection
		= isci_task_set_completion_status(task, response, status,
						  task_notification_selection);

	/* Tasks aborted specifically by a call to the lldd_abort_task
	 * function should not be completed to the host in the regular path.
	 */
	switch (task_notification_selection) {

	case isci_perform_normal_io_completion:

		/* Normal notification (task_done) */
		dev_dbg(&host->pdev->dev,
			"%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
			__func__,
			task,
			task->task_status.resp, response,
			task->task_status.stat, status);
		/* Add to the completed list. */
		list_add(&request->completed_node,
			 &host->requests_to_complete);

		/* Take the request off the device's pending request list. */
		list_del_init(&request->dev_node);
		break;

	case isci_perform_aborted_io_completion:
		/* No notification to libsas because this request is
		 * already in the abort path.
		 */
		dev_warn(&host->pdev->dev,
			 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
			 __func__,
			 task,
			 task->task_status.resp, response,
			 task->task_status.stat, status);

		/* Wake up whatever process was waiting for this
		 * request to complete.
		 */
		WARN_ON(request->io_request_completion == NULL);

		if (request->io_request_completion != NULL) {

			/* Signal whoever is waiting that this
			 * request is complete.
			 */
			complete(request->io_request_completion);
		}
		break;

	case isci_perform_error_io_completion:
		/* Use sas_task_abort */
		dev_warn(&host->pdev->dev,
			 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
			 __func__,
			 task,
			 task->task_status.resp, response,
			 task->task_status.stat, status);
		/* Add to the aborted list. */
		list_add(&request->completed_node,
			 &host->requests_to_errorback);
		break;

	default:
		/* Unrecognized selection: treat it like the error path so
		 * the request is still reported back through libsas.
		 */
		dev_warn(&host->pdev->dev,
			 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
			 __func__,
			 task,
			 task->task_status.resp, response,
			 task->task_status.stat, status);

		/* Add to the error to libsas list. */
		list_add(&request->completed_node,
			 &host->requests_to_errorback);
		break;
	}
}
2484
Dan Williamsf1f52e72011-05-10 02:28:45 -07002485static void isci_request_io_request_complete(struct isci_host *isci_host,
2486 struct isci_request *request,
2487 enum sci_io_status completion_status)
Dan Williams6f231dd2011-07-02 22:56:22 -07002488{
2489 struct sas_task *task = isci_request_access_task(request);
2490 struct ssp_response_iu *resp_iu;
2491 void *resp_buf;
2492 unsigned long task_flags;
Dan Williams209fae12011-06-13 17:39:44 -07002493 struct isci_remote_device *idev = isci_lookup_device(task->dev);
Dan Williams6f231dd2011-07-02 22:56:22 -07002494 enum service_response response = SAS_TASK_UNDELIVERED;
2495 enum exec_status status = SAS_ABORTED_TASK;
2496 enum isci_request_status request_status;
2497 enum isci_completion_selection complete_to_host
2498 = isci_perform_normal_io_completion;
2499
2500 dev_dbg(&isci_host->pdev->dev,
2501 "%s: request = %p, task = %p,\n"
2502 "task->data_dir = %d completion_status = 0x%x\n",
2503 __func__,
2504 request,
2505 task,
2506 task->data_dir,
2507 completion_status);
2508
Jeff Skirvina5fde222011-03-04 14:06:42 -08002509 spin_lock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002510 request_status = isci_request_get_state(request);
Dan Williams6f231dd2011-07-02 22:56:22 -07002511
2512 /* Decode the request status. Note that if the request has been
2513 * aborted by a task management function, we don't care
2514 * what the status is.
2515 */
2516 switch (request_status) {
2517
2518 case aborted:
2519 /* "aborted" indicates that the request was aborted by a task
2520 * management function, since once a task management request is
2521 * perfomed by the device, the request only completes because
2522 * of the subsequent driver terminate.
2523 *
2524 * Aborted also means an external thread is explicitly managing
2525 * this request, so that we do not complete it up the stack.
2526 *
2527 * The target is still there (since the TMF was successful).
2528 */
Dan Williams38d88792011-06-23 14:33:48 -07002529 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Dan Williams6f231dd2011-07-02 22:56:22 -07002530 response = SAS_TASK_COMPLETE;
2531
2532 /* See if the device has been/is being stopped. Note
2533 * that we ignore the quiesce state, since we are
2534 * concerned about the actual device state.
2535 */
Dan Williams209fae12011-06-13 17:39:44 -07002536 if (!idev)
Dan Williams6f231dd2011-07-02 22:56:22 -07002537 status = SAS_DEVICE_UNKNOWN;
2538 else
2539 status = SAS_ABORTED_TASK;
2540
2541 complete_to_host = isci_perform_aborted_io_completion;
2542 /* This was an aborted request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002543
2544 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002545 break;
2546
2547 case aborting:
2548 /* aborting means that the task management function tried and
2549 * failed to abort the request. We need to note the request
2550 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2551 * target as down.
2552 *
2553 * Aborting also means an external thread is explicitly managing
2554 * this request, so that we do not complete it up the stack.
2555 */
Dan Williams38d88792011-06-23 14:33:48 -07002556 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Dan Williams6f231dd2011-07-02 22:56:22 -07002557 response = SAS_TASK_UNDELIVERED;
2558
Dan Williams209fae12011-06-13 17:39:44 -07002559 if (!idev)
Dan Williams6f231dd2011-07-02 22:56:22 -07002560 /* The device has been /is being stopped. Note that
2561 * we ignore the quiesce state, since we are
2562 * concerned about the actual device state.
2563 */
2564 status = SAS_DEVICE_UNKNOWN;
2565 else
2566 status = SAS_PHY_DOWN;
2567
2568 complete_to_host = isci_perform_aborted_io_completion;
2569
2570 /* This was an aborted request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002571
2572 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002573 break;
2574
2575 case terminating:
2576
2577 /* This was an terminated request. This happens when
2578 * the I/O is being terminated because of an action on
2579 * the device (reset, tear down, etc.), and the I/O needs
2580 * to be completed up the stack.
2581 */
Dan Williams38d88792011-06-23 14:33:48 -07002582 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Dan Williams6f231dd2011-07-02 22:56:22 -07002583 response = SAS_TASK_UNDELIVERED;
2584
2585 /* See if the device has been/is being stopped. Note
2586 * that we ignore the quiesce state, since we are
2587 * concerned about the actual device state.
2588 */
Dan Williams209fae12011-06-13 17:39:44 -07002589 if (!idev)
Dan Williams6f231dd2011-07-02 22:56:22 -07002590 status = SAS_DEVICE_UNKNOWN;
2591 else
2592 status = SAS_ABORTED_TASK;
2593
Jeff Skirvina5fde222011-03-04 14:06:42 -08002594 complete_to_host = isci_perform_aborted_io_completion;
Dan Williams6f231dd2011-07-02 22:56:22 -07002595
2596 /* This was a terminated request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002597
2598 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002599 break;
2600
Jeff Skirvin77c852f2011-06-20 14:09:16 -07002601 case dead:
2602 /* This was a terminated request that timed-out during the
2603 * termination process. There is no task to complete to
2604 * libsas.
2605 */
2606 complete_to_host = isci_perform_normal_io_completion;
2607 spin_unlock(&request->state_lock);
2608 break;
2609
Dan Williams6f231dd2011-07-02 22:56:22 -07002610 default:
2611
Jeff Skirvina5fde222011-03-04 14:06:42 -08002612 /* The request is done from an SCU HW perspective. */
2613 request->status = completed;
2614
2615 spin_unlock(&request->state_lock);
2616
Dan Williams6f231dd2011-07-02 22:56:22 -07002617 /* This is an active request being completed from the core. */
2618 switch (completion_status) {
2619
2620 case SCI_IO_FAILURE_RESPONSE_VALID:
2621 dev_dbg(&isci_host->pdev->dev,
2622 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2623 __func__,
2624 request,
2625 task);
2626
2627 if (sas_protocol_ata(task->task_proto)) {
Dan Williams5076a1a2011-06-27 14:57:03 -07002628 resp_buf = &request->stp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002629 isci_request_process_stp_response(task,
Dan Williamsb7645812011-05-08 02:35:32 -07002630 resp_buf);
Dan Williams6f231dd2011-07-02 22:56:22 -07002631 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2632
2633 /* crack the iu response buffer. */
Dan Williams5076a1a2011-06-27 14:57:03 -07002634 resp_iu = &request->ssp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002635 isci_request_process_response_iu(task, resp_iu,
Dan Williamsb7645812011-05-08 02:35:32 -07002636 &isci_host->pdev->dev);
Dan Williams6f231dd2011-07-02 22:56:22 -07002637
2638 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2639
2640 dev_err(&isci_host->pdev->dev,
2641 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2642 "SAS_PROTOCOL_SMP protocol\n",
2643 __func__);
2644
2645 } else
2646 dev_err(&isci_host->pdev->dev,
2647 "%s: unknown protocol\n", __func__);
2648
2649 /* use the task status set in the task struct by the
2650 * isci_request_process_response_iu call.
2651 */
Dan Williams38d88792011-06-23 14:33:48 -07002652 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Dan Williams6f231dd2011-07-02 22:56:22 -07002653 response = task->task_status.resp;
2654 status = task->task_status.stat;
2655 break;
2656
2657 case SCI_IO_SUCCESS:
2658 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2659
2660 response = SAS_TASK_COMPLETE;
2661 status = SAM_STAT_GOOD;
Dan Williams38d88792011-06-23 14:33:48 -07002662 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Dan Williams6f231dd2011-07-02 22:56:22 -07002663
2664 if (task->task_proto == SAS_PROTOCOL_SMP) {
Dan Williams5076a1a2011-06-27 14:57:03 -07002665 void *rsp = &request->smp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002666
2667 dev_dbg(&isci_host->pdev->dev,
2668 "%s: SMP protocol completion\n",
2669 __func__);
2670
2671 sg_copy_from_buffer(
2672 &task->smp_task.smp_resp, 1,
Dan Williamsb7645812011-05-08 02:35:32 -07002673 rsp, sizeof(struct smp_resp));
Dan Williams6f231dd2011-07-02 22:56:22 -07002674 } else if (completion_status
2675 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2676
2677 /* This was an SSP / STP / SATA transfer.
2678 * There is a possibility that less data than
2679 * the maximum was transferred.
2680 */
Dan Williams5076a1a2011-06-27 14:57:03 -07002681 u32 transferred_length = sci_req_tx_bytes(request);
Dan Williams6f231dd2011-07-02 22:56:22 -07002682
2683 task->task_status.residual
2684 = task->total_xfer_len - transferred_length;
2685
2686 /* If there were residual bytes, call this an
2687 * underrun.
2688 */
2689 if (task->task_status.residual != 0)
2690 status = SAS_DATA_UNDERRUN;
2691
2692 dev_dbg(&isci_host->pdev->dev,
2693 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2694 __func__,
2695 status);
2696
2697 } else
2698 dev_dbg(&isci_host->pdev->dev,
2699 "%s: SCI_IO_SUCCESS\n",
2700 __func__);
2701
2702 break;
2703
2704 case SCI_IO_FAILURE_TERMINATED:
2705 dev_dbg(&isci_host->pdev->dev,
2706 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2707 __func__,
2708 request,
2709 task);
2710
2711 /* The request was terminated explicitly. No handling
2712 * is needed in the SCSI error handler path.
2713 */
Dan Williams38d88792011-06-23 14:33:48 -07002714 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Dan Williams6f231dd2011-07-02 22:56:22 -07002715 response = SAS_TASK_UNDELIVERED;
2716
2717 /* See if the device has been/is being stopped. Note
2718 * that we ignore the quiesce state, since we are
2719 * concerned about the actual device state.
2720 */
Dan Williams209fae12011-06-13 17:39:44 -07002721 if (!idev)
Dan Williams6f231dd2011-07-02 22:56:22 -07002722 status = SAS_DEVICE_UNKNOWN;
2723 else
2724 status = SAS_ABORTED_TASK;
2725
2726 complete_to_host = isci_perform_normal_io_completion;
2727 break;
2728
2729 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2730
2731 isci_request_handle_controller_specific_errors(
Dan Williams209fae12011-06-13 17:39:44 -07002732 idev, request, task, &response, &status,
Dan Williams6f231dd2011-07-02 22:56:22 -07002733 &complete_to_host);
2734
2735 break;
2736
2737 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2738 /* This is a special case, in that the I/O completion
2739 * is telling us that the device needs a reset.
2740 * In order for the device reset condition to be
2741 * noticed, the I/O has to be handled in the error
2742 * handler. Set the reset flag and cause the
2743 * SCSI error thread to be scheduled.
2744 */
2745 spin_lock_irqsave(&task->task_state_lock, task_flags);
2746 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2747 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2748
Jeff Skirvinaa145102011-03-07 16:40:47 -07002749 /* Fail the I/O. */
2750 response = SAS_TASK_UNDELIVERED;
2751 status = SAM_STAT_TASK_ABORTED;
2752
Dan Williams6f231dd2011-07-02 22:56:22 -07002753 complete_to_host = isci_perform_error_io_completion;
Dan Williams38d88792011-06-23 14:33:48 -07002754 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Dan Williams6f231dd2011-07-02 22:56:22 -07002755 break;
2756
Jeff Skirvincde76fb2011-06-20 14:09:06 -07002757 case SCI_FAILURE_RETRY_REQUIRED:
2758
2759 /* Fail the I/O so it can be retried. */
2760 response = SAS_TASK_UNDELIVERED;
Dan Williams209fae12011-06-13 17:39:44 -07002761 if (!idev)
Jeff Skirvincde76fb2011-06-20 14:09:06 -07002762 status = SAS_DEVICE_UNKNOWN;
2763 else
2764 status = SAS_ABORTED_TASK;
2765
2766 complete_to_host = isci_perform_normal_io_completion;
Dan Williams38d88792011-06-23 14:33:48 -07002767 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Jeff Skirvincde76fb2011-06-20 14:09:06 -07002768 break;
2769
2770
Dan Williams6f231dd2011-07-02 22:56:22 -07002771 default:
2772 /* Catch any otherwise unhandled error codes here. */
2773 dev_warn(&isci_host->pdev->dev,
2774 "%s: invalid completion code: 0x%x - "
2775 "isci_request = %p\n",
2776 __func__, completion_status, request);
2777
2778 response = SAS_TASK_UNDELIVERED;
2779
2780 /* See if the device has been/is being stopped. Note
2781 * that we ignore the quiesce state, since we are
2782 * concerned about the actual device state.
2783 */
Dan Williams209fae12011-06-13 17:39:44 -07002784 if (!idev)
Dan Williams6f231dd2011-07-02 22:56:22 -07002785 status = SAS_DEVICE_UNKNOWN;
2786 else
2787 status = SAS_ABORTED_TASK;
2788
Jeff Skirvincde76fb2011-06-20 14:09:06 -07002789 if (SAS_PROTOCOL_SMP == task->task_proto) {
Dan Williams38d88792011-06-23 14:33:48 -07002790 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Jeff Skirvincde76fb2011-06-20 14:09:06 -07002791 complete_to_host = isci_perform_normal_io_completion;
2792 } else {
Dan Williams38d88792011-06-23 14:33:48 -07002793 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
Jeff Skirvincde76fb2011-06-20 14:09:06 -07002794 complete_to_host = isci_perform_error_io_completion;
2795 }
Dan Williams6f231dd2011-07-02 22:56:22 -07002796 break;
2797 }
2798 break;
2799 }
2800
Dan Williamsddcc7e32011-06-17 10:40:43 -07002801 switch (task->task_proto) {
2802 case SAS_PROTOCOL_SSP:
2803 if (task->data_dir == DMA_NONE)
2804 break;
2805 if (task->num_scatter == 0)
2806 /* 0 indicates a single dma address */
2807 dma_unmap_single(&isci_host->pdev->dev,
2808 request->zero_scatter_daddr,
2809 task->total_xfer_len, task->data_dir);
2810 else /* unmap the sgl dma addresses */
2811 dma_unmap_sg(&isci_host->pdev->dev, task->scatter,
2812 request->num_sg_entries, task->data_dir);
2813 break;
Dan Williamse9bf7092011-06-16 16:59:56 -07002814 case SAS_PROTOCOL_SMP: {
2815 struct scatterlist *sg = &task->smp_task.smp_req;
2816 struct smp_req *smp_req;
2817 void *kaddr;
2818
2819 dma_unmap_sg(&isci_host->pdev->dev, sg, 1, DMA_TO_DEVICE);
2820
2821 /* need to swab it back in case the command buffer is re-used */
2822 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
2823 smp_req = kaddr + sg->offset;
2824 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2825 kunmap_atomic(kaddr, KM_IRQ0);
2826 break;
2827 }
Dan Williamsddcc7e32011-06-17 10:40:43 -07002828 default:
2829 break;
2830 }
Dan Williams6f231dd2011-07-02 22:56:22 -07002831
2832 /* Put the completed request on the correct list */
2833 isci_task_save_for_upper_layer_completion(isci_host, request, response,
2834 status, complete_to_host
2835 );
2836
2837 /* complete the io request to the core. */
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00002838 scic_controller_complete_io(&isci_host->sci,
Dan Williams5076a1a2011-06-27 14:57:03 -07002839 request->target_device,
2840 request);
Dan Williams209fae12011-06-13 17:39:44 -07002841 isci_put_device(idev);
2842
Dan Williams67ea8382011-05-08 11:47:15 -07002843 /* set terminated handle so it cannot be completed or
Dan Williams6f231dd2011-07-02 22:56:22 -07002844 * terminated again, and to cause any calls into abort
2845 * task to recognize the already completed case.
2846 */
Dan Williams38d88792011-06-23 14:33:48 -07002847 set_bit(IREQ_TERMINATED, &request->flags);
Dan Williams6f231dd2011-07-02 22:56:22 -07002848}
Dan Williamsf1f52e72011-05-10 02:28:45 -07002849
Dan Williams9269e0e2011-05-12 07:42:17 -07002850static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
Dan Williamsf1f52e72011-05-10 02:28:45 -07002851{
Dan Williams5076a1a2011-06-27 14:57:03 -07002852 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2853 struct domain_device *dev = sci_dev_to_domain(ireq->target_device);
Dan Williamsc72086e2011-05-10 02:28:48 -07002854 struct sas_task *task;
2855
2856 /* XXX as hch said always creating an internal sas_task for tmf
2857 * requests would simplify the driver
2858 */
2859 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
Dan Williamsf1f52e72011-05-10 02:28:45 -07002860
Dan Williams5dec6f42011-05-10 02:28:49 -07002861 /* all unaccelerated request types (non ssp or ncq) handled with
2862 * substates
Dan Williamsf1393032011-05-10 02:28:47 -07002863 */
Dan Williamsc72086e2011-05-10 02:28:48 -07002864 if (!task && dev->dev_type == SAS_END_DEV) {
Edmund Nadolskie3013702011-06-02 00:10:43 +00002865 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
Dan Williams5dec6f42011-05-10 02:28:49 -07002866 } else if (!task &&
2867 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2868 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
Edmund Nadolskie3013702011-06-02 00:10:43 +00002869 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
Dan Williamsc72086e2011-05-10 02:28:48 -07002870 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
Edmund Nadolskie3013702011-06-02 00:10:43 +00002871 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
Dan Williams5dec6f42011-05-10 02:28:49 -07002872 } else if (task && sas_protocol_ata(task->task_proto) &&
2873 !task->ata_task.use_ncq) {
2874 u32 state;
2875
2876 if (task->data_dir == DMA_NONE)
Edmund Nadolskie3013702011-06-02 00:10:43 +00002877 state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
Dan Williams5dec6f42011-05-10 02:28:49 -07002878 else if (task->ata_task.dma_xfer)
Edmund Nadolskie3013702011-06-02 00:10:43 +00002879 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
Dan Williams5dec6f42011-05-10 02:28:49 -07002880 else /* PIO */
Edmund Nadolskie3013702011-06-02 00:10:43 +00002881 state = SCI_REQ_STP_PIO_WAIT_H2D;
Dan Williams5dec6f42011-05-10 02:28:49 -07002882
Edmund Nadolskie3013702011-06-02 00:10:43 +00002883 sci_change_state(sm, state);
Dan Williamsc72086e2011-05-10 02:28:48 -07002884 }
Dan Williamsf1f52e72011-05-10 02:28:45 -07002885}
2886
Dan Williams9269e0e2011-05-12 07:42:17 -07002887static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
Dan Williamsf1f52e72011-05-10 02:28:45 -07002888{
Dan Williams5076a1a2011-06-27 14:57:03 -07002889 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2890 struct scic_sds_controller *scic = ireq->owning_controller;
Dan Williamsf1f52e72011-05-10 02:28:45 -07002891 struct isci_host *ihost = scic_to_ihost(scic);
Dan Williamsf1f52e72011-05-10 02:28:45 -07002892
Dan Williamsf1f52e72011-05-10 02:28:45 -07002893 /* Tell the SCI_USER that the IO request is complete */
Dan Williams38d88792011-06-23 14:33:48 -07002894 if (!test_bit(IREQ_TMF, &ireq->flags))
Dan Williamsf1f52e72011-05-10 02:28:45 -07002895 isci_request_io_request_complete(ihost, ireq,
Dan Williams5076a1a2011-06-27 14:57:03 -07002896 ireq->sci_status);
Dan Williamsf1f52e72011-05-10 02:28:45 -07002897 else
Dan Williams5076a1a2011-06-27 14:57:03 -07002898 isci_task_request_complete(ihost, ireq, ireq->sci_status);
Dan Williamsf1f52e72011-05-10 02:28:45 -07002899}
2900
Dan Williams9269e0e2011-05-12 07:42:17 -07002901static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
Dan Williamsf1f52e72011-05-10 02:28:45 -07002902{
Dan Williams5076a1a2011-06-27 14:57:03 -07002903 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
Dan Williamsf1f52e72011-05-10 02:28:45 -07002904
2905 /* Setting the abort bit in the Task Context is required by the silicon. */
Dan Williams5076a1a2011-06-27 14:57:03 -07002906 ireq->tc->abort = 1;
Dan Williamsc72086e2011-05-10 02:28:48 -07002907}
2908
Dan Williams9269e0e2011-05-12 07:42:17 -07002909static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
Dan Williams5dec6f42011-05-10 02:28:49 -07002910{
Dan Williams5076a1a2011-06-27 14:57:03 -07002911 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
Dan Williams5dec6f42011-05-10 02:28:49 -07002912
Dan Williams5076a1a2011-06-27 14:57:03 -07002913 scic_sds_remote_device_set_working_request(ireq->target_device,
2914 ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -07002915}
2916
Dan Williams9269e0e2011-05-12 07:42:17 -07002917static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
Dan Williams5dec6f42011-05-10 02:28:49 -07002918{
Dan Williams5076a1a2011-06-27 14:57:03 -07002919 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
Dan Williams5dec6f42011-05-10 02:28:49 -07002920
Dan Williams5076a1a2011-06-27 14:57:03 -07002921 scic_sds_remote_device_set_working_request(ireq->target_device,
2922 ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -07002923}
2924
Dan Williams9269e0e2011-05-12 07:42:17 -07002925static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
Dan Williams5dec6f42011-05-10 02:28:49 -07002926{
Dan Williams5076a1a2011-06-27 14:57:03 -07002927 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
Dan Williams5dec6f42011-05-10 02:28:49 -07002928
Dan Williams5076a1a2011-06-27 14:57:03 -07002929 scic_sds_remote_device_set_working_request(ireq->target_device,
2930 ireq);
Dan Williams5dec6f42011-05-10 02:28:49 -07002931}
2932
Dan Williams9269e0e2011-05-12 07:42:17 -07002933static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
Dan Williams5dec6f42011-05-10 02:28:49 -07002934{
Dan Williams5076a1a2011-06-27 14:57:03 -07002935 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2936 struct scu_task_context *tc = ireq->tc;
Dan Williams5dec6f42011-05-10 02:28:49 -07002937 struct host_to_dev_fis *h2d_fis;
2938 enum sci_status status;
2939
2940 /* Clear the SRST bit */
Dan Williams5076a1a2011-06-27 14:57:03 -07002941 h2d_fis = &ireq->stp.cmd;
Dan Williams5dec6f42011-05-10 02:28:49 -07002942 h2d_fis->control = 0;
2943
2944 /* Clear the TC control bit */
Dan Williams312e0c22011-06-28 13:47:09 -07002945 tc->control_frame = 0;
Dan Williams5dec6f42011-05-10 02:28:49 -07002946
Dan Williams5076a1a2011-06-27 14:57:03 -07002947 status = scic_controller_continue_io(ireq);
Dan Williams79e2b6b2011-05-11 08:29:56 -07002948 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
Dan Williams5dec6f42011-05-10 02:28:49 -07002949}
2950
/* Request state machine definition, indexed by the SCI_REQ_* state enum.
 * Only states with entry-time side effects supply an .enter_state handler;
 * the empty initializers are pure wait states.
 */
static const struct sci_base_state scic_sds_request_state_table[] = {
	[SCI_REQ_INIT] = { },
	[SCI_REQ_CONSTRUCTED] = { },
	[SCI_REQ_STARTED] = {
		.enter_state = scic_sds_request_started_state_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
	[SCI_REQ_STP_PIO_WAIT_H2D] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
	[SCI_REQ_STP_PIO_DATA_IN] = { },
	[SCI_REQ_STP_PIO_DATA_OUT] = { },
	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
	[SCI_REQ_SMP_WAIT_RESP] = { },
	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
	[SCI_REQ_COMPLETED] = {
		.enter_state = scic_sds_request_completed_state_enter,
	},
	[SCI_REQ_ABORTING] = {
		.enter_state = scic_sds_request_aborting_state_enter,
	},
	[SCI_REQ_FINAL] = { },
};
2988
Edmund Nadolskie3013702011-06-02 00:10:43 +00002989static void
2990scic_sds_general_request_construct(struct scic_sds_controller *scic,
2991 struct scic_sds_remote_device *sci_dev,
Dan Williams5076a1a2011-06-27 14:57:03 -07002992 struct isci_request *ireq)
Dan Williamsf1f52e72011-05-10 02:28:45 -07002993{
Dan Williams5076a1a2011-06-27 14:57:03 -07002994 sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT);
Dan Williamsf1f52e72011-05-10 02:28:45 -07002995
Dan Williams5076a1a2011-06-27 14:57:03 -07002996 ireq->target_device = sci_dev;
2997 ireq->protocol = SCIC_NO_PROTOCOL;
2998 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
Dan Williamsf1f52e72011-05-10 02:28:45 -07002999
Dan Williams5076a1a2011-06-27 14:57:03 -07003000 ireq->sci_status = SCI_SUCCESS;
3001 ireq->scu_status = 0;
3002 ireq->post_context = 0xFFFFFFFF;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003003}
3004
3005static enum sci_status
3006scic_io_request_construct(struct scic_sds_controller *scic,
3007 struct scic_sds_remote_device *sci_dev,
Dan Williams5076a1a2011-06-27 14:57:03 -07003008 struct isci_request *ireq)
Dan Williamsf1f52e72011-05-10 02:28:45 -07003009{
3010 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3011 enum sci_status status = SCI_SUCCESS;
3012
3013 /* Build the common part of the request */
Dan Williams5076a1a2011-06-27 14:57:03 -07003014 scic_sds_general_request_construct(scic, sci_dev, ireq);
Dan Williamsf1f52e72011-05-10 02:28:45 -07003015
Dan Williamsc72086e2011-05-10 02:28:48 -07003016 if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
Dan Williamsf1f52e72011-05-10 02:28:45 -07003017 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3018
3019 if (dev->dev_type == SAS_END_DEV)
Dan Williamsc72086e2011-05-10 02:28:48 -07003020 /* pass */;
3021 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
Dan Williams5076a1a2011-06-27 14:57:03 -07003022 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
Dan Williamsc72086e2011-05-10 02:28:48 -07003023 else if (dev_is_expander(dev))
Dan Williamse9bf7092011-06-16 16:59:56 -07003024 /* pass */;
Dan Williamsc72086e2011-05-10 02:28:48 -07003025 else
3026 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003027
Dan Williams5076a1a2011-06-27 14:57:03 -07003028 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
Dan Williamsf1f52e72011-05-10 02:28:45 -07003029
3030 return status;
3031}
3032
3033enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3034 struct scic_sds_remote_device *sci_dev,
Dan Williams5076a1a2011-06-27 14:57:03 -07003035 u16 io_tag, struct isci_request *ireq)
Dan Williamsf1f52e72011-05-10 02:28:45 -07003036{
3037 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3038 enum sci_status status = SCI_SUCCESS;
3039
3040 /* Build the common part of the request */
Dan Williams5076a1a2011-06-27 14:57:03 -07003041 scic_sds_general_request_construct(scic, sci_dev, ireq);
Dan Williamsf1f52e72011-05-10 02:28:45 -07003042
Dan Williamsc72086e2011-05-10 02:28:48 -07003043 if (dev->dev_type == SAS_END_DEV ||
3044 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
Dan Williams5076a1a2011-06-27 14:57:03 -07003045 set_bit(IREQ_TMF, &ireq->flags);
3046 memset(ireq->tc, 0, sizeof(struct scu_task_context));
Dan Williamsc72086e2011-05-10 02:28:48 -07003047 } else
3048 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003049
3050 return status;
3051}
3052
3053static enum sci_status isci_request_ssp_request_construct(
3054 struct isci_request *request)
3055{
3056 enum sci_status status;
3057
3058 dev_dbg(&request->isci_host->pdev->dev,
3059 "%s: request = %p\n",
3060 __func__,
3061 request);
Dan Williams5076a1a2011-06-27 14:57:03 -07003062 status = scic_io_request_construct_basic_ssp(request);
Dan Williamsf1f52e72011-05-10 02:28:45 -07003063 return status;
3064}
3065
3066static enum sci_status isci_request_stp_request_construct(
3067 struct isci_request *request)
3068{
3069 struct sas_task *task = isci_request_access_task(request);
3070 enum sci_status status;
3071 struct host_to_dev_fis *register_fis;
3072
3073 dev_dbg(&request->isci_host->pdev->dev,
3074 "%s: request = %p\n",
3075 __func__,
3076 request);
3077
3078 /* Get the host_to_dev_fis from the core and copy
3079 * the fis from the task into it.
3080 */
3081 register_fis = isci_sata_task_to_fis_copy(task);
3082
Dan Williams5076a1a2011-06-27 14:57:03 -07003083 status = scic_io_request_construct_basic_sata(request);
Dan Williamsf1f52e72011-05-10 02:28:45 -07003084
3085 /* Set the ncq tag in the fis, from the queue
3086 * command in the task.
3087 */
3088 if (isci_sata_is_task_ncq(task)) {
3089
3090 isci_sata_set_ncq_tag(
3091 register_fis,
3092 task
3093 );
3094 }
3095
3096 return status;
3097}
3098
/* Build the SCU task context for an SMP request: normalize the request
 * length, byte-swap the payload in place for the hardware, DMA-map the
 * request buffer, and fill in the task context fields.  Returns
 * SCI_SUCCESS, or SCI_FAILURE when the buffer cannot be DMA-mapped.
 */
static enum sci_status
scic_io_request_construct_smp(struct device *dev,
			      struct isci_request *ireq,
			      struct sas_task *task)
{
	struct scatterlist *sg = &task->smp_task.smp_req;
	struct scic_sds_remote_device *sci_dev;
	struct scu_task_context *task_context;
	struct isci_port *iport;
	struct smp_req *smp_req;
	void *kaddr;
	u8 req_len;
	u32 cmd;

	kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
	smp_req = kaddr + sg->offset;
	/*
	 * Look at the SMP requests' header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
		/* Default - zero is a valid default for 2.0. */
		}
	}
	/* Capture the request length and the first dword of the command
	 * while the page is still mapped; the payload is swapped in place.
	 */
	req_len = smp_req->req_len;
	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
	cmd = *(u32 *) smp_req;
	kunmap_atomic(kaddr, KM_IRQ0);

	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
		return SCI_FAILURE;

	ireq->protocol = SCIC_SMP_PROTOCOL;

	/* The SMP request payload was already byte swapped above. */

	task_context = ireq->tc;

	sci_dev = scic_sds_request_get_device(ireq);
	iport = scic_sds_request_get_port(ireq);

	/*
	 * Fill in the TC with the its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = sci_dev->connection_rate;
	/* NOTE(review): "scic" is not declared in this scope; this
	 * presumably only builds because the accessor is a macro that
	 * ignores its argument -- confirm against the header.
	 */
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(scic);
	task_context->logical_port_index = scic_sds_port_get_index(iport);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = sci_dev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since commandIU has been build by framework at this point, we just
	 * copy the first DWord from command IU to this location. */
	memcpy(&task_context->type.smp, &cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (scic_sds_controller_get_protocol_engine_group(scic) <<
			       SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (scic_sds_port_get_index(iport) <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context command buffer should not contain command header.
	 */
	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}
3224
3225/*
Dan Williamsf1f52e72011-05-10 02:28:45 -07003226 * isci_smp_request_build() - This function builds the smp request.
3227 * @ireq: This parameter points to the isci_request allocated in the
3228 * request construct function.
3229 *
3230 * SCI_SUCCESS on successfull completion, or specific failure code.
3231 */
3232static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3233{
Dan Williamsf1f52e72011-05-10 02:28:45 -07003234 struct sas_task *task = isci_request_access_task(ireq);
Dan Williamse9bf7092011-06-16 16:59:56 -07003235 struct device *dev = &ireq->isci_host->pdev->dev;
Dan Williamse9bf7092011-06-16 16:59:56 -07003236 enum sci_status status = SCI_FAILURE;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003237
Dan Williams5076a1a2011-06-27 14:57:03 -07003238 status = scic_io_request_construct_smp(dev, ireq, task);
Dan Williamsf1f52e72011-05-10 02:28:45 -07003239 if (status != SCI_SUCCESS)
3240 dev_warn(&ireq->isci_host->pdev->dev,
3241 "%s: failed with status = %d\n",
3242 __func__,
3243 status);
3244
3245 return status;
3246}
3247
3248/**
3249 * isci_io_request_build() - This function builds the io request object.
3250 * @isci_host: This parameter specifies the ISCI host object
3251 * @request: This parameter points to the isci_request object allocated in the
3252 * request construct function.
3253 * @sci_device: This parameter is the handle for the sci core's remote device
3254 * object that is the destination for this request.
3255 *
3256 * SCI_SUCCESS on successfull completion, or specific failure code.
3257 */
Dan Williams312e0c22011-06-28 13:47:09 -07003258static enum sci_status isci_io_request_build(struct isci_host *isci_host,
3259 struct isci_request *request,
Dan Williamsdb056252011-06-17 14:18:39 -07003260 struct isci_remote_device *isci_device)
Dan Williamsf1f52e72011-05-10 02:28:45 -07003261{
3262 enum sci_status status = SCI_SUCCESS;
3263 struct sas_task *task = isci_request_access_task(request);
3264 struct scic_sds_remote_device *sci_device = &isci_device->sci;
3265
3266 dev_dbg(&isci_host->pdev->dev,
3267 "%s: isci_device = 0x%p; request = %p, "
3268 "num_scatter = %d\n",
3269 __func__,
3270 isci_device,
3271 request,
3272 task->num_scatter);
3273
3274 /* map the sgl addresses, if present.
3275 * libata does the mapping for sata devices
3276 * before we get the request.
3277 */
3278 if (task->num_scatter &&
3279 !sas_protocol_ata(task->task_proto) &&
3280 !(SAS_PROTOCOL_SMP & task->task_proto)) {
3281
3282 request->num_sg_entries = dma_map_sg(
3283 &isci_host->pdev->dev,
3284 task->scatter,
3285 task->num_scatter,
3286 task->data_dir
3287 );
3288
3289 if (request->num_sg_entries == 0)
3290 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3291 }
3292
Dan Williamsf1f52e72011-05-10 02:28:45 -07003293 status = scic_io_request_construct(&isci_host->sci, sci_device,
Dan Williams5076a1a2011-06-27 14:57:03 -07003294 request);
Dan Williamsf1f52e72011-05-10 02:28:45 -07003295
3296 if (status != SCI_SUCCESS) {
3297 dev_warn(&isci_host->pdev->dev,
3298 "%s: failed request construct\n",
3299 __func__);
3300 return SCI_FAILURE;
3301 }
3302
3303 switch (task->task_proto) {
3304 case SAS_PROTOCOL_SMP:
3305 status = isci_smp_request_build(request);
3306 break;
3307 case SAS_PROTOCOL_SSP:
3308 status = isci_request_ssp_request_construct(request);
3309 break;
3310 case SAS_PROTOCOL_SATA:
3311 case SAS_PROTOCOL_STP:
3312 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3313 status = isci_request_stp_request_construct(request);
3314 break;
3315 default:
3316 dev_warn(&isci_host->pdev->dev,
3317 "%s: unknown protocol\n", __func__);
3318 return SCI_FAILURE;
3319 }
3320
3321 return SCI_SUCCESS;
3322}
3323
Dan Williamsdb056252011-06-17 14:18:39 -07003324static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
Dan Williamsf1f52e72011-05-10 02:28:45 -07003325{
Dan Williams0d0cf142011-06-13 00:51:30 -07003326 struct isci_request *ireq;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003327
Dan Williamsdb056252011-06-17 14:18:39 -07003328 ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
Dan Williams5076a1a2011-06-27 14:57:03 -07003329 ireq->io_tag = tag;
Dan Williams0d0cf142011-06-13 00:51:30 -07003330 ireq->io_request_completion = NULL;
Dan Williams38d88792011-06-23 14:33:48 -07003331 ireq->flags = 0;
Dan Williams0d0cf142011-06-13 00:51:30 -07003332 ireq->num_sg_entries = 0;
Dan Williams0d0cf142011-06-13 00:51:30 -07003333 INIT_LIST_HEAD(&ireq->completed_node);
3334 INIT_LIST_HEAD(&ireq->dev_node);
Dan Williams0d0cf142011-06-13 00:51:30 -07003335 isci_request_change_state(ireq, allocated);
Dan Williamsf1f52e72011-05-10 02:28:45 -07003336
Dan Williams0d0cf142011-06-13 00:51:30 -07003337 return ireq;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003338}
3339
Dan Williamsdb056252011-06-17 14:18:39 -07003340static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3341 struct sas_task *task,
3342 u16 tag)
Dan Williamsf1f52e72011-05-10 02:28:45 -07003343{
Dan Williams0d0cf142011-06-13 00:51:30 -07003344 struct isci_request *ireq;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003345
Dan Williamsdb056252011-06-17 14:18:39 -07003346 ireq = isci_request_from_tag(ihost, tag);
3347 ireq->ttype_ptr.io_task_ptr = task;
3348 ireq->ttype = io_task;
3349 task->lldd_task = ireq;
3350
Dan Williams0d0cf142011-06-13 00:51:30 -07003351 return ireq;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003352}
3353
Dan Williamsdb056252011-06-17 14:18:39 -07003354struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3355 struct isci_tmf *isci_tmf,
3356 u16 tag)
Dan Williamsf1f52e72011-05-10 02:28:45 -07003357{
Dan Williams0d0cf142011-06-13 00:51:30 -07003358 struct isci_request *ireq;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003359
Dan Williamsdb056252011-06-17 14:18:39 -07003360 ireq = isci_request_from_tag(ihost, tag);
3361 ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3362 ireq->ttype = tmf_task;
3363
Dan Williams0d0cf142011-06-13 00:51:30 -07003364 return ireq;
Dan Williamsf1f52e72011-05-10 02:28:45 -07003365}
3366
/**
 * isci_request_execute() - build and submit an I/O request to the controller
 * @ihost: host owning the request slot for @tag
 * @idev:  remote device the I/O is directed at
 * @task:  libsas task describing the I/O
 * @tag:   pre-allocated request tag (indexes ihost->reqs[])
 *
 * Builds the protocol-specific request, then starts it under scic_lock.
 * On success (or when the core asks for a device reset) the request is
 * held on the device's reqs_in_process list.
 *
 * Return: 0 when the I/O was started or is being held pending error
 * handling; otherwise a non-zero sci_status from the build/start step.
 */
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, u16 tag)
{
	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
	struct isci_request *ireq;
	unsigned long flags;
	int ret = 0;

	/* do common allocation and init of request object. */
	ireq = isci_io_request_from_tag(ihost, task, tag);

	status = isci_io_request_build(ihost, ireq, idev);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: request_construct failed - status = 0x%x\n",
			 __func__,
			 status);
		/* Build failures return the raw sci_status to the caller. */
		return status;
	}

	/* scic_lock serializes submission with the controller state machine;
	 * everything from here to the matching unlock must stay atomic.
	 */
	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {

		if (isci_task_is_ncq_recovery(task)) {

			/* The device is in an NCQ recovery state.  Issue the
			 * request on the task side.  Note that it will
			 * complete on the I/O request side because the
			 * request was built that way (ie.
			 * ireq->is_task_management_request is false).
			 */
			status = scic_controller_start_task(&ihost->sci,
							    &idev->sci,
							    ireq);
		} else {
			/* Non-recovery I/O is refused while the device is in
			 * NCQ error state.
			 */
			status = SCI_FAILURE;
		}
	} else {
		/* send the request, let the core assign the IO TAG. */
		status = scic_controller_start_io(&ihost->sci, &idev->sci,
						  ireq);
	}

	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_warn(&ihost->pdev->dev,
			 "%s: failed request start (0x%x)\n",
			 __func__, status);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return status;
	}

	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 *
	 * In either case, hold onto the I/O for later.
	 *
	 * Update it's status and add it to the list in the
	 * remote device object.
	 */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	if (status == SCI_SUCCESS) {
		isci_request_change_state(ireq, started);
	} else {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		set_bit(IREQ_TERMINATED, &ireq->flags);
		isci_request_change_state(ireq, completed);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status ==
	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		isci_execpath_callback(ihost, task,
				       sas_task_abort);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 * NOTE(review): this store is dead — status is not read
		 * again and the function returns ret (always 0) below;
		 * presumably intentional "held I/O counts as success",
		 * but worth confirming.
		 */
		status = SCI_SUCCESS;
	}

	return ret;
}