/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index)
{
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;

	if (sgl_pair_index == 0)
		return &task_context->sgl_pair_ab;
	else if (sgl_pair_index == 1)
		return &task_context->sgl_pair_cd;

	return &sci_req->sg_table[sgl_pair_index - 2];
}
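
/*
 * Usage sketch (illustrative only, hence compiled out): the first two SGL
 * element pairs are embedded in the task context itself; every pair past
 * that spills into the request's sg_table.  A caller walking the pairs
 * therefore sees the following mapping.
 */
#if 0
static void sgl_pair_mapping_example(struct scic_sds_request *sci_req)
{
	/* index 0 -> &task_context->sgl_pair_ab */
	struct scu_sgl_element_pair *p0 =
		scic_sds_request_get_sgl_element_pair(sci_req, 0);
	/* index 1 -> &task_context->sgl_pair_cd */
	struct scu_sgl_element_pair *p1 =
		scic_sds_request_get_sgl_element_pair(sci_req, 1);
	/* index n >= 2 -> &sci_req->sg_table[n - 2] */
	struct scu_sgl_element_pair *p2 =
		scic_sds_request_get_sgl_element_pair(sci_req, 2);

	(void)p0; (void)p1; (void)p2;
}
#endif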

/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 */
static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
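
/*
 * A minimal sketch of the 64-bit address handling used above: the SCU
 * consumes bus addresses as two 32-bit halves, so every dma_addr_t in this
 * file is split with upper_32_bits()/lower_32_bits() and can be rejoined
 * the same way.  Names here are illustrative only.
 */
#if 0
static u64 dma_addr_split_rejoin_example(dma_addr_t dma_addr)
{
	u32 hi = upper_32_bits(dma_addr);
	u32 lo = lower_32_bits(dma_addr);

	/* the reconstructed value equals the original bus address */
	return ((u64)hi << 32) | lo;
}
#endif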

static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}

static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request: This parameter specifies the request for which the task
 *    context is being constructed.
 * @task_context: This parameter specifies the buffer into which the task
 *    context is constructed.
 *
 */
static void scu_ssp_request_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sds_request);
	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sci_req->io_tag; */
	task_context->task_phase = 0x01;

	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data
		 *
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
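
/*
 * A worked sketch (values hypothetical) of the post_context packing done
 * above: the protocol engine group, logical port index, and optionally the
 * task context index are OR'ed into one word that is later handed to the
 * controller's post queue.
 */
#if 0
static u32 post_context_packing_example(u8 pe_group, u8 port_index, u16 tci)
{
	return SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
	       (pe_group << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
	       (port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
	       tci;	/* omitted until a TCi is known, see the else branch */
}
#endif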

/**
 * This method will fill in the SCU Task Context for an SSP IO request.
 * @sci_req: This parameter specifies the IO request whose task context is
 *    being constructed.
 * @dir: This parameter specifies the data direction for the request.
 * @len: This parameter specifies the total transfer length, in bytes.
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}

/**
 * This method will fill in the SCU Task Context for an SSP Task request.
 * The following important settings are utilized:
 *
 *   - priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *     request is issued ahead of other tasks destined for the same
 *     remote node.
 *   - task_type == SCU_TASK_TYPE_RAW_FRAME.  This indicates that a raw
 *     frame is being utilized to perform task management.
 *   - control_frame == 1.  This ensures that the proper endianness is set
 *     so that the bytes are transmitted in the right order for a task
 *     frame.
 *
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  It is called from the various SATA constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * By the time this method returns, the general io request construction is
 * complete and the buffer assignment for the command buffer is complete.
 * TODO: Revisit task context construction to determine what is common for
 * SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sci_req);
	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
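
/*
 * Worked arithmetic for the FIS split above, assuming the canonical 20-byte
 * SATA host-to-device register FIS: the first dword rides inside the task
 * context via type.words[0], so only the remaining (20 - 4) / 4 = 4 dwords
 * are DMA'd from the command buffer.  That is why ssp_command_iu_length is
 * computed as it is and the DMA address is offset by sizeof(u32).
 */
#if 0
static u32 fis_dwords_after_first_example(void)
{
	/* 4 remaining dwords for a 20-byte H2D register FIS */
	return (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
}
#endif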

/**
 * scu_stp_raw_request_construct_task_context -
 * @stp_req: This parameter specifies the STP request object for which to
 *    construct a RAW command frame task context.
 * @task_context: This parameter specifies the SCU specific task context buffer
 *    to construct.
 *
 * This method performs the operations common to all SATA/STP requests
 * utilizing the raw frame method.
 */
static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
						       struct scu_task_context *task_context)
{
	struct scic_sds_request *sci_req = to_sci_req(stp_req);

	scu_sata_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	return SCI_SUCCESS;
}

/**
 *
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request.  A value of 0 indicates UDMA; a
 *    value of 1 indicates NCQ.
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = sci_req->task_context_buffer;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(sci_req, task_context);

	/* Copy over the SGL elements */
	scic_sds_request_build_sgl(sci_req);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task
		 * type values is consistent with the difference between FPDMA
		 * READ and FPDMA WRITE values.  Add the supplied task type
		 * parameter to this difference to set the task type properly
		 * for this DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}
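
/*
 * Illustrative sketch of the task-type arithmetic above, assuming (as the
 * comment states) that the read/write task-type pairs are spaced by the
 * same delta in the SCU_TASK_TYPE_* enumeration: the write variant of any
 * "optimized" read type is derived by adding the DMA OUT/IN difference.
 */
#if 0
static u8 optimized_write_task_type_example(u8 read_task_type)
{
	/* e.g. SCU_TASK_TYPE_FPDMAQ_READ -> the corresponding FPDMA write */
	return read_task_type + (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN);
}
#endif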

static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
			return SCI_SUCCESS;
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
							   sci_req->task_context_buffer);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return scic_sds_stp_pio_request_construct(sci_req, copy);
}

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	struct scic_sds_stp_request *stp_req;
	bool copy = false;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	stp_req = &sci_req->stp.req;
	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 * BAR1 is the scu_registers
		 * 0x20002C = 0x200000 + 0x2c
		 *	    = start of task context SRAM + offset of (type.ssp.data_offset)
		 * TCi is the io_tag of struct scic_sds_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
	}

	return ret_val;
}
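
/*
 * Worked example of the SRAM address computation above (hypothetical tag),
 * using the numbers from the comment: with a 256-byte struct
 * scu_task_context, a 0x2C data_offset offset, and TCi == 5, the read lands
 * at BAR1 + 0x200000 + 0x2C + 5 * 256 = BAR1 + 0x20052C.
 */
#if 0
static void __iomem *tx_bytes_addr_example(void __iomem *scu_reg_base, u16 tci)
{
	return scu_reg_base + SCU_TASK_CONTEXT_SRAM +
	       offsetof(struct scu_task_context, type.ssp.data_offset) +
	       sizeof(struct scu_task_context) * tci;
}
#endif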

enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct scu_task_context *task_context;
	enum sci_base_request_states state;

	if (sci_req->device_sequence !=
	    scic_sds_remote_device_get_sequence(sci_req->target_device))
		return SCI_FAILURE;

	state = sci_req->state_machine.current_state_id;
	if (state != SCI_BASE_REQUEST_STATE_CONSTRUCTED) {
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	/* If necessary, allocate a TCi for the io request object and then,
	 * if necessary, copy the constructed TC data into the actual TC
	 * buffer.  If everything is successful the post context field is
	 * updated with the TCi so the controller can post the request to the
	 * hardware.
	 */
	if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
		sci_req->io_tag = scic_controller_allocate_io_tag(scic);

	/* Record the IO Tag in the request */
	if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = sci_req->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(sci_req->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = sci_req->io_tag;
			task_context->type.ssp.target_port_transfer_tag =
				0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/* STP/SATA Frame
			 * task_context->type.stp.ncq_tag = sci_req->ncq_tag;
			 */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* @todo When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO
			 * requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer */
		if (sci_req->was_tag_assigned_by_user == false)
			scic_sds_controller_copy_task_context(scic, sci_req);

		/* Add to the post_context the io tag value */
		sci_req->post_context |= scic_sds_io_tag_get_index(sci_req->io_tag);

		/* Everything is good, so go ahead and change state */
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_STARTED);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
{
	enum sci_base_request_states state;

	state = sci_req->state_machine.current_state_id;

	switch (state) {
	case SCI_BASE_REQUEST_STATE_CONSTRUCTED:
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_STARTED:
	case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION:
	case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE:
	case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION:
	case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_ABORTING);
		return SCI_SUCCESS;
	case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_ABORTING);
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_ABORTING:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_COMPLETED:
	default:
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 sci_base_state_machine_get_state(&sci_req->state_machine));
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status scic_sds_io_request_event_handler(
	struct scic_sds_request *request,
	u32 event_code)
{
	if (request->state_handlers->event_handler)
		return request->state_handlers->event_handler(request, event_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given event code notification %x while "
		 "in wrong state %d\n",
		 __func__,
		 event_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &sci_req->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

/*
 * scic_sds_request_started_state_tc_completion_handler() - This method
 *    processes TC (task context) completions for normal IO requests (i.e.
 *    Task/Abort Completions of type 0).  This method will update the
 *    SCIC_SDS_IO_REQUEST_T::status field.
 * @sci_req: This parameter specifies the request for which a completion
 *    occurred.
 * @completion_code: This parameter specifies the completion code received from
 *    the SCU.
 */
static enum sci_status
scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req,
						     u32 completion_code)
{
	u8 datapres;
	struct ssp_response_iu *resp_iu;

	/*
	 * TODO: Any SDMA return code of other than 0 is bad
	 *	 decode 0x003C0000 to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
	{
		/*
		 * There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response status to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_GOOD,
				SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		}
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
	{
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/*
		 * @todo With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;

		if ((datapres == 0x01) || (datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
		break;

	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
{
	if (request->state_handlers->tc_completion_handler)
		return request->state_handlers->tc_completion_handler(request, completion_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given task completion notification %x "
		 "while in wrong state %d\n",
		 __func__,
		 completion_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_complete() request.  This method frees
 * up any io request resources that have been allocated and transitions the
 * request to its final state.
 * TODO: Consider stopping the state machine instead of transitioning to the
 * final state.
 *
 * Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_completed_state_complete_handler(
	struct scic_sds_request *request)
{
	if (request->was_tag_assigned_by_user != true) {
		scic_controller_free_io_tag(
			request->owning_controller, request->io_tag);
	}

	if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
		scic_sds_controller_release_frame(
			request->owning_controller, request->saved_rx_frame_index);
	}

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_FINAL);
	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_task_completion() request.  This method
 * decodes the completion type while waiting for the abort task complete
 * notification.  When the abort task complete notification is received, the
 * io request transitions to the completed state.
 *
 * Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * Unless we get some strange error, wait for the task abort to
		 * complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

/**
 * This method processes the completion's transport layer (TL) status to
 * determine if the RAW task management frame was sent successfully.  If the
 * raw frame was sent successfully, then the state for the task request
 * transitions to waiting for a response frame.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Returns SCI_SUCCESS; currently this method always returns success.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/*
		 * Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again.
		 */
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n",
			 __func__,
			 sci_req,
			 completion_code);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/**
 * This method processes an abnormal TC completion while the SMP request is
 * waiting for a response frame.  It decides what happened to the IO based
 * on TC completion status.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Returns SCI_SUCCESS; currently this method always returns success.
 */
static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/*
		 * In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we complete
		 * the IO anyway.
		 */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/*
		 * These statuses have been seen in a specific LSI expander,
		 * which sometimes is not able to send an smp response within
		 * 2 ms.  This causes our hardware to break the connection and
		 * set the TC completion with one of these SMP_XXX_XX_ERR
		 * statuses.  For these types of error, we ask the scic user
		 * to retry the request.
		 */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
					    SCI_FAILURE_RETRY_REQUIRED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/**
 * This method processes the completion's transport layer (TL) status to
 * determine if the SMP request was sent successfully.  If the SMP request
 * was sent successfully, then the state for the SMP request transitions to
 * waiting for a response frame.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Returns SCI_SUCCESS; currently this method always returns success.
 */
static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
				     u16 ncq_tag)
{
	/*
	 * @note This could be made to return an error to the user if the user
	 * attempts to set the NCQ tag in the wrong state.
	 */
	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
}

/**
 *
 * @stp_req: The STP request whose current PIO SGL position is advanced.
 *
 * Get the next SGL element from the request:
 *   - Check on which SGL element pair we are working.
 *   - If working on SGL pair element A, advance to element B.
 *   - Else, check to see if there are more SGL element pairs for this IO
 *     request.  If there are more SGL element pairs, advance to the next
 *     pair and return element A.
 *
 * Returns a struct scu_sgl_element pointer, or NULL when the list is
 * exhausted.
 */
1369static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
1370{
1371 struct scu_sgl_element *current_sgl;
1372 struct scic_sds_request *sci_req = to_sci_req(stp_req);
1373 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
1374
1375 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1376 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
1377 pio_sgl->sgl_pair->B.address_upper == 0) {
1378 current_sgl = NULL;
1379 } else {
1380 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
1381 current_sgl = &pio_sgl->sgl_pair->B;
1382 }
1383 } else {
1384 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
1385 pio_sgl->sgl_pair->next_pair_upper == 0) {
1386 current_sgl = NULL;
1387 } else {
1388 u64 phys_addr;
1389
1390 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
1391 phys_addr <<= 32;
1392 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
1393
1394 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
1395 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
1396 current_sgl = &pio_sgl->sgl_pair->A;
1397 }
1398 }
1399
1400 return current_sgl;
1401}
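
/*
 * A minimal, self-contained illustration of the dword-combining step used
 * above when following the next_pair_{upper,lower} link; explanatory only,
 * not a driver interface.
 */
static inline u64 sgl_next_pair_phys(u32 upper, u32 lower)
{
	return ((u64)upper << 32) | lower;
}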
1402
1403/**
1404 *
1405 * @sci_req:
1406 * @completion_code:
1407 *
1408 * This method processes a TC completion. The expected TC completion is for
1409 * the transmission of the H2D register FIS containing the SATA/STP non-data
1410 * request. This method always successfully processes the TC completion;
1411 * SCI_SUCCESS is always returned.
1412 */
1413static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
1414 struct scic_sds_request *sci_req,
1415 u32 completion_code)
1416{
1417 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1418 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1419 scic_sds_request_set_status(
1420 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1421 );
1422
1423 sci_base_state_machine_change_state(
1424 &sci_req->state_machine,
1425 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
1426 );
1427 break;
1428
1429 default:
1430 /*
1431		 * All other completion statuses cause the IO to be completed. If a NAK
1432 * was received, then it is up to the user to retry the request. */
1433 scic_sds_request_set_status(
1434 sci_req,
1435 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1436 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1437 );
1438
1439 sci_base_state_machine_change_state(
1440 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1441 break;
1442 }
1443
1444 return SCI_SUCCESS;
1445}
1446
Dan Williams5dec6f42011-05-10 02:28:49 -07001447#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
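
/*
 * Illustration only, not driver code: given the 1K payload cap above, a
 * transfer of transfer_bytes bytes needs at least this many frames (the
 * actual count can be higher where SGL boundaries force smaller frames).
 */
static inline u32 min_frames_for_transfer(u32 transfer_bytes)
{
	return (transfer_bytes + SCU_MAX_FRAME_BUFFER_SIZE - 1) /
	       SCU_MAX_FRAME_BUFFER_SIZE;
}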
1448
1449/* Transmit a DATA FIS from (current sgl + offset) for the input
1450 * parameter length. The current sgl and offset are already stored in the IO request
1451 */
1452static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data_frame(
1453 struct scic_sds_request *sci_req,
1454 u32 length)
1455{
1456 struct scic_sds_controller *scic = sci_req->owning_controller;
1457 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1458 struct scu_task_context *task_context;
1459 struct scu_sgl_element *current_sgl;
1460
1461	/* Recycle the TC and reconstruct it to send out a DATA FIS containing
1462	 * the data from current_sgl+offset for the input length
1463 */
1464 task_context = scic_sds_controller_get_task_context_buffer(scic,
1465 sci_req->io_tag);
1466
1467 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
1468 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
1469 else
1470 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
1471
1472 /* update the TC */
1473 task_context->command_iu_upper = current_sgl->address_upper;
1474 task_context->command_iu_lower = current_sgl->address_lower;
1475 task_context->transfer_length_bytes = length;
1476 task_context->type.stp.fis_type = FIS_DATA;
1477
1478 /* send the new TC out. */
1479 return scic_controller_continue_io(sci_req);
1480}
1481
1482static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
1483{
1485 struct scu_sgl_element *current_sgl;
1486 u32 sgl_offset;
1487 u32 remaining_bytes_in_current_sgl = 0;
1488 enum sci_status status = SCI_SUCCESS;
1489 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1490
1491 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
1492
1493 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1494 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
1495 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
1496 } else {
1497 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
1498 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
1499 }
1500
1502 if (stp_req->type.pio.pio_transfer_bytes > 0) {
1503 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
1504 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
1505			status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
1506 if (status == SCI_SUCCESS) {
1507 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
1508
1509 /* update the current sgl, sgl_offset and save for future */
1510 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
1511 sgl_offset = 0;
1512 }
1513 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
1514 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
1515			status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
1516
1517 if (status == SCI_SUCCESS) {
1518 /* Sgl offset will be adjusted and saved for future */
1519 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
1520 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
1521 stp_req->type.pio.pio_transfer_bytes = 0;
1522 }
1523 }
1524 }
1525
1526 if (status == SCI_SUCCESS) {
1527 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
1528 }
1529
1530 return status;
1531}
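
/*
 * Sketch of the chunking decision implemented above, as a hypothetical pure
 * function (not part of the driver): the next DATA FIS carries the smaller
 * of the bytes still owed for the PIO transfer and the bytes left in the
 * current SGL element.
 */
static inline u32 pio_next_frame_len(u32 pio_transfer_bytes,
				     u32 remaining_in_sgl)
{
	return min(pio_transfer_bytes, remaining_in_sgl);
}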
1532
1533/**
1534 *
1535 * @stp_req: The request that is used for the SGL processing.
1536 * @data_buf: The buffer of data to be copied.
1537 * @len: The length of the data transfer.
1538 *
1539 * Copy the data from the buffer, for the length specified, to the data
1540 * region specified by the IO request SGL. Returns enum sci_status.
1541 */
1542static enum sci_status
1543scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
1544 u8 *data_buf, u32 len)
1545{
1546 struct scic_sds_request *sci_req;
1547 struct isci_request *ireq;
1548 u8 *src_addr;
1549 int copy_len;
1550 struct sas_task *task;
1551 struct scatterlist *sg;
1552 void *kaddr;
1553 int total_len = len;
1554
1555 sci_req = to_sci_req(stp_req);
1556 ireq = sci_req_to_ireq(sci_req);
1557 task = isci_request_access_task(ireq);
1558 src_addr = data_buf;
1559
1560 if (task->num_scatter > 0) {
1561 sg = task->scatter;
1562
1563 while (total_len > 0) {
1564 struct page *page = sg_page(sg);
1565
1566 copy_len = min_t(int, total_len, sg_dma_len(sg));
1567 kaddr = kmap_atomic(page, KM_IRQ0);
1568 memcpy(kaddr + sg->offset, src_addr, copy_len);
1569 kunmap_atomic(kaddr, KM_IRQ0);
1570 total_len -= copy_len;
1571 src_addr += copy_len;
1572 sg = sg_next(sg);
1573 }
1574 } else {
1575 BUG_ON(task->total_xfer_len < total_len);
1576 memcpy(task->scatter, src_addr, total_len);
1577 }
1578
1579 return SCI_SUCCESS;
1580}
1581
1582/**
1583 *
1584 * @sci_req: The PIO DATA IN request that is to receive the data.
1585 * @data_buffer: The buffer to copy from.
1586 *
1587 * Copy the data buffer to the IO request data region. Returns enum sci_status.
1588 */
1589static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1590 struct scic_sds_stp_request *sci_req,
1591 u8 *data_buffer)
1592{
1593 enum sci_status status;
1594
1595 /*
1596 * If there is less than 1K remaining in the transfer request
1597 * copy just the data for the transfer */
1598 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
1599 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1600 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
1601
1602 if (status == SCI_SUCCESS)
1603 sci_req->type.pio.pio_transfer_bytes = 0;
1604 } else {
1605		/* We are transferring the whole frame so copy */
1606 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1607 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1608
1609 if (status == SCI_SUCCESS)
1610 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
1611 }
1612
1613 return status;
1614}
1615
1616/**
1617 *
1618 * @sci_req:
1619 * @completion_code:
1620 *
1621 * Process the TC completion for the transmitted H2D register FIS. Returns enum sci_status.
1622 */
1623static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
1624 struct scic_sds_request *sci_req,
1625 u32 completion_code)
1626{
1627 enum sci_status status = SCI_SUCCESS;
1628
1629 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1630 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1631 scic_sds_request_set_status(
1632 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1633 );
1634
1635 sci_base_state_machine_change_state(
1636 &sci_req->state_machine,
1637 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1638 );
1639 break;
1640
1641 default:
1642 /*
1643		 * All other completion statuses cause the IO to be completed. If a NAK
1644 * was received, then it is up to the user to retry the request. */
1645 scic_sds_request_set_status(
1646 sci_req,
1647 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1648 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1649 );
1650
1651 sci_base_state_machine_change_state(
1652 &sci_req->state_machine,
1653 SCI_BASE_REQUEST_STATE_COMPLETED
1654 );
1655 break;
1656 }
1657
1658 return status;
1659}
1660
Dan Williams5dec6f42011-05-10 02:28:49 -07001661static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
1663 struct scic_sds_request *sci_req,
1664 u32 completion_code)
1665{
1666 enum sci_status status = SCI_SUCCESS;
1667 bool all_frames_transferred = false;
1668 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1669
1670 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1671 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1672 /* Transmit data */
1673 if (stp_req->type.pio.pio_transfer_bytes != 0) {
1674 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1675 if (status == SCI_SUCCESS) {
1676 if (stp_req->type.pio.pio_transfer_bytes == 0)
1677 all_frames_transferred = true;
1678 }
1679 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
1680 /*
1681			 * this will happen if all the data is written on the
1682			 * first pass after the PIO Setup FIS is received
1683 */
1684 all_frames_transferred = true;
1685 }
1686
1687 /* all data transferred. */
1688 if (all_frames_transferred) {
1689 /*
1690			 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1691			 * and wait for a PIO_SETUP FIS or a D2H Reg FIS. */
1692 sci_base_state_machine_change_state(
1693 &sci_req->state_machine,
1694 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1695 );
1696 }
1697 break;
1698
1699 default:
1700 /*
1701		 * All other completion statuses cause the IO to be completed. If a NAK
1702 * was received, then it is up to the user to retry the request. */
1703 scic_sds_request_set_status(
1704 sci_req,
1705 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1706 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1707 );
1708
1709 sci_base_state_machine_change_state(
1710 &sci_req->state_machine,
1711 SCI_BASE_REQUEST_STATE_COMPLETED
1712 );
1713 break;
1714 }
1715
1716 return status;
1717}
1718
1719/**
1720 *
1721 * @request: This is the request which is receiving the event.
1722 * @event_code: This is the event code on which the request is
1723 *    expected to take action.
1724 *
1725 * This method will handle any link layer events while waiting for the data
1726 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
1727 */
1728static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
1729 struct scic_sds_request *request,
1730 u32 event_code)
1731{
1732 enum sci_status status;
1733
1734 switch (scu_get_event_specifier(event_code)) {
1735 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
1736 /*
1737		 * We are waiting for data and the SCU has signaled R_ERR on the data frame.
1738 * Go back to waiting for the D2H Register FIS */
1739 sci_base_state_machine_change_state(
1740 &request->state_machine,
1741 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1742 );
1743
1744 status = SCI_SUCCESS;
1745 break;
1746
1747 default:
1748 dev_err(scic_to_dev(request->owning_controller),
1749 "%s: SCIC PIO Request 0x%p received unexpected "
1750 "event 0x%08x\n",
1751 __func__, request, event_code);
1752
1753		/* TODO: Should we fail the PIO request when we get an unexpected event? */
1754 status = SCI_FAILURE;
1755 break;
1756 }
1757
1758 return status;
1759}
1760
1761static void scic_sds_stp_request_udma_complete_request(
1762 struct scic_sds_request *request,
1763 u32 scu_status,
1764 enum sci_status sci_status)
1765{
1766 scic_sds_request_set_status(request, scu_status, sci_status);
1767 sci_base_state_machine_change_state(&request->state_machine,
1768 SCI_BASE_REQUEST_STATE_COMPLETED);
1769}
1770
1771static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1772 u32 frame_index)
1773{
1774 struct scic_sds_controller *scic = sci_req->owning_controller;
1775 struct dev_to_host_fis *frame_header;
1776 enum sci_status status;
1777 u32 *frame_buffer;
1778
1779 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1780 frame_index,
1781 (void **)&frame_header);
1782
1783 if ((status == SCI_SUCCESS) &&
1784 (frame_header->fis_type == FIS_REGD2H)) {
1785 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1786 frame_index,
1787 (void **)&frame_buffer);
1788
1789 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1790 frame_header,
1791 frame_buffer);
1792 }
1793
1794 scic_sds_controller_release_frame(scic, frame_index);
1795
1796 return status;
1797}
1798
Dan Williamsd1c637c32011-05-11 08:27:47 -07001799enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1800 u32 frame_index)
1801{
1802 struct scic_sds_controller *scic = sci_req->owning_controller;
1803 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1804 enum sci_base_request_states state;
1805 enum sci_status status;
1806 ssize_t word_cnt;
1807
1808 state = sci_req->state_machine.current_state_id;
1809 switch (state) {
1810 case SCI_BASE_REQUEST_STATE_STARTED: {
1811 struct ssp_frame_hdr ssp_hdr;
1812 void *frame_header;
1813
1814 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1815 frame_index,
1816 &frame_header);
1817
1818 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1819 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1820
1821 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1822 struct ssp_response_iu *resp_iu;
1823 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1824
1825 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1826 frame_index,
1827 (void **)&resp_iu);
1828
1829 sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt);
1830
1831 resp_iu = &sci_req->ssp.rsp;
1832
1833			if (resp_iu->datapres == 0x01 ||	/* response data present */
1834			    resp_iu->datapres == 0x02) {	/* sense data present */
1835 scic_sds_request_set_status(sci_req,
1836 SCU_TASK_DONE_CHECK_RESPONSE,
1837 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1838 } else
1839 scic_sds_request_set_status(sci_req,
1840 SCU_TASK_DONE_GOOD,
1841 SCI_SUCCESS);
1842 } else {
1843 /* not a response frame, why did it get forwarded? */
1844 dev_err(scic_to_dev(scic),
1845 "%s: SCIC IO Request 0x%p received unexpected "
1846 "frame %d type 0x%02x\n", __func__, sci_req,
1847 frame_index, ssp_hdr.frame_type);
1848 }
1849
1850 /*
1851 * In any case we are done with this frame buffer return it to the
1852 * controller
1853 */
1854 scic_sds_controller_release_frame(scic, frame_index);
1855
1856 return SCI_SUCCESS;
1857 }
1858 case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE:
1859 scic_sds_io_request_copy_response(sci_req);
1860 sci_base_state_machine_change_state(&sci_req->state_machine,
1861 SCI_BASE_REQUEST_STATE_COMPLETED);
1862		scic_sds_controller_release_frame(scic, frame_index);
1863 return SCI_SUCCESS;
1864 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: {
1865 struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
1866 void *frame_header;
1867
1868 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1869 frame_index,
1870 &frame_header);
1871
1872 /* byte swap the header. */
1873 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1874 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1875
1876 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1877 void *smp_resp;
1878
1879 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1880 frame_index,
1881 &smp_resp);
1882
1883 word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) /
1884 sizeof(u32);
1885
1886 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1887 smp_resp, word_cnt);
1888
1889 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1890 SCI_SUCCESS);
1891
1892 sci_base_state_machine_change_state(&sci_req->state_machine,
1893 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION);
1894 } else {
1895			/* This was not a response frame, why did it get forwarded? */
1896 dev_err(scic_to_dev(scic),
1897 "%s: SCIC SMP Request 0x%p received unexpected frame "
1898 "%d type 0x%02x\n", __func__, sci_req,
1899 frame_index, rsp_hdr->frame_type);
1900
1901 scic_sds_request_set_status(sci_req,
1902 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1903 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1904
1905 sci_base_state_machine_change_state(&sci_req->state_machine,
1906 SCI_BASE_REQUEST_STATE_COMPLETED);
1907 }
1908
1909 scic_sds_controller_release_frame(scic, frame_index);
1910
1911 return SCI_SUCCESS;
1912 }
1913 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
1914 return scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1915 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE:
1916		/* Use the general frame handler to copy the response data */
1917 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1918
1919 if (status != SCI_SUCCESS)
1920 return status;
1921
1922 scic_sds_stp_request_udma_complete_request(sci_req,
1923 SCU_TASK_DONE_CHECK_RESPONSE,
1924 SCI_FAILURE_IO_RESPONSE_VALID);
1925 return SCI_SUCCESS;
1926 case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: {
1927 struct dev_to_host_fis *frame_header;
1928 u32 *frame_buffer;
1929
1930 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1931 frame_index,
1932 (void **)&frame_header);
1933
1934 if (status != SCI_SUCCESS) {
1935 dev_err(scic_to_dev(scic),
1936 "%s: SCIC IO Request 0x%p could not get frame header "
1937 "for frame index %d, status %x\n",
1938 __func__, stp_req, frame_index, status);
1939
1940 return status;
1941 }
1942
1943 switch (frame_header->fis_type) {
1944 case FIS_REGD2H:
1945 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1946 frame_index,
1947 (void **)&frame_buffer);
1948
1949 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1950 frame_header,
1951 frame_buffer);
1952
1953 /* The command has completed with error */
1954 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1955 SCI_FAILURE_IO_RESPONSE_VALID);
1956 break;
1957
1958 default:
1959 dev_warn(scic_to_dev(scic),
1960 "%s: IO Request:0x%p Frame Id:%d protocol "
1961 "violation occurred\n", __func__, stp_req,
1962 frame_index);
1963
1964 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1965 SCI_FAILURE_PROTOCOL_VIOLATION);
1966 break;
1967 }
1968
1969 sci_base_state_machine_change_state(&sci_req->state_machine,
1970 SCI_BASE_REQUEST_STATE_COMPLETED);
1971
1972 /* Frame has been decoded return it to the controller */
1973 scic_sds_controller_release_frame(scic, frame_index);
1974
1975 return status;
1976 }
1977 case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: {
1978 struct isci_request *ireq = sci_req_to_ireq(sci_req);
1979 struct sas_task *task = isci_request_access_task(ireq);
1980 struct dev_to_host_fis *frame_header;
1981 u32 *frame_buffer;
1982
1983 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1984 frame_index,
1985 (void **)&frame_header);
1986
1987 if (status != SCI_SUCCESS) {
1988 dev_err(scic_to_dev(scic),
1989 "%s: SCIC IO Request 0x%p could not get frame header "
1990 "for frame index %d, status %x\n",
1991 __func__, stp_req, frame_index, status);
1992 return status;
1993 }
1994
1995 switch (frame_header->fis_type) {
1996			/* Get the PIO Setup data from the frame buffer */
1997 /* Get from the frame buffer the PIO Setup Data */
1998 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1999 frame_index,
2000 (void **)&frame_buffer);
2001
2002			/* Get the data from the PIO Setup. The SCU hardware returns
2003			 * the first word in the frame_header, and the rest of the
2004			 * data is in the frame buffer, so we need to back up one dword
2005 */
2006
2007 /* transfer_count: first 16bits in the 4th dword */
2008 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
2009
2010 /* ending_status: 4th byte in the 3rd dword */
2011 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
2012
2013 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2014 frame_header,
2015 frame_buffer);
2016
2017 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
2018
2019 /* The next state is dependent on whether the
2020 * request was PIO Data-in or Data out
2021 */
2022 if (task->data_dir == DMA_FROM_DEVICE) {
2023 sci_base_state_machine_change_state(&sci_req->state_machine,
2024 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
2025 } else if (task->data_dir == DMA_TO_DEVICE) {
2026 /* Transmit data */
2027 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
2028 if (status != SCI_SUCCESS)
2029 break;
2030 sci_base_state_machine_change_state(&sci_req->state_machine,
2031 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
2032 }
2033 break;
2034 case FIS_SETDEVBITS:
2035 sci_base_state_machine_change_state(&sci_req->state_machine,
2036 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2037 break;
2038 case FIS_REGD2H:
2039 if (frame_header->status & ATA_BUSY) {
2040 /* Now why is the drive sending a D2H Register FIS when
2041 * it is still busy? Do nothing since we are still in
2042 * the right state.
2043 */
2044 dev_dbg(scic_to_dev(scic),
2045 "%s: SCIC PIO Request 0x%p received "
2046 "D2H Register FIS with BSY status "
2047 "0x%x\n", __func__, stp_req,
2048 frame_header->status);
2049 break;
2050 }
2051
2052 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2053 frame_index,
2054 (void **)&frame_buffer);
2055
2056			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2057 frame_header,
2058 frame_buffer);
2059
2060 scic_sds_request_set_status(sci_req,
2061 SCU_TASK_DONE_CHECK_RESPONSE,
2062 SCI_FAILURE_IO_RESPONSE_VALID);
2063
2064 sci_base_state_machine_change_state(&sci_req->state_machine,
2065 SCI_BASE_REQUEST_STATE_COMPLETED);
2066 break;
2067 default:
2068 /* FIXME: what do we do here? */
2069 break;
2070 }
2071
2072 /* Frame is decoded return it to the controller */
2073 scic_sds_controller_release_frame(scic, frame_index);
2074
2075 return status;
2076 }
2077 case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: {
2078 struct dev_to_host_fis *frame_header;
2079 struct sata_fis_data *frame_buffer;
2080
2081 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2082 frame_index,
2083 (void **)&frame_header);
2084
2085 if (status != SCI_SUCCESS) {
2086 dev_err(scic_to_dev(scic),
2087 "%s: SCIC IO Request 0x%p could not get frame header "
2088 "for frame index %d, status %x\n",
2089 __func__, stp_req, frame_index, status);
2090 return status;
2091 }
2092
2093 if (frame_header->fis_type != FIS_DATA) {
2094 dev_err(scic_to_dev(scic),
2095 "%s: SCIC PIO Request 0x%p received frame %d "
2096 "with fis type 0x%02x when expecting a data "
2097 "fis.\n", __func__, stp_req, frame_index,
2098 frame_header->fis_type);
2099
2100 scic_sds_request_set_status(sci_req,
2101 SCU_TASK_DONE_GOOD,
2102 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
2103
2104 sci_base_state_machine_change_state(&sci_req->state_machine,
2105 SCI_BASE_REQUEST_STATE_COMPLETED);
2106
2107 /* Frame is decoded return it to the controller */
2108 scic_sds_controller_release_frame(scic, frame_index);
2109 return status;
2110 }
2111
2112 if (stp_req->type.pio.request_current.sgl_pair == NULL) {
2113 sci_req->saved_rx_frame_index = frame_index;
2114 stp_req->type.pio.pio_transfer_bytes = 0;
2115 } else {
2116 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2117 frame_index,
2118 (void **)&frame_buffer);
2119
2120 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
2121 (u8 *)frame_buffer);
2122
2123 /* Frame is decoded return it to the controller */
2124 scic_sds_controller_release_frame(scic, frame_index);
2125 }
2126
2127 /* Check for the end of the transfer, are there more
2128 * bytes remaining for this data transfer
2129 */
2130 if (status != SCI_SUCCESS ||
2131 stp_req->type.pio.pio_transfer_bytes != 0)
2132 return status;
2133
2134 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
2135 scic_sds_request_set_status(sci_req,
2136 SCU_TASK_DONE_CHECK_RESPONSE,
2137 SCI_FAILURE_IO_RESPONSE_VALID);
2138
2139 sci_base_state_machine_change_state(&sci_req->state_machine,
2140 SCI_BASE_REQUEST_STATE_COMPLETED);
2141 } else {
2142 sci_base_state_machine_change_state(&sci_req->state_machine,
2143 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2144 }
2145 return status;
2146 }
2147 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: {
2148 struct dev_to_host_fis *frame_header;
2149 u32 *frame_buffer;
2150
2151 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2152 frame_index,
2153 (void **)&frame_header);
2154 if (status != SCI_SUCCESS) {
2155 dev_err(scic_to_dev(scic),
2156 "%s: SCIC IO Request 0x%p could not get frame header "
2157 "for frame index %d, status %x\n",
2158 __func__, stp_req, frame_index, status);
2159 return status;
2160 }
2161
2162 switch (frame_header->fis_type) {
2163 case FIS_REGD2H:
2164 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2165 frame_index,
2166 (void **)&frame_buffer);
2167
2168 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2169 frame_header,
2170 frame_buffer);
2171
2172 /* The command has completed with error */
2173 scic_sds_request_set_status(sci_req,
2174 SCU_TASK_DONE_CHECK_RESPONSE,
2175 SCI_FAILURE_IO_RESPONSE_VALID);
2176 break;
2177 default:
2178 dev_warn(scic_to_dev(scic),
2179 "%s: IO Request:0x%p Frame Id:%d protocol "
2180 "violation occurred\n", __func__, stp_req,
2181 frame_index);
2182
2183 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
2184 SCI_FAILURE_PROTOCOL_VIOLATION);
2185 break;
2186 }
2187
2188 sci_base_state_machine_change_state(&sci_req->state_machine,
2189 SCI_BASE_REQUEST_STATE_COMPLETED);
2190
2191 /* Frame has been decoded return it to the controller */
2192 scic_sds_controller_release_frame(scic, frame_index);
2193
2194 return status;
2195 }
2196 case SCI_BASE_REQUEST_STATE_ABORTING:
2197 /* TODO: Is it even possible to get an unsolicited frame in the
2198 * aborting state?
2199 */
2200 scic_sds_controller_release_frame(scic, frame_index);
2201 return SCI_SUCCESS;
2202 default:
2203 dev_warn(scic_to_dev(scic),
2204 "%s: SCIC IO Request given unexpected frame %x while in "
2205 "state %d\n", __func__, frame_index, state);
2206
2207 scic_sds_controller_release_frame(scic, frame_index);
2208 return SCI_FAILURE_INVALID_STATE;
2209 }
2210}
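
/*
 * Explanatory helpers for the FIS_PIO_SETUP decode above; these mirror the
 * open-coded extractions ("first 16 bits of the 4th dword", "4th byte of
 * the 3rd dword") and are illustrative assumptions, not driver interfaces.
 */
static inline u16 pio_setup_transfer_count(const u32 *frame_buffer)
{
	return frame_buffer[3] & 0xffff;	/* low 16 bits of dword 3 */
}

static inline u8 pio_setup_ending_status(const u32 *frame_buffer)
{
	return (frame_buffer[2] >> 24) & 0xff;	/* high byte of dword 2 */
}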
2211
Dan Williams5dec6f42011-05-10 02:28:49 -07002215static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
2216 struct scic_sds_request *sci_req,
2217 u32 completion_code)
2218{
2219 enum sci_status status = SCI_SUCCESS;
2220
2221 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2222 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2223 scic_sds_stp_request_udma_complete_request(sci_req,
2224 SCU_TASK_DONE_GOOD,
2225 SCI_SUCCESS);
2226 break;
2227 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2228 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2229 /*
2230		 * We must check the response buffer to see if the D2H Register FIS was
2231 * received before we got the TC completion. */
2232 if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
2233 scic_sds_remote_device_suspend(sci_req->target_device,
2234 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2235
2236 scic_sds_stp_request_udma_complete_request(sci_req,
2237 SCU_TASK_DONE_CHECK_RESPONSE,
2238 SCI_FAILURE_IO_RESPONSE_VALID);
2239 } else {
2240 /*
2241			 * If we have an error completion status for the TC, then we can expect a
2242			 * D2H register FIS from the device, so we must change state to wait for it */
2243 sci_base_state_machine_change_state(&sci_req->state_machine,
2244 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
2245 }
2246 break;
2247
2248 /*
2249	 * TODO: Check to see if any of these completion statuses need to wait
2250	 * for the device-to-host register FIS. */
2251	/* TODO: We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
2252 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2253 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2254 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2255 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2256 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
2257 scic_sds_remote_device_suspend(sci_req->target_device,
2258 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2259 /* Fall through to the default case */
2260 default:
2261 /* All other completion status cause the IO to be complete. */
2262 scic_sds_stp_request_udma_complete_request(sci_req,
2263 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2264 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2265 break;
2266 }
2267
2268 return status;
2269}
2270
Dan Williams5dec6f42011-05-10 02:28:49 -07002271/**
2272 *
2273 * @sci_req:
2274 * @completion_code:
2275 *
2276 * This method processes a TC completion. The expected TC completion is for
2277 * the transmission of the H2D register FIS that asserts the soft reset
2278 * condition (SRST set). This method always successfully processes the TC
2279 * completion; SCI_SUCCESS is always returned.
2280 */
2281static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
2282 struct scic_sds_request *sci_req,
2283 u32 completion_code)
2284{
2285 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2286 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2287 scic_sds_request_set_status(
2288 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
2289 );
2290
2291 sci_base_state_machine_change_state(
2292 &sci_req->state_machine,
2293 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
2294 );
2295 break;
2296
2297 default:
2298 /*
2299	 * All other completion statuses cause the IO to be completed. If a NAK
2300 * was received, then it is up to the user to retry the request. */
2301 scic_sds_request_set_status(
2302 sci_req,
2303 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2304 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2305 );
2306
2307 sci_base_state_machine_change_state(
2308 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
2309 break;
2310 }
2311
2312 return SCI_SUCCESS;
2313}
2314
2315/**
2316 *
2317 * @sci_req:
2318 * @completion_code:
2319 *
2320 * This method processes a TC completion. The expected TC completion is for
2321 * the transmission of the H2D register FIS that de-asserts the soft reset
2322 * condition (the diagnostic FIS). This method always successfully processes
2323 * the TC completion; SCI_SUCCESS is always returned.
2324 */
2325static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
2326 struct scic_sds_request *sci_req,
2327 u32 completion_code)
2328{
2329 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2330 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2331 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2332 SCI_SUCCESS);
2333
2334 sci_base_state_machine_change_state(&sci_req->state_machine,
2335 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE);
2336 break;
2337
2338 default:
2339 /*
2340	 * All other completion statuses cause the IO to be completed. If a NAK
2341 * was received, then it is up to the user to retry the request. */
2342 scic_sds_request_set_status(
2343 sci_req,
2344 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2345 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2346 );
2347
2348 sci_base_state_machine_change_state(&sci_req->state_machine,
2349 SCI_BASE_REQUEST_STATE_COMPLETED);
2350 break;
2351 }
2352
2353 return SCI_SUCCESS;
2354}
2355
Dan Williamsf1f52e72011-05-10 02:28:45 -07002356static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
Piotr Sawickif4636a72011-05-10 23:50:32 +00002357 [SCI_BASE_REQUEST_STATE_INITIAL] = {},
2358 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {},
Dan Williamsf1f52e72011-05-10 02:28:45 -07002359 [SCI_BASE_REQUEST_STATE_STARTED] = {
Dan Williamsf1f52e72011-05-10 02:28:45 -07002360 .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
Dan Williamsf1f52e72011-05-10 02:28:45 -07002361 },
Dan Williamsf1393032011-05-10 02:28:47 -07002362 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
Dan Williamsf1393032011-05-10 02:28:47 -07002363 .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler,
2364 },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002365 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { },
Dan Williamsc72086e2011-05-10 02:28:48 -07002366 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
Dan Williamsc72086e2011-05-10 02:28:48 -07002367 .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler,
Dan Williamsc72086e2011-05-10 02:28:48 -07002368 },
2369 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
Dan Williamsc72086e2011-05-10 02:28:48 -07002370 .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler,
2371 },
Dan Williams5dec6f42011-05-10 02:28:49 -07002372 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
Dan Williams5dec6f42011-05-10 02:28:49 -07002373 .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
Dan Williams5dec6f42011-05-10 02:28:49 -07002374 },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002375 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { },
Dan Williams5dec6f42011-05-10 02:28:49 -07002376 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
Dan Williams5dec6f42011-05-10 02:28:49 -07002377 .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
2378 },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002379 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { },
Dan Williams5dec6f42011-05-10 02:28:49 -07002380 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
Dan Williams5dec6f42011-05-10 02:28:49 -07002381 .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
2382 },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002383 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { },
Dan Williams5dec6f42011-05-10 02:28:49 -07002384 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
Dan Williams5dec6f42011-05-10 02:28:49 -07002385 .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
Dan Williams5dec6f42011-05-10 02:28:49 -07002386 },
2387 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
Dan Williams5dec6f42011-05-10 02:28:49 -07002388 .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
2389 },
2390 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
Dan Williams5dec6f42011-05-10 02:28:49 -07002391 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
2392 },
2393 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
Dan Williams5dec6f42011-05-10 02:28:49 -07002394 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
2395 },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002396 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { },
Dan Williamsf1f52e72011-05-10 02:28:45 -07002397 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
2398 .complete_handler = scic_sds_request_completed_state_complete_handler,
2399 },
2400 [SCI_BASE_REQUEST_STATE_ABORTING] = {
Dan Williamsf1f52e72011-05-10 02:28:45 -07002401 .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
Dan Williamsf1f52e72011-05-10 02:28:45 -07002402 },
Dan Williamsf1393032011-05-10 02:28:47 -07002403 [SCI_BASE_REQUEST_STATE_FINAL] = { },
Dan Williamsf1f52e72011-05-10 02:28:45 -07002404};
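
/*
 * Sketch of how a handler table like the one above is typically consulted;
 * this dispatch function is a hypothetical illustration, not the driver's
 * actual entry point.
 */
static enum sci_status example_dispatch_tc_completion(struct scic_sds_request *sci_req,
						      u32 completion_code)
{
	enum sci_base_request_states state =
		sci_req->state_machine.current_state_id;
	const struct scic_sds_io_request_state_handler *handler =
		&scic_sds_request_state_handler_table[state];

	if (!handler->tc_completion_handler)
		return SCI_FAILURE_INVALID_STATE;

	return handler->tc_completion_handler(sci_req, completion_code);
}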
2405
Dan Williams6f231dd2011-07-02 22:56:22 -07002406
2407/**
2408 * isci_request_process_response_iu() - This function sets the status and
2409 * response iu, in the task struct, from the request object for the upper
2410 * layer driver.
2411 * @sas_task: This parameter is the task struct from the upper layer driver.
2412 * @resp_iu: This parameter points to the response iu of the completed request.
2413 * @dev: This parameter specifies the linux device struct.
2414 *
2415 * none.
2416 */
2417static void isci_request_process_response_iu(
2418 struct sas_task *task,
2419 struct ssp_response_iu *resp_iu,
2420 struct device *dev)
2421{
2422 dev_dbg(dev,
2423 "%s: resp_iu = %p "
2424 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2425 "resp_iu->response_data_len = %x, "
2426		"resp_iu->sense_data_len = %x\nresponse data: ",
2427 __func__,
2428 resp_iu,
2429 resp_iu->status,
2430 resp_iu->datapres,
2431 resp_iu->response_data_len,
2432 resp_iu->sense_data_len);
2433
2434 task->task_status.stat = resp_iu->status;
2435
2436 /* libsas updates the task status fields based on the response iu. */
2437 sas_ssp_task_response(dev, task, resp_iu);
2438}
2439
2440/**
2441 * isci_request_set_open_reject_status() - This function prepares the I/O
2442 * completion for OPEN_REJECT conditions.
2443 * @request: This parameter is the completed isci_request object.
2444 * @response_ptr: This parameter specifies the service response for the I/O.
2445 * @status_ptr: This parameter specifies the exec status for the I/O.
2446 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2447 * the LLDD with respect to completing this request or forcing an abort
2448 * condition on the I/O.
2449 * @open_rej_reason: This parameter specifies the encoded reason for the
2450 * abandon-class reject.
2451 *
2452 * none.
2453 */
2454static void isci_request_set_open_reject_status(
2455 struct isci_request *request,
2456 struct sas_task *task,
2457 enum service_response *response_ptr,
2458 enum exec_status *status_ptr,
2459 enum isci_completion_selection *complete_to_host_ptr,
2460 enum sas_open_rej_reason open_rej_reason)
2461{
2462 /* Task in the target is done. */
2463 request->complete_in_target = true;
2464 *response_ptr = SAS_TASK_UNDELIVERED;
2465 *status_ptr = SAS_OPEN_REJECT;
2466 *complete_to_host_ptr = isci_perform_normal_io_completion;
2467 task->task_status.open_rej_reason = open_rej_reason;
2468}
2469
2470/**
2471 * isci_request_handle_controller_specific_errors() - This function decodes
2472 * controller-specific I/O completion error conditions.
2473 * @request: This parameter is the completed isci_request object.
2474 * @response_ptr: This parameter specifies the service response for the I/O.
2475 * @status_ptr: This parameter specifies the exec status for the I/O.
2476 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2477 * the LLDD with respect to completing this request or forcing an abort
2478 * condition on the I/O.
2479 *
2480 * none.
2481 */
2482static void isci_request_handle_controller_specific_errors(
2483 struct isci_remote_device *isci_device,
2484 struct isci_request *request,
2485 struct sas_task *task,
2486 enum service_response *response_ptr,
2487 enum exec_status *status_ptr,
2488 enum isci_completion_selection *complete_to_host_ptr)
2489{
2490 unsigned int cstatus;
2491
Dan Williamsf1f52e72011-05-10 02:28:45 -07002492 cstatus = request->sci.scu_status;
Dan Williams6f231dd2011-07-02 22:56:22 -07002493
2494 dev_dbg(&request->isci_host->pdev->dev,
2495 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2496 "- controller status = 0x%x\n",
2497 __func__, request, cstatus);
2498
2499 /* Decode the controller-specific errors; most
2500 * important is to recognize those conditions in which
2501 * the target may still have a task outstanding that
2502 * must be aborted.
2503 *
2504 * Note that there are SCU completion codes being
2505 * named in the decode below for which SCIC has already
2506 * done work to handle them in a way other than as
2507 * a controller-specific completion code; these are left
2508 * in the decode below for completeness sake.
2509 */
2510 switch (cstatus) {
2511 case SCU_TASK_DONE_DMASETUP_DIRERR:
2512 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2513 case SCU_TASK_DONE_XFERCNT_ERR:
2514 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2515 if (task->task_proto == SAS_PROTOCOL_SMP) {
2516 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2517 *response_ptr = SAS_TASK_COMPLETE;
2518
2519 /* See if the device has been/is being stopped. Note
2520 * that we ignore the quiesce state, since we are
2521 * concerned about the actual device state.
2522 */
2523 if ((isci_device->status == isci_stopping) ||
2524 (isci_device->status == isci_stopped))
2525 *status_ptr = SAS_DEVICE_UNKNOWN;
2526 else
2527 *status_ptr = SAS_ABORTED_TASK;
2528
2529 request->complete_in_target = true;
2530
2531 *complete_to_host_ptr =
2532 isci_perform_normal_io_completion;
2533 } else {
2534 /* Task in the target is not done. */
2535 *response_ptr = SAS_TASK_UNDELIVERED;
2536
2537 if ((isci_device->status == isci_stopping) ||
2538 (isci_device->status == isci_stopped))
2539 *status_ptr = SAS_DEVICE_UNKNOWN;
2540 else
2541 *status_ptr = SAM_STAT_TASK_ABORTED;
2542
2543 request->complete_in_target = false;
2544
2545 *complete_to_host_ptr =
2546 isci_perform_error_io_completion;
2547 }
2548
2549 break;
2550
2551 case SCU_TASK_DONE_CRC_ERR:
2552 case SCU_TASK_DONE_NAK_CMD_ERR:
2553 case SCU_TASK_DONE_EXCESS_DATA:
2554 case SCU_TASK_DONE_UNEXP_FIS:
2555 /* Also SCU_TASK_DONE_UNEXP_RESP: */
2556 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2557 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2558 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2559 /* These are conditions in which the target
2560 * has completed the task, so that no cleanup
2561 * is necessary.
2562 */
2563 *response_ptr = SAS_TASK_COMPLETE;
2564
2565 /* See if the device has been/is being stopped. Note
2566 * that we ignore the quiesce state, since we are
2567 * concerned about the actual device state.
2568 */
2569 if ((isci_device->status == isci_stopping) ||
2570 (isci_device->status == isci_stopped))
2571 *status_ptr = SAS_DEVICE_UNKNOWN;
2572 else
2573 *status_ptr = SAS_ABORTED_TASK;
2574
2575 request->complete_in_target = true;
2576
2577 *complete_to_host_ptr = isci_perform_normal_io_completion;
2578 break;
2579
2580
2581 /* Note that the only open reject completion codes seen here will be
2582 * abandon-class codes; all others are automatically retried in the SCU.
2583 */
2584 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2585
2586 isci_request_set_open_reject_status(
2587 request, task, response_ptr, status_ptr,
2588 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2589 break;
2590
2591 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2592
2593 /* Note - the return of AB0 will change when
2594 * libsas implements detection of zone violations.
2595 */
2596 isci_request_set_open_reject_status(
2597 request, task, response_ptr, status_ptr,
2598 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2599 break;
2600
2601 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2602
2603 isci_request_set_open_reject_status(
2604 request, task, response_ptr, status_ptr,
2605 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2606 break;
2607
2608 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2609
2610 isci_request_set_open_reject_status(
2611 request, task, response_ptr, status_ptr,
2612 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2613 break;
2614
2615 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2616
2617 isci_request_set_open_reject_status(
2618 request, task, response_ptr, status_ptr,
2619 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2620 break;
2621
2622 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2623
2624 isci_request_set_open_reject_status(
2625 request, task, response_ptr, status_ptr,
2626 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2627 break;
2628
2629 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2630
2631 isci_request_set_open_reject_status(
2632 request, task, response_ptr, status_ptr,
2633 complete_to_host_ptr, SAS_OREJ_STP_NORES);
2634 break;
2635
2636 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2637
2638 isci_request_set_open_reject_status(
2639 request, task, response_ptr, status_ptr,
2640 complete_to_host_ptr, SAS_OREJ_EPROTO);
2641 break;
2642
2643 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2644
2645 isci_request_set_open_reject_status(
2646 request, task, response_ptr, status_ptr,
2647 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2648 break;
2649
2650 case SCU_TASK_DONE_LL_R_ERR:
2651 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2652 case SCU_TASK_DONE_LL_PERR:
2653 case SCU_TASK_DONE_LL_SY_TERM:
2654 /* Also SCU_TASK_DONE_NAK_ERR:*/
2655 case SCU_TASK_DONE_LL_LF_TERM:
2656 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2657 case SCU_TASK_DONE_LL_ABORT_ERR:
2658 case SCU_TASK_DONE_SEQ_INV_TYPE:
2659 /* Also SCU_TASK_DONE_UNEXP_XR: */
2660 case SCU_TASK_DONE_XR_IU_LEN_ERR:
2661 case SCU_TASK_DONE_INV_FIS_LEN:
2662 /* Also SCU_TASK_DONE_XR_WD_LEN: */
2663 case SCU_TASK_DONE_SDMA_ERR:
2664 case SCU_TASK_DONE_OFFSET_ERR:
2665 case SCU_TASK_DONE_MAX_PLD_ERR:
2666 case SCU_TASK_DONE_LF_ERR:
2667 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2668 case SCU_TASK_DONE_SMP_LL_RX_ERR:
2669 case SCU_TASK_DONE_UNEXP_DATA:
2670 case SCU_TASK_DONE_UNEXP_SDBFIS:
2671 case SCU_TASK_DONE_REG_ERR:
2672 case SCU_TASK_DONE_SDB_ERR:
2673 case SCU_TASK_DONE_TASK_ABORT:
2674 default:
2675 /* Task in the target is not done. */
2676 *response_ptr = SAS_TASK_UNDELIVERED;
2677 *status_ptr = SAM_STAT_TASK_ABORTED;
2678 request->complete_in_target = false;
2679
2680 *complete_to_host_ptr = isci_perform_error_io_completion;
2681 break;
2682 }
2683}
2684
2685/**
2686 * isci_task_save_for_upper_layer_completion() - This function saves the
2687 * request for later completion to the upper layer driver.
2688 * @host: This parameter is a pointer to the host on which the request
2689 * should be queued (either as an error or success).
2690 * @request: This parameter is the completed request.
2691 * @response: This parameter is the response code for the completed task.
2692 * @status: This parameter is the status code for the completed task.
2693 *
2694 * none.
2695 */
2696static void isci_task_save_for_upper_layer_completion(
2697 struct isci_host *host,
2698 struct isci_request *request,
2699 enum service_response response,
2700 enum exec_status status,
2701 enum isci_completion_selection task_notification_selection)
2702{
2703 struct sas_task *task = isci_request_access_task(request);
2704
Jeff Skirvinec6c9632011-03-04 14:06:44 -08002705 task_notification_selection
2706 = isci_task_set_completion_status(task, response, status,
2707 task_notification_selection);
Dan Williams6f231dd2011-07-02 22:56:22 -07002708
2709 /* Tasks aborted specifically by a call to the lldd_abort_task
2710 * function should not be completed to the host in the regular path.
2711 */
2712 switch (task_notification_selection) {
2713
2714 case isci_perform_normal_io_completion:
2715
2716 /* Normal notification (task_done) */
2717 dev_dbg(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002718 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07002719 __func__,
2720 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002721 task->task_status.resp, response,
2722 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07002723 /* Add to the completed list. */
2724 list_add(&request->completed_node,
2725 &host->requests_to_complete);
Jeff Skirvinec6c9632011-03-04 14:06:44 -08002726
2727 /* Take the request off the device's pending request list. */
2728 list_del_init(&request->dev_node);
Dan Williams6f231dd2011-07-02 22:56:22 -07002729 break;
2730
2731 case isci_perform_aborted_io_completion:
Jeff Skirvina5fde222011-03-04 14:06:42 -08002732 /* No notification to libsas because this request is
2733 * already in the abort path.
Dan Williams6f231dd2011-07-02 22:56:22 -07002734 */
2735 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002736 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07002737 __func__,
2738 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002739 task->task_status.resp, response,
2740 task->task_status.stat, status);
Jeff Skirvina5fde222011-03-04 14:06:42 -08002741
2742 /* Wake up whatever process was waiting for this
2743 * request to complete.
2744 */
2745 WARN_ON(request->io_request_completion == NULL);
2746
2747 if (request->io_request_completion != NULL) {
2748
2749 /* Signal whoever is waiting that this
2750 * request is complete.
2751 */
2752 complete(request->io_request_completion);
2753 }
Dan Williams6f231dd2011-07-02 22:56:22 -07002754 break;
2755
2756 case isci_perform_error_io_completion:
2757 /* Use sas_task_abort */
2758 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002759 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07002760 __func__,
2761 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002762 task->task_status.resp, response,
2763 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07002764 /* Add to the aborted list. */
2765 list_add(&request->completed_node,
Jeff Skirvin11b00c12011-03-04 14:06:40 -08002766 &host->requests_to_errorback);
Dan Williams6f231dd2011-07-02 22:56:22 -07002767 break;
2768
2769 default:
2770 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002771 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07002772 __func__,
2773 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002774 task->task_status.resp, response,
2775 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07002776
Jeff Skirvina5fde222011-03-04 14:06:42 -08002777 /* Add to the error to libsas list. */
Dan Williams6f231dd2011-07-02 22:56:22 -07002778 list_add(&request->completed_node,
Jeff Skirvin11b00c12011-03-04 14:06:40 -08002779 &host->requests_to_errorback);
Dan Williams6f231dd2011-07-02 22:56:22 -07002780 break;
2781 }
2782}
2783
Dan Williamsf1f52e72011-05-10 02:28:45 -07002784static void isci_request_io_request_complete(struct isci_host *isci_host,
2785 struct isci_request *request,
2786 enum sci_io_status completion_status)
Dan Williams6f231dd2011-07-02 22:56:22 -07002787{
2788 struct sas_task *task = isci_request_access_task(request);
2789 struct ssp_response_iu *resp_iu;
2790 void *resp_buf;
2791 unsigned long task_flags;
Dan Williams6f231dd2011-07-02 22:56:22 -07002792 struct isci_remote_device *isci_device = request->isci_device;
2793 enum service_response response = SAS_TASK_UNDELIVERED;
2794 enum exec_status status = SAS_ABORTED_TASK;
2795 enum isci_request_status request_status;
2796 enum isci_completion_selection complete_to_host
2797 = isci_perform_normal_io_completion;
2798
2799 dev_dbg(&isci_host->pdev->dev,
2800 "%s: request = %p, task = %p,\n"
2801 "task->data_dir = %d completion_status = 0x%x\n",
2802 __func__,
2803 request,
2804 task,
2805 task->data_dir,
2806 completion_status);
2807
Jeff Skirvina5fde222011-03-04 14:06:42 -08002808 spin_lock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002809 request_status = isci_request_get_state(request);
Dan Williams6f231dd2011-07-02 22:56:22 -07002810
2811 /* Decode the request status. Note that if the request has been
2812 * aborted by a task management function, we don't care
2813 * what the status is.
2814 */
2815 switch (request_status) {
2816
2817 case aborted:
2818 /* "aborted" indicates that the request was aborted by a task
2819 * management function, since once a task management request is
2820 * perfomed by the device, the request only completes because
2821	 * performed by the device, the request only completes because
2822 *
2823 * Aborted also means an external thread is explicitly managing
2824 * this request, so that we do not complete it up the stack.
2825 *
2826 * The target is still there (since the TMF was successful).
2827 */
2828 request->complete_in_target = true;
2829 response = SAS_TASK_COMPLETE;
2830
2831 /* See if the device has been/is being stopped. Note
2832 * that we ignore the quiesce state, since we are
2833 * concerned about the actual device state.
2834 */
2835 if ((isci_device->status == isci_stopping)
2836 || (isci_device->status == isci_stopped)
2837 )
2838 status = SAS_DEVICE_UNKNOWN;
2839 else
2840 status = SAS_ABORTED_TASK;
2841
2842 complete_to_host = isci_perform_aborted_io_completion;
2843 /* This was an aborted request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002844
2845 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002846 break;
2847
2848 case aborting:
2849 /* aborting means that the task management function tried and
2850 * failed to abort the request. We need to note the request
2851 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2852 * target as down.
2853 *
2854 * Aborting also means an external thread is explicitly managing
2855 * this request, so that we do not complete it up the stack.
2856 */
2857 request->complete_in_target = true;
2858 response = SAS_TASK_UNDELIVERED;
2859
2860 if ((isci_device->status == isci_stopping) ||
2861 (isci_device->status == isci_stopped))
2862			/* The device has been/is being stopped. Note that
2863 * we ignore the quiesce state, since we are
2864 * concerned about the actual device state.
2865 */
2866 status = SAS_DEVICE_UNKNOWN;
2867 else
2868 status = SAS_PHY_DOWN;
2869
2870 complete_to_host = isci_perform_aborted_io_completion;
2871
2872 /* This was an aborted request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002873
2874 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002875 break;
2876
2877 case terminating:
2878
2879		/* This was a terminated request. This happens when
2880 * the I/O is being terminated because of an action on
2881 * the device (reset, tear down, etc.), and the I/O needs
2882 * to be completed up the stack.
2883 */
2884 request->complete_in_target = true;
2885 response = SAS_TASK_UNDELIVERED;
2886
2887 /* See if the device has been/is being stopped. Note
2888 * that we ignore the quiesce state, since we are
2889 * concerned about the actual device state.
2890 */
2891 if ((isci_device->status == isci_stopping) ||
2892 (isci_device->status == isci_stopped))
2893 status = SAS_DEVICE_UNKNOWN;
2894 else
2895 status = SAS_ABORTED_TASK;
2896
Jeff Skirvina5fde222011-03-04 14:06:42 -08002897 complete_to_host = isci_perform_aborted_io_completion;
Dan Williams6f231dd2011-07-02 22:56:22 -07002898
2899 /* This was a terminated request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002900
2901 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002902 break;
2903
2904 default:
2905
Jeff Skirvina5fde222011-03-04 14:06:42 -08002906 /* The request is done from an SCU HW perspective. */
2907 request->status = completed;
2908
2909 spin_unlock(&request->state_lock);
2910
Dan Williams6f231dd2011-07-02 22:56:22 -07002911 /* This is an active request being completed from the core. */
2912 switch (completion_status) {
2913
2914 case SCI_IO_FAILURE_RESPONSE_VALID:
2915 dev_dbg(&isci_host->pdev->dev,
2916 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2917 __func__,
2918 request,
2919 task);
2920
2921 if (sas_protocol_ata(task->task_proto)) {
Dan Williams67ea8382011-05-08 11:47:15 -07002922 resp_buf = &request->sci.stp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002923 isci_request_process_stp_response(task,
Dan Williamsb7645812011-05-08 02:35:32 -07002924 resp_buf);
Dan Williams6f231dd2011-07-02 22:56:22 -07002925 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2926
2927 /* crack the iu response buffer. */
Dan Williams67ea8382011-05-08 11:47:15 -07002928 resp_iu = &request->sci.ssp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002929 isci_request_process_response_iu(task, resp_iu,
Dan Williamsb7645812011-05-08 02:35:32 -07002930 &isci_host->pdev->dev);
Dan Williams6f231dd2011-07-02 22:56:22 -07002931
2932 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2933
2934 dev_err(&isci_host->pdev->dev,
2935 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2936 "SAS_PROTOCOL_SMP protocol\n",
2937 __func__);
2938
2939 } else
2940 dev_err(&isci_host->pdev->dev,
2941 "%s: unknown protocol\n", __func__);
2942
2943 /* use the task status set in the task struct by the
2944 * isci_request_process_response_iu call.
2945 */
2946 request->complete_in_target = true;
2947 response = task->task_status.resp;
2948 status = task->task_status.stat;
2949 break;
2950
2951 case SCI_IO_SUCCESS:
2952 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2953
2954 response = SAS_TASK_COMPLETE;
2955 status = SAM_STAT_GOOD;
2956 request->complete_in_target = true;
2957
2958 if (task->task_proto == SAS_PROTOCOL_SMP) {
Dan Williams67ea8382011-05-08 11:47:15 -07002959 void *rsp = &request->sci.smp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002960
2961 dev_dbg(&isci_host->pdev->dev,
2962 "%s: SMP protocol completion\n",
2963 __func__);
2964
2965 sg_copy_from_buffer(
2966 &task->smp_task.smp_resp, 1,
Dan Williamsb7645812011-05-08 02:35:32 -07002967 rsp, sizeof(struct smp_resp));
Dan Williams6f231dd2011-07-02 22:56:22 -07002968 } else if (completion_status
2969 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2970
2971 /* This was an SSP / STP / SATA transfer.
2972 * There is a possibility that less data than
2973 * the maximum was transferred.
2974 */
Dan Williamsf1f52e72011-05-10 02:28:45 -07002975 u32 transferred_length = sci_req_tx_bytes(&request->sci);
Dan Williams6f231dd2011-07-02 22:56:22 -07002976
2977 task->task_status.residual
2978 = task->total_xfer_len - transferred_length;
2979
2980 /* If there were residual bytes, call this an
2981 * underrun.
2982 */
2983 if (task->task_status.residual != 0)
2984 status = SAS_DATA_UNDERRUN;
2985
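				/* Worked example (illustrative numbers only,
				 * not from the source): an 8192-byte read for
				 * which the target delivered only 4096 bytes
				 * yields residual = 8192 - 4096 = 4096, so
				 * the I/O is reported as SAS_DATA_UNDERRUN
				 * with the residual left for the upper layer.
				 */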
				dev_dbg(&isci_host->pdev->dev,
					"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
					__func__,
					status);

			} else
				dev_dbg(&isci_host->pdev->dev,
					"%s: SCI_IO_SUCCESS\n",
					__func__);

			break;

		case SCI_IO_FAILURE_TERMINATED:
			dev_dbg(&isci_host->pdev->dev,
				"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
				__func__,
				request,
				task);

			/* The request was terminated explicitly. No handling
			 * is needed in the SCSI error handler path.
			 */
			request->complete_in_target = true;
			response = SAS_TASK_UNDELIVERED;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if ((isci_device->status == isci_stopping) ||
			    (isci_device->status == isci_stopped))
				status = SAS_DEVICE_UNKNOWN;
			else
				status = SAS_ABORTED_TASK;

			complete_to_host = isci_perform_normal_io_completion;
			break;

		case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:

			isci_request_handle_controller_specific_errors(
				isci_device, request, task, &response, &status,
				&complete_to_host);

			break;

		case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
			/* This is a special case, in that the I/O completion
			 * is telling us that the device needs a reset.
			 * In order for the device reset condition to be
			 * noticed, the I/O has to be handled in the error
			 * handler. Set the reset flag and cause the
			 * SCSI error thread to be scheduled.
			 */
			spin_lock_irqsave(&task->task_state_lock, task_flags);
			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
			spin_unlock_irqrestore(&task->task_state_lock, task_flags);

			/* Fail the I/O. */
			response = SAS_TASK_UNDELIVERED;
			status = SAM_STAT_TASK_ABORTED;

			complete_to_host = isci_perform_error_io_completion;
			request->complete_in_target = false;
			break;

		default:
			/* Catch any otherwise unhandled error codes here. */
			dev_warn(&isci_host->pdev->dev,
				 "%s: invalid completion code: 0x%x - "
				 "isci_request = %p\n",
				 __func__, completion_status, request);

			response = SAS_TASK_UNDELIVERED;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if ((isci_device->status == isci_stopping) ||
			    (isci_device->status == isci_stopped))
				status = SAS_DEVICE_UNKNOWN;
			else
				status = SAS_ABORTED_TASK;

			complete_to_host = isci_perform_error_io_completion;
			request->complete_in_target = false;
			break;
		}
		break;
	}
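
	/*
	 * At this point the (response, status, complete_to_host) triple fully
	 * describes the disposition of the I/O: 'response' and 'status' are
	 * what libsas will see, and 'complete_to_host' selects the completion
	 * path (normal, aborted, or error) used by
	 * isci_task_save_for_upper_layer_completion() below.
	 */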

	isci_request_unmap_sgl(request, isci_host->pdev);

	/* Put the completed request on the correct list */
	isci_task_save_for_upper_layer_completion(isci_host, request, response,
						  status, complete_to_host);

	/* complete the io request to the core. */
	scic_controller_complete_io(&isci_host->sci,
				    &isci_device->sci,
				    &request->sci);
	/* Set the terminated flag so the request cannot be completed or
	 * terminated again, and so any calls into abort task recognize the
	 * already-completed case.
	 */
	request->terminated = true;

	isci_host_can_dequeue(isci_host, 1);
}

/**
 * scic_sds_request_initial_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
 * base request is constructed. Entry into the initial state sets all handlers
 * for the io request object to their default handlers.
 */
static void scic_sds_request_initial_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_INITIAL
	);
}
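
/* Note: every *_state_enter() callback below follows the same pattern:
 * SET_STATE_HANDLER() installs the row of
 * scic_sds_request_state_handler_table that corresponds to the state being
 * entered, so subsequent events on the request are dispatched through that
 * state's handler set.
 */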

/**
 * scic_sds_request_constructed_state_enter() -
 * @object: The io request object that is to enter the constructed state.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state
 * handlers for the constructed state.
 */
static void scic_sds_request_constructed_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_CONSTRUCTED
	);
}

static void scic_sds_request_started_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;
	struct sci_base_state_machine *sm = &sci_req->state_machine;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
	struct sas_task *task;

	/* XXX as hch said always creating an internal sas_task for tmf
	 * requests would simplify the driver
	 */
	task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_STARTED
	);

	/* all unaccelerated request types (non ssp or ncq) are handled with
	 * substates; see the dispatch summary after this function.
	 */
	if (!task && dev->dev_type == SAS_END_DEV) {
		sci_base_state_machine_change_state(sm,
			SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
	} else if (!task &&
		   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
		    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
		sci_base_state_machine_change_state(sm,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
		sci_base_state_machine_change_state(sm,
			SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE);
	} else if (task && sas_protocol_ata(task->task_proto) &&
		   !task->ata_task.use_ncq) {
		u32 state;

		if (task->data_dir == DMA_NONE)
			state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE;
		else if (task->ata_task.dma_xfer)
			state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE;
		else /* PIO */
			state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE;

		sci_base_state_machine_change_state(sm, state);
	}
}
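
/* Dispatch summary for scic_sds_request_started_state_enter(), derived from
 * the if/else chain above:
 *
 *   TMF to a SAS end device           -> TASK_MGMT AWAIT_TC_COMPLETION
 *   SATA soft-reset TMF (SRST hi/lo)  -> SOFT_RESET AWAIT_H2D_ASSERTED
 *   SMP request                       -> SMP AWAIT_RESPONSE
 *   non-NCQ ATA, no data              -> NON_DATA AWAIT_H2D
 *   non-NCQ ATA, DMA                  -> UDMA AWAIT_TC_COMPLETION
 *   non-NCQ ATA, PIO                  -> PIO AWAIT_H2D
 *
 * SSP I/O and NCQ requests are hardware-accelerated and remain in the plain
 * STARTED state.
 */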

/**
 * scic_sds_request_completed_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
 *    object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
 * completion status and convert it to an enum sci_status to return in the
 * completion callback function.
 */
static void scic_sds_request_completed_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scic_sds_controller *scic =
		scic_sds_request_get_controller(sci_req);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	SET_STATE_HANDLER(sci_req,
			  scic_sds_request_state_handler_table,
			  SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Tell the SCI_USER that the IO request is complete */
	if (sci_req->is_task_management_request == false)
		isci_request_io_request_complete(ihost, ireq,
						 sci_req->sci_status);
	else
		isci_task_request_complete(ihost, ireq, sci_req->sci_status);
}

/**
 * scic_sds_request_aborting_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
 *    object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_ABORTING state.
 */
static void scic_sds_request_aborting_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	/* Setting the abort bit in the Task Context is required by the
	 * silicon.
	 */
	sci_req->task_context_buffer->abort = 1;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_ABORTING
	);
}

/**
 * scic_sds_request_final_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
 * state handlers in place.
 */
static void scic_sds_request_final_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_FINAL
	);
}

static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
	);
}

static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE
	);
}

static void scic_sds_smp_request_started_await_response_substate_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE
	);
}

static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION
	);
}

static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
	);

	scic_sds_remote_device_set_working_request(
		sci_req->target_device, sci_req
	);
}

static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
	);
}

static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
	);

	scic_sds_remote_device_set_working_request(
		sci_req->target_device, sci_req);
}

static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
	);
}

static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
	);
}

static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
	);
}

static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
	);
}

/**
 * scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter() -
 *
 * This state is entered when there is a TC completion failure. The hardware
 * received an unexpected condition while processing the IO request and will
 * now deliver the D2H register FIS as an unsolicited frame (UF) to complete
 * the IO.
 */
static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
	);
}

static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
	);

	scic_sds_remote_device_set_working_request(
		sci_req->target_device, sci_req
	);
}

static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_request_state_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
		);
	}
}

static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
	);
}

static const struct sci_base_state scic_sds_request_state_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = {
		.enter_state = scic_sds_request_initial_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.enter_state = scic_sds_request_constructed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.enter_state = scic_sds_request_started_state_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	},
	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
		.enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter,
	},
	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
		.enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter,
	},
	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
		.enter_state = scic_sds_smp_request_started_await_response_substate_enter,
	},
	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
		.enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter,
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.enter_state = scic_sds_request_completed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.enter_state = scic_sds_request_aborting_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = {
		.enter_state = scic_sds_request_final_state_enter,
	},
};

static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev,
					       u16 io_tag, struct scic_sds_request *sci_req)
{
	sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
			scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
	sci_base_state_machine_start(&sci_req->state_machine);

	sci_req->io_tag = io_tag;
	sci_req->owning_controller = scic;
	sci_req->target_device = sci_dev;
	sci_req->protocol = SCIC_NO_PROTOCOL;
	sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
	sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);

	sci_req->sci_status = SCI_SUCCESS;
	sci_req->scu_status = 0;
	sci_req->post_context = 0xFFFFFFFF;

	sci_req->is_task_management_request = false;

	if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		sci_req->was_tag_assigned_by_user = false;
		sci_req->task_context_buffer = &sci_req->tc;
	} else {
		sci_req->was_tag_assigned_by_user = true;

		sci_req->task_context_buffer =
			scic_sds_controller_get_task_context_buffer(scic, io_tag);
	}
}
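
/* Tag ownership note: when the caller passes SCI_CONTROLLER_INVALID_IO_TAG,
 * the core allocates a tag at start-I/O time and the request uses its
 * embedded task context (sci_req->tc); when the caller supplies a tag, the
 * task context buffer is the controller-owned TC slot for that tag.
 */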

static enum sci_status
scic_io_request_construct(struct scic_sds_controller *scic,
			  struct scic_sds_remote_device *sci_dev,
			  u16 io_tag, struct scic_sds_request *sci_req)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);

	if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE_INVALID_REMOTE_DEVICE;

	if (dev->dev_type == SAS_END_DEV)
		/* pass */;
	else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
		memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
	else if (dev_is_expander(dev))
		memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	memset(sci_req->task_context_buffer, 0,
	       offsetof(struct scu_task_context, sgl_pair_ab));

	return status;
}

enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
					    struct scic_sds_remote_device *sci_dev,
					    u16 io_tag, struct scic_sds_request *sci_req)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);

	if (dev->dev_type == SAS_END_DEV ||
	    dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
		sci_req->is_task_management_request = true;
		memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
	} else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	return status;
}
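
/* Note the asymmetry with scic_io_request_construct() above: the I/O path
 * zeroes the task context only up to the SGL pair area (the SGLs are built
 * separately), while the task-management path zeroes the whole TC,
 * presumably because a TMF carries no scatter-gather payload.
 */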

static enum sci_status isci_request_ssp_request_construct(
	struct isci_request *request)
{
	enum sci_status status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);
	status = scic_io_request_construct_basic_ssp(&request->sci);
	return status;
}

static enum sci_status isci_request_stp_request_construct(
	struct isci_request *request)
{
	struct sas_task *task = isci_request_access_task(request);
	enum sci_status status;
	struct host_to_dev_fis *register_fis;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);

	/* Get the host_to_dev_fis from the core and copy
	 * the fis from the task into it.
	 */
	register_fis = isci_sata_task_to_fis_copy(task);

	status = scic_io_request_construct_basic_sata(&request->sci);

	/* Set the ncq tag in the fis, from the queued
	 * command in the task.
	 */
	if (isci_sata_is_task_ncq(task)) {
		isci_sata_set_ncq_tag(
			register_fis,
			task
			);
	}

	return status;
}

/*
 * scu_smp_request_construct_task_context() - This function fills in the SCU
 *    Task Context for an SMP request.
 * @sci_req: This parameter specifies the smp request object being
 *    constructed.
 *
 * The following important settings are utilized:
 *   -# task_type == SCU_TASK_TYPE_SMP. This simply indicates that a normal
 *      request type (i.e. non-raw frame) is being utilized to perform task
 *      management.
 *   -# control_frame == 1. This ensures that the proper endianness is set so
 *      that the bytes are transmitted in the right order for an SMP request
 *      frame.
 */
static void
scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
				       struct smp_req *smp_req)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *scic;
	struct scic_sds_remote_device *sci_dev;
	struct scic_sds_port *sci_port;
	struct scu_task_context *task_context;
	ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);

	/* byte swap the smp request. */
	sci_swab32_cpy(&sci_req->smp.cmd, smp_req,
		       word_cnt);

	task_context = scic_sds_request_get_task_context(sci_req);

	scic = scic_sds_request_get_controller(sci_req);
	sci_dev = scic_sds_request_get_device(sci_req);
	sci_port = scic_sds_request_get_port(sci_req);

	/*
	 * Fill in the TC with its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = sci_dev->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(scic);
	task_context->logical_port_index = scic_sds_port_get_index(sci_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = sci_dev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = smp_req->req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since the command IU has been built by the framework at this point,
	 * we just copy the first DWord from the command IU to this location.
	 */
	memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(scic) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(sci_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(scic) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(sci_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context; the command buffer should not contain the command header.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->smp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
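
/* Illustrative post_context layout (hypothetical field values, for reading
 * the code above): with protocol engine group 0, logical port 2, and tag
 * index 0x11, the posted context word is
 *
 *	SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
 *		| (0 << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT)
 *		| (2 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)
 *		| 0x11;
 *
 * i.e. the engine-group, port, and TCi fields are simply OR'd into one
 * command word for the hardware post queue.
 */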

static enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req)
{
	struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL);

	if (!smp_req)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;

	sci_req->protocol = SCIC_SMP_PROTOCOL;

	/* Construct the SMP SCU Task Context */
	memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req));

	/*
	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length; see the worked example below.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
		/* Default - zero is a valid default for 2.0. */
		}
	}
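
	/* Example: a SAS 1.x initiator sends SMP_DISCOVER with req_len == 0;
	 * under the SAS 2.0 interpretation that request is actually 2 dwords
	 * long, so req_len is patched to 2 before the TC is built. The
	 * specific dword counts used above follow the SMP function
	 * definitions.
	 */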

	scu_smp_request_construct_task_context(sci_req, smp_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	kfree(smp_req);

	return SCI_SUCCESS;
}

/*
 * isci_smp_request_build() - This function builds the smp request.
 * @ireq: This parameter points to the isci_request allocated in the
 *    request construct function.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
	enum sci_status status = SCI_FAILURE;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scic_sds_request *sci_req = &ireq->sci;

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: request = %p\n", __func__, ireq);

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: smp_req len = %d\n",
		__func__,
		task->smp_task.smp_req.length);

	/* copy the smp command into the request's command buffer. */
	sg_copy_to_buffer(&task->smp_task.smp_req, 1,
			  &sci_req->smp.cmd,
			  sizeof(struct smp_req));

	status = scic_io_request_construct_smp(sci_req);
	if (status != SCI_SUCCESS)
		dev_warn(&ireq->isci_host->pdev->dev,
			 "%s: failed with status = %d\n",
			 __func__,
			 status);

	return status;
}

/**
 * isci_io_request_build() - This function builds the io request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @request: This parameter points to the isci_request object allocated in the
 *    request construct function.
 * @isci_device: This parameter is the handle for the sci core's remote device
 *    object that is the destination for this request.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_io_request_build(
	struct isci_host *isci_host,
	struct isci_request *request,
	struct isci_remote_device *isci_device)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(request);
	struct scic_sds_remote_device *sci_device = &isci_device->sci;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = 0x%p; request = %p, "
		"num_scatter = %d\n",
		__func__,
		isci_device,
		request,
		task->num_scatter);

	/* map the sgl addresses, if present.
	 * libata does the mapping for sata devices
	 * before we get the request.
	 */
	if (task->num_scatter &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto)) {

		request->num_sg_entries = dma_map_sg(
			&isci_host->pdev->dev,
			task->scatter,
			task->num_scatter,
			task->data_dir
			);

		if (request->num_sg_entries == 0)
			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	}

	/* build the common request object. For now,
	 * we will let the core allocate the IO tag.
	 */
	status = scic_io_request_construct(&isci_host->sci, sci_device,
					   SCI_CONTROLLER_INVALID_IO_TAG,
					   &request->sci);

	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: failed request construct\n",
			 __func__);
		return SCI_FAILURE;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		status = isci_smp_request_build(request);
		break;
	case SAS_PROTOCOL_SSP:
		status = isci_request_ssp_request_construct(request);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		status = isci_request_stp_request_construct(request);
		break;
	default:
		dev_warn(&isci_host->pdev->dev,
			 "%s: unknown protocol\n", __func__);
		return SCI_FAILURE;
	}

	return SCI_SUCCESS;
}

/**
 * isci_request_alloc_core() - This function gets the request object from the
 *    isci_host dma cache.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter will contain the pointer to the new
 *    isci_request object.
 * @isci_device: This parameter is the pointer to the isci remote device object
 *    that is the destination for this request.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * 0 on successful completion, or a negative error code.
 */
static int isci_request_alloc_core(
	struct isci_host *isci_host,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int ret = 0;
	dma_addr_t handle;
	struct isci_request *request;

	/* get pointer to dma memory. This actually points
	 * to both the isci_request object and the sci request
	 * object. The isci object is at the beginning of the
	 * memory allocated here.
	 */
	request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
	if (!request) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: dma_pool_alloc returned NULL\n", __func__);
		return -ENOMEM;
	}

	/* initialize the request object. */
	spin_lock_init(&request->state_lock);
	request->request_daddr = handle;
	request->isci_host = isci_host;
	request->isci_device = isci_device;
	request->io_request_completion = NULL;
	request->terminated = false;

	request->num_sg_entries = 0;

	request->complete_in_target = false;

	INIT_LIST_HEAD(&request->completed_node);
	INIT_LIST_HEAD(&request->dev_node);

	*isci_request = request;
	isci_request_change_state(request, allocated);

	return ret;
}

static int isci_request_alloc_io(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int retval = isci_request_alloc_core(isci_host, isci_request,
					     isci_device, gfp_flags);

	if (!retval) {
		(*isci_request)->ttype_ptr.io_task_ptr = task;
		(*isci_request)->ttype = io_task;

		task->lldd_task = *isci_request;
	}
	return retval;
}
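
/* The task->lldd_task back-pointer set above is what lets later paths (e.g.
 * abort handling) get from a libsas sas_task back to the owning
 * isci_request.
 */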

/**
 * isci_request_alloc_tmf() - This function gets the request object from the
 *    isci_host dma cache and initializes the relevant fields for a task
 *    management request.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_tmf: This parameter is the task management struct from the upper
 *    layer driver.
 * @isci_request: This parameter will contain the pointer to the new
 *    isci_request object.
 * @isci_device: This parameter is the pointer to the isci remote device object
 *    that is the destination for this request.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * 0 on successful completion, or a negative error code.
 */
int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int retval = isci_request_alloc_core(isci_host, isci_request,
					     isci_device, gfp_flags);

	if (!retval) {
		(*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
		(*isci_request)->ttype = tmf_task;
	}
	return retval;
}

/**
 * isci_request_execute() - This function allocates the isci_request object,
 *    builds it, and fills in some common fields.
 * @isci_host: This parameter specifies the ISCI host object
 * @task: This parameter is the task struct from the upper layer driver.
 * @isci_request: This parameter will contain the pointer to the new
 *    isci_request object.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * 0 on successful completion, or a failure code; see the usage sketch
 * after this function.
 */
int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **isci_request,
	gfp_t gfp_flags)
{
	int ret = 0;
	struct scic_sds_remote_device *sci_device;
	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
	struct isci_remote_device *isci_device;
	struct isci_request *request;
	unsigned long flags;

	isci_device = task->dev->lldd_dev;
	sci_device = &isci_device->sci;

	/* do common allocation and init of request object. */
	ret = isci_request_alloc_io(
		isci_host,
		task,
		&request,
		isci_device,
		gfp_flags
		);

	if (ret)
		goto out;

	status = isci_io_request_build(isci_host, request, isci_device);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: request_construct failed - status = 0x%x\n",
			 __func__,
			 status);
		goto out;
	}

	spin_lock_irqsave(&isci_host->scic_lock, flags);

	/* send the request, let the core assign the IO TAG. */
	status = scic_controller_start_io(&isci_host->sci, sci_device,
					  &request->sci,
					  SCI_CONTROLLER_INVALID_IO_TAG);
	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: failed request start (0x%x)\n",
			 __func__, status);
		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
		goto out;
	}

	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 *
	 * In either case, hold onto the I/O for later.
	 *
	 * Update its status and add it to the list in the
	 * remote device object.
	 */
	isci_request_change_state(request, started);
	list_add(&request->dev_node, &isci_device->reqs_in_process);

	if (status == SCI_SUCCESS) {
		/* Save the tag for possible task mgmt later. */
		request->io_tag = request->sci.io_tag;
	} else {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		request->terminated = true;
	}
	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		isci_execpath_callback(isci_host, task,
				       sas_task_abort);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}

 out:
	if (status != SCI_SUCCESS) {
		/* release dma memory on failure. */
		isci_request_free(isci_host, request);
		request = NULL;
		ret = SCI_FAILURE;
	}

	*isci_request = request;
	return ret;
}
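
/* Minimal usage sketch (hypothetical caller; locking and sas_task error
 * handling elided), assuming an execute path like isci_task_execute_task():
 *
 *	struct isci_request *ireq = NULL;
 *	int err = isci_request_execute(ihost, task, &ireq, gfp_flags);
 *
 *	if (err)
 *		return err;       (the request was already freed on failure)
 *
 * On success the request sits on the device's reqs_in_process list and is
 * eventually completed back through isci_request_io_request_complete()
 * above.
 */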