/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "sas.h"

/**
 * This method returns the SGL element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index)
{
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;

	if (sgl_pair_index == 0) {
		return &task_context->sgl_pair_ab;
	} else if (sgl_pair_index == 1) {
		return &task_context->sgl_pair_cd;
	}

	return &sci_req->sg_table[sgl_pair_index - 2];
}
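
/*
 * Editor's illustration (not driver code): the first two SGL element pairs
 * live in the task context itself; every pair after that comes from the
 * request's external SG table.  For a request with four pairs the lookups
 * resolve as:
 *
 *	scic_sds_request_get_sgl_element_pair(req, 0) -> &tc->sgl_pair_ab
 *	scic_sds_request_get_sgl_element_pair(req, 1) -> &tc->sgl_pair_cd
 *	scic_sds_request_get_sgl_element_pair(req, 2) -> &req->sg_table[0]
 *	scic_sds_request_get_sgl_element_pair(req, 3) -> &req->sg_table[1]
 */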

/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 */
void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
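
/*
 * Editor's sketch (derived from the routine above, not driver code): for a
 * three-element scatterlist the builder emits a chain of element pairs in
 * which each pair's next_pair_{upper,lower} fields hold the DMA address of
 * the pair that follows:
 *
 *	pair 0: A = sg[0], B = sg[1], next_pair -> pair 1
 *	pair 1: A = sg[2], B = zeroed, next_pair = 0 (end of chain)
 */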

static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_req)
{
	if (sci_req->was_tag_assigned_by_user == false)
		sci_req->task_context_buffer = &sci_req->tc;
}

static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}
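
/*
 * Editor's note (assumed helper semantics, not a quote of the driver):
 * sci_swab32_cpy() byte-swaps each 32-bit word as it copies, so the CDB
 * built above reaches the SCU with every u32 in big-endian byte order.
 * A minimal sketch of such a word-wise swap:
 *
 *	u32 *dest = dest_buf, *src = src_buf;
 *
 *	while (--word_cnt >= 0)
 *		dest[word_cnt] = swab32(src[word_cnt]);
 */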

static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request: This parameter specifies the request for which the task
 *    context is being constructed.
 * @task_context: This parameter specifies the SCU task context to construct.
 */
static void scu_ssp_request_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sds_request);
	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sds_request->io_tag; */
	task_context->task_phase = 0x01;

	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 *
		 * The I/O tag index is not assigned here because we have to
		 * wait until we get a TCi.
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context.
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context.
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
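
/*
 * Editor's illustration (hypothetical values, not driver code): post_context
 * is the OR of the post-TC request type, the protocol engine group, the
 * logical port index and, when the tag is already known, the task context
 * index:
 *
 *	post_context = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
 *		| (pe_group << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT)
 *		| (port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)
 *		| tci;
 *
 * The tci term is deferred to the constructed-state start handler whenever
 * the controller, rather than the user, allocates the tag.
 */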

/**
 * This method will fill in the SCU Task Context for an SSP IO request.
 * @sci_req: This parameter specifies the IO request whose task context is
 *    being constructed.
 * @dir: This parameter specifies the DMA direction of the data transfer.
 * @len: This parameter specifies the transfer length, in bytes.
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}

static void scic_sds_ssp_task_request_assign_buffers(struct scic_sds_request *sci_req)
{
	if (sci_req->was_tag_assigned_by_user == false)
		sci_req->task_context_buffer = &sci_req->tc;
}

/**
 * This method will fill in the SCU Task Context for an SSP Task request.
 * The following important settings are utilized:
 *
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same
 *      remote node.
 *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  This indicates that a raw
 *      task frame is being utilized to perform task management.
 *   -# control_frame == 1.  This ensures that the proper endianness is set
 *      so that the bytes are transmitted in the right order for a task
 *      frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}
/**
 * This method constructs the SATA request object.
 * @sci_req: This parameter specifies the request object being constructed.
 * @len: This parameter specifies the transfer length, in bytes.
 * @dir: This parameter specifies the DMA data direction.
 * @copy: This parameter specifies whether received frame data should be
 *    copied; it is passed through to the PIO request construction.
 *
 * enum sci_status
 */
static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low)
			return scic_sds_stp_soft_reset_request_construct(sci_req);

		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Request 0x%p received un-handled SAT "
			"management protocol 0x%x.\n",
			__func__, sci_req, tmf->tmf_code);

		return SCI_FAILURE;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* non data */
	if (task->data_dir == DMA_NONE)
		return scic_sds_stp_non_data_request_construct(sci_req);

	/* NCQ */
	if (task->ata_task.use_ncq)
		return scic_sds_stp_ncq_request_construct(sci_req, len, dir);

	/* DMA */
	if (task->ata_task.dma_xfer)
		return scic_sds_stp_udma_request_construct(sci_req, len, dir);

	/* PIO */
	return scic_sds_stp_pio_request_construct(sci_req, copy);
}
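
/*
 * Editor's summary of the dispatch above, derived from the code:
 *
 *	SATA SRST high/low TMF  -> scic_sds_stp_soft_reset_request_construct()
 *	data_dir == DMA_NONE    -> scic_sds_stp_non_data_request_construct()
 *	ata_task.use_ncq        -> scic_sds_stp_ncq_request_construct()
 *	ata_task.dma_xfer       -> scic_sds_stp_udma_request_construct()
 *	otherwise (PIO)         -> scic_sds_stp_pio_request_construct()
 */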

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	bool copy;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir != DMA_NONE);

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status = SCI_SUCCESS;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			status = scic_sds_stp_soft_reset_request_construct(sci_req);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct scic_sds_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
	}

	return ret_val;
}
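
/*
 * Editor's worked example, using the constants from the comment above:
 * with sizeof(struct scu_task_context) == 256 (0x100) and data_offset at
 * 0x2C into the task context, a request whose tag decodes to TCi == 3
 * reads from
 *
 *	scu_reg_base + 0x200000 + 0x2C + (3 * 0x100) = scu_reg_base + 0x20032C
 *
 * which matches BAR1 + 20002Ch + (256 * TCi).
 */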

enum sci_status
scic_sds_request_start(struct scic_sds_request *request)
{
	if (request->device_sequence !=
	    scic_sds_remote_device_get_sequence(request->target_device))
		return SCI_FAILURE;

	if (request->state_handlers->start_handler)
		return request->state_handlers->start_handler(request);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request requested to start while in wrong "
		 "state %d\n",
		 __func__,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *request)
{
	if (request->state_handlers->abort_handler)
		return request->state_handlers->abort_handler(request);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request requested to abort while in wrong "
		 "state %d\n",
		 __func__,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status scic_sds_io_request_event_handler(
	struct scic_sds_request *request,
	u32 event_code)
{
	if (request->state_handlers->event_handler)
		return request->state_handlers->event_handler(request, event_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given event code notification %x while "
		 "in wrong state %d\n",
		 __func__,
		 event_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/**
 * This method invokes the core state frame handler for the
 * SCIC_SDS_IO_REQUEST_T object.
 * @request: The SCIC_SDS_IO_REQUEST_T object for which frame handling is to
 *    be executed.
 * @frame_index: The frame index returned by the hardware for the request
 *    object.
 *
 * enum sci_status
 */
enum sci_status scic_sds_io_request_frame_handler(
	struct scic_sds_request *request,
	u32 frame_index)
{
	if (request->state_handlers->frame_handler)
		return request->state_handlers->frame_handler(request, frame_index);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given unexpected frame %x while in "
		 "state %d\n",
		 __func__,
		 frame_index,
		 sci_base_state_machine_get_state(&request->state_machine));

	scic_sds_controller_release_frame(request->owning_controller, frame_index);
	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &sci_req->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

/*
 * This method implements the action taken when a constructed
 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
 * This method will, if necessary, allocate a TCi for the io request object
 * and then will, if necessary, copy the constructed TC data into the actual
 * TC buffer.  If everything is successful the post context field is updated
 * with the TCi so the controller can post the request to the hardware.
 *
 * enum sci_status SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
 */
static enum sci_status scic_sds_request_constructed_state_start_handler(
	struct scic_sds_request *request)
{
	struct scu_task_context *task_context;

	if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		request->io_tag =
			scic_controller_allocate_io_tag(request->owning_controller);
	}

	/* Record the IO Tag in the request */
	if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = request->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = request->io_tag;
			task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/*
			 * STP/SATA Frame
			 * task_context->type.stp.ncq_tag = request->ncq_tag;
			 */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* @todo When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer
		 */
		if (request->was_tag_assigned_by_user == false) {
			scic_sds_controller_copy_task_context(
				request->owning_controller, request);
		}

		/* Add to the post_context the io tag value */
		request->post_context |= scic_sds_io_tag_get_index(request->io_tag);

		/* Everything is good go ahead and change state */
		sci_base_state_machine_change_state(&request->state_machine,
						    SCI_BASE_REQUEST_STATE_STARTED);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
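
/*
 * Editor's note: scic_sds_io_tag_get_index() extracts the task context index
 * (TCi) portion of an io_tag; the remaining bits are assumed to carry a
 * sequence number used to detect stale tags (see the tag helpers in the
 * controller code).  OR-ing the index into post_context here completes the
 * value that scu_ssp_request_construct_task_context() left partially built
 * when the tag was not yet known.
 */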

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  Since the request
 * has not yet been posted to the hardware the request transitions to the
 * completed state.
 *
 * enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_constructed_state_abort_handler(
	struct scic_sds_request *request)
{
	/*
	 * This request has been terminated by the user; make sure that the
	 * correct status code is returned.
	 */
	scic_sds_request_set_status(request,
				    SCU_TASK_DONE_TASK_ABORT,
				    SCI_FAILURE_IO_TERMINATED);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * * STARTED STATE HANDLERS
 * ***************************************************************************** */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  Since the request
 * has been posted to the hardware the io request state is changed to the
 * aborting state.
 *
 * enum sci_status SCI_SUCCESS
 */
enum sci_status scic_sds_request_started_state_abort_handler(
	struct scic_sds_request *request)
{
	if (request->has_started_substate_machine)
		sci_base_state_machine_stop(&request->started_substate_machine);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_ABORTING);
	return SCI_SUCCESS;
}

/*
 * scic_sds_request_started_state_tc_completion_handler() - This method
 *    processes TC (task context) completions for normal IO requests (i.e.
 *    Task/Abort Completions of type 0).  This method will update the
 *    SCIC_SDS_IO_REQUEST_T::status field.
 * @sci_req: This parameter specifies the request for which a completion
 *    occurred.
 * @completion_code: This parameter specifies the completion code received
 *    from the SCU.
 */
static enum sci_status
scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req,
						     u32 completion_code)
{
	u8 datapres;
	struct ssp_response_iu *resp_iu;

	/*
	 * TODO: Any SDMA return code of other than 0 is bad
	 *       decode 0x003C0000 to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/*
		 * There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_GOOD,
				SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		}
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/*
		 * @todo With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;

		if ((datapres == 0x01) || (datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
		break;

	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
{
	if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED &&
	    request->has_started_substate_machine == false)
		return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
	else if (request->state_handlers->tc_completion_handler)
		return request->state_handlers->tc_completion_handler(request, completion_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given task completion notification %x "
		 "while in wrong state %d\n",
		 __func__,
		 completion_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request.  This method
 * first determines the frame type received.  If this is a response frame then
 * the response data is copied to the io request response buffer for
 * processing at completion time.  If the frame type is not a response frame
 * an error is logged.
 *
 * enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
 */
static enum sci_status
scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
					     u32 frame_index)
{
	enum sci_status status;
	u32 *frame_header;
	struct ssp_frame_hdr ssp_hdr;
	ssize_t word_cnt;

	status = scic_sds_unsolicited_frame_control_get_header(
		&(scic_sds_request_get_controller(sci_req)->uf_control),
		frame_index,
		(void **)&frame_header);

	word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
	sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

	if (ssp_hdr.frame_type == SSP_RESPONSE) {
		struct ssp_response_iu *resp_iu;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		status = scic_sds_unsolicited_frame_control_get_buffer(
			&(scic_sds_request_get_controller(sci_req)->uf_control),
			frame_index,
			(void **)&resp_iu);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       resp_iu, word_cnt);

		resp_iu = &sci_req->ssp.rsp;

		if ((resp_iu->datapres == 0x01) ||
		    (resp_iu->datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
	} else {
		/* This was not a response frame why did it get forwarded? */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p received unexpected "
			"frame %d type 0x%02x\n",
			__func__,
			sci_req,
			frame_index,
			ssp_hdr.frame_type);
	}

	/*
	 * In any case we are done with this frame buffer return it to the
	 * controller
	 */
	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * * COMPLETED STATE HANDLERS
 * ***************************************************************************** */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_complete() request.  This method frees
 * up any io request resources that have been allocated and transitions the
 * request to its final state.  Consider stopping the state machine instead
 * of transitioning to the final state?
 *
 * enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_completed_state_complete_handler(
	struct scic_sds_request *request)
{
	if (request->was_tag_assigned_by_user != true) {
		scic_controller_free_io_tag(
			request->owning_controller, request->io_tag);
	}

	if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
		scic_sds_controller_release_frame(
			request->owning_controller, request->saved_rx_frame_index);
	}

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_FINAL);
	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * * ABORTING STATE HANDLERS
 * ***************************************************************************** */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  This method is the
 * io request aborting state abort handler.  On receipt of multiple terminate
 * requests the io request will transition to the completed state.  This
 * should not happen in normal operation.
 *
 * enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_abort_handler(
	struct scic_sds_request *request)
{
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_task_completion() request.  This method
 * decodes the completion type waiting for the abort task complete
 * notification.  When the abort task complete is received the io request
 * transitions to the completed state.
 *
 * enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * Unless we get some strange error wait for the task abort to
		 * complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request.  This method
 * discards the unsolicited frame since we are waiting for the abort task
 * completion.
 *
 * enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	/*
	 * TODO: Is it even possible to get an unsolicited frame in the
	 * aborting state?
	 */

	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

/**
 * This method processes the completion's transport layer (TL) status to
 * determine if the RAW task management frame was sent successfully.  If the
 * raw frame was sent successfully, then the state for the task request
 * transitions to waiting for a response frame.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status
 *    information for the TC.
 *
 * Indicates whether the TC completion was handled successfully.
 * SCI_SUCCESS: currently this method always returns success.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/*
		 * Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again.
		 */
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n",
			 __func__,
			 sci_req,
			 completion_code);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/**
 * This method is responsible for processing a terminate/abort request for
 * this TC while the request is waiting for the task management response
 * unsolicited frame.
 * @request: This parameter specifies the request for which the termination
 *    was requested.
 *
 * This method returns an indication as to whether the abort request was
 * successfully handled.  @todo Update to ensure the received UF doesn't
 * cause damage to subsequent requests (i.e. put the extended tag in a
 * holding pattern for this particular device).
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler(
	struct scic_sds_request *request)
{
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_ABORTING);
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/**
 * This method processes an unsolicited frame while the task mgmt request is
 * waiting for a response frame.  It will copy the response data, release
 * the unsolicited frame, and transition the request to the
 * SCI_BASE_REQUEST_STATE_COMPLETED state.
 * @request: This parameter specifies the request for which the unsolicited
 *    frame was received.
 * @frame_index: This parameter indicates the unsolicited frame index that
 *    should contain the response.
 *
 * This method returns an indication of whether the TC response frame was
 * handled successfully or not.  SCI_SUCCESS is currently always returned,
 * indicating successful processing of the TC response.  @todo Should
 * probably update to check the frame type and make sure it is a response
 * frame.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler(
	struct scic_sds_request *request,
	u32 frame_index)
{
	scic_sds_io_request_copy_response(request);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	scic_sds_controller_release_frame(request->owning_controller,
					  frame_index);
	return SCI_SUCCESS;
}

static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = { },
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.start_handler = scic_sds_request_constructed_state_start_handler,
		.abort_handler = scic_sds_request_constructed_state_abort_handler,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
		.frame_handler = scic_sds_request_started_state_frame_handler,
	},
	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler,
	},
	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
		.abort_handler = scic_sds_ssp_task_request_await_tc_response_abort_handler,
		.frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.complete_handler = scic_sds_request_completed_state_complete_handler,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.abort_handler = scic_sds_request_aborting_state_abort_handler,
		.tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
		.frame_handler = scic_sds_request_aborting_state_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = { },
};

/**
 * isci_request_process_response_iu() - This function sets the status and
 *    response iu, in the task struct, from the request object for the upper
 *    layer driver.
 * @task: This parameter is the task struct from the upper layer driver.
 * @resp_iu: This parameter points to the response iu of the completed
 *    request.
 * @dev: This parameter specifies the linux device struct.
 *
 * none.
 */
static void isci_request_process_response_iu(
	struct sas_task *task,
	struct ssp_response_iu *resp_iu,
	struct device *dev)
{
	dev_dbg(dev,
		"%s: resp_iu = %p "
		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
		"resp_iu->response_data_len = %x, "
		"resp_iu->sense_data_len = %x\nresponse data: ",
		__func__,
		resp_iu,
		resp_iu->status,
		resp_iu->datapres,
		resp_iu->response_data_len,
		resp_iu->sense_data_len);

	task->task_status.stat = resp_iu->status;

	/* libsas updates the task status fields based on the response iu. */
	sas_ssp_task_response(dev, task, resp_iu);
}

/**
 * isci_request_set_open_reject_status() - This function prepares the I/O
 *    completion for OPEN_REJECT conditions.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the task struct from the upper layer driver.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @complete_to_host_ptr: This parameter specifies the action to be taken by
 *    the LLDD with respect to completing this request or forcing an abort
 *    condition on the I/O.
 * @open_rej_reason: This parameter specifies the encoded reason for the
 *    abandon-class reject.
 *
 * none.
 */
static void isci_request_set_open_reject_status(
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr,
	enum sas_open_rej_reason open_rej_reason)
{
	/* Task in the target is done. */
	request->complete_in_target = true;
	*response_ptr = SAS_TASK_UNDELIVERED;
	*status_ptr = SAS_OPEN_REJECT;
	*complete_to_host_ptr = isci_perform_normal_io_completion;
	task->task_status.open_rej_reason = open_rej_reason;
}

/**
 * isci_request_handle_controller_specific_errors() - This function decodes
 *    controller-specific I/O completion error conditions.
 * @isci_device: This parameter is the isci remote device object for the I/O.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the task struct from the upper layer driver.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @complete_to_host_ptr: This parameter specifies the action to be taken by
 *    the LLDD with respect to completing this request or forcing an abort
 *    condition on the I/O.
 *
 * none.
 */
static void isci_request_handle_controller_specific_errors(
	struct isci_remote_device *isci_device,
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr)
1377{
1378 unsigned int cstatus;
1379
Dan Williamsf1f52e72011-05-10 02:28:45 -07001380 cstatus = request->sci.scu_status;
Dan Williams6f231dd2011-07-02 22:56:22 -07001381
1382 dev_dbg(&request->isci_host->pdev->dev,
1383 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
1384 "- controller status = 0x%x\n",
1385 __func__, request, cstatus);
1386
1387 /* Decode the controller-specific errors; most
1388 * important is to recognize those conditions in which
1389 * the target may still have a task outstanding that
1390 * must be aborted.
1391 *
1392 * Note that there are SCU completion codes being
1393 * named in the decode below for which SCIC has already
1394 * done work to handle them in a way other than as
1395 * a controller-specific completion code; these are left
1396 * in the decode below for completeness sake.
1397 */
1398 switch (cstatus) {
1399 case SCU_TASK_DONE_DMASETUP_DIRERR:
1400 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
1401 case SCU_TASK_DONE_XFERCNT_ERR:
1402 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
1403 if (task->task_proto == SAS_PROTOCOL_SMP) {
1404 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
1405 *response_ptr = SAS_TASK_COMPLETE;
1406
1407 /* See if the device has been/is being stopped. Note
1408 * that we ignore the quiesce state, since we are
1409 * concerned about the actual device state.
1410 */
1411 if ((isci_device->status == isci_stopping) ||
1412 (isci_device->status == isci_stopped))
1413 *status_ptr = SAS_DEVICE_UNKNOWN;
1414 else
1415 *status_ptr = SAS_ABORTED_TASK;
1416
1417 request->complete_in_target = true;
1418
1419 *complete_to_host_ptr =
1420 isci_perform_normal_io_completion;
1421 } else {
1422 /* Task in the target is not done. */
1423 *response_ptr = SAS_TASK_UNDELIVERED;
1424
1425 if ((isci_device->status == isci_stopping) ||
1426 (isci_device->status == isci_stopped))
1427 *status_ptr = SAS_DEVICE_UNKNOWN;
1428 else
1429 *status_ptr = SAM_STAT_TASK_ABORTED;
1430
1431 request->complete_in_target = false;
1432
1433 *complete_to_host_ptr =
1434 isci_perform_error_io_completion;
1435 }
1436
1437 break;
1438
1439 case SCU_TASK_DONE_CRC_ERR:
1440 case SCU_TASK_DONE_NAK_CMD_ERR:
1441 case SCU_TASK_DONE_EXCESS_DATA:
1442 case SCU_TASK_DONE_UNEXP_FIS:
1443 /* Also SCU_TASK_DONE_UNEXP_RESP: */
1444 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
1445 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
1446 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
1447 /* These are conditions in which the target
1448 * has completed the task, so that no cleanup
1449 * is necessary.
1450 */
1451 *response_ptr = SAS_TASK_COMPLETE;
1452
1453 /* See if the device has been/is being stopped. Note
1454 * that we ignore the quiesce state, since we are
1455 * concerned about the actual device state.
1456 */
1457 if ((isci_device->status == isci_stopping) ||
1458 (isci_device->status == isci_stopped))
1459 *status_ptr = SAS_DEVICE_UNKNOWN;
1460 else
1461 *status_ptr = SAS_ABORTED_TASK;
1462
1463 request->complete_in_target = true;
1464
1465 *complete_to_host_ptr = isci_perform_normal_io_completion;
1466 break;
1467
1468
1469 /* Note that the only open reject completion codes seen here will be
1470 * abandon-class codes; all others are automatically retried in the SCU.
1471 */
1472 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
1473
1474 isci_request_set_open_reject_status(
1475 request, task, response_ptr, status_ptr,
1476 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
1477 break;
1478
1479 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
1480
1481 /* Note - the return of AB0 will change when
1482 * libsas implements detection of zone violations.
1483 */
1484 isci_request_set_open_reject_status(
1485 request, task, response_ptr, status_ptr,
1486 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
1487 break;
1488
1489 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
1490
1491 isci_request_set_open_reject_status(
1492 request, task, response_ptr, status_ptr,
1493 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
1494 break;
1495
1496 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
1497
1498 isci_request_set_open_reject_status(
1499 request, task, response_ptr, status_ptr,
1500 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
1501 break;
1502
1503 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
1504
1505 isci_request_set_open_reject_status(
1506 request, task, response_ptr, status_ptr,
1507 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
1508 break;
1509
1510 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
1511
1512 isci_request_set_open_reject_status(
1513 request, task, response_ptr, status_ptr,
1514 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
1515 break;
1516
1517 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
1518
1519 isci_request_set_open_reject_status(
1520 request, task, response_ptr, status_ptr,
1521 complete_to_host_ptr, SAS_OREJ_STP_NORES);
1522 break;
1523
1524 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
1525
1526 isci_request_set_open_reject_status(
1527 request, task, response_ptr, status_ptr,
1528 complete_to_host_ptr, SAS_OREJ_EPROTO);
1529 break;
1530
1531 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
1532
1533 isci_request_set_open_reject_status(
1534 request, task, response_ptr, status_ptr,
1535 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
1536 break;
1537
1538 case SCU_TASK_DONE_LL_R_ERR:
1539 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
1540 case SCU_TASK_DONE_LL_PERR:
1541 case SCU_TASK_DONE_LL_SY_TERM:
1542 /* Also SCU_TASK_DONE_NAK_ERR:*/
1543 case SCU_TASK_DONE_LL_LF_TERM:
1544 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
1545 case SCU_TASK_DONE_LL_ABORT_ERR:
1546 case SCU_TASK_DONE_SEQ_INV_TYPE:
1547 /* Also SCU_TASK_DONE_UNEXP_XR: */
1548 case SCU_TASK_DONE_XR_IU_LEN_ERR:
1549 case SCU_TASK_DONE_INV_FIS_LEN:
1550 /* Also SCU_TASK_DONE_XR_WD_LEN: */
1551 case SCU_TASK_DONE_SDMA_ERR:
1552 case SCU_TASK_DONE_OFFSET_ERR:
1553 case SCU_TASK_DONE_MAX_PLD_ERR:
1554 case SCU_TASK_DONE_LF_ERR:
1555 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
1556 case SCU_TASK_DONE_SMP_LL_RX_ERR:
1557 case SCU_TASK_DONE_UNEXP_DATA:
1558 case SCU_TASK_DONE_UNEXP_SDBFIS:
1559 case SCU_TASK_DONE_REG_ERR:
1560 case SCU_TASK_DONE_SDB_ERR:
1561 case SCU_TASK_DONE_TASK_ABORT:
1562 default:
1563 /* Task in the target is not done. */
1564 *response_ptr = SAS_TASK_UNDELIVERED;
1565 *status_ptr = SAM_STAT_TASK_ABORTED;
1566 request->complete_in_target = false;
1567
1568 *complete_to_host_ptr = isci_perform_error_io_completion;
1569 break;
1570 }
1571}
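
/* A minimal caller sketch (illustrative only, excluded from the build):
 * the decode above reports its verdict through three output pointers.
 * The local names here are hypothetical; in this driver the caller is
 * the SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR arm of the I/O completion
 * path further below.
 */
#if 0
	enum service_response response;
	enum exec_status status;
	enum isci_completion_selection disposition;

	isci_request_handle_controller_specific_errors(isci_device, request,
						       task, &response,
						       &status, &disposition);
	/* ... then hand (response, status, disposition) to the
	 * save-for-upper-layer-completion path ...
	 */
#endif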
1572
1573/**
1574 * isci_task_save_for_upper_layer_completion() - This function saves the
1575 * request for later completion to the upper layer driver.
1576 * @host: This parameter is a pointer to the host on which the request
1577 * should be queued (either as an error or success).
1578 * @request: This parameter is the completed request.
1579 * @response: This parameter is the response code for the completed task.
1580 * @status: This parameter is the status code for the completed task.
1581 *
1582 * none.
1583 */
1584static void isci_task_save_for_upper_layer_completion(
1585 struct isci_host *host,
1586 struct isci_request *request,
1587 enum service_response response,
1588 enum exec_status status,
1589 enum isci_completion_selection task_notification_selection)
1590{
1591 struct sas_task *task = isci_request_access_task(request);
1592
Jeff Skirvinec6c9632011-03-04 14:06:44 -08001593 task_notification_selection
1594 = isci_task_set_completion_status(task, response, status,
1595 task_notification_selection);
Dan Williams6f231dd2011-07-02 22:56:22 -07001596
1597 /* Tasks aborted specifically by a call to the lldd_abort_task
1598 * function should not be completed to the host in the regular path.
1599 */
1600 switch (task_notification_selection) {
1601
1602 case isci_perform_normal_io_completion:
1603
1604 /* Normal notification (task_done) */
1605 dev_dbg(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07001606 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07001607 __func__,
1608 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07001609 task->task_status.resp, response,
1610 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07001611 /* Add to the completed list. */
1612 list_add(&request->completed_node,
1613 &host->requests_to_complete);
Jeff Skirvinec6c9632011-03-04 14:06:44 -08001614
1615 /* Take the request off the device's pending request list. */
1616 list_del_init(&request->dev_node);
Dan Williams6f231dd2011-07-02 22:56:22 -07001617 break;
1618
1619 case isci_perform_aborted_io_completion:
Jeff Skirvina5fde222011-03-04 14:06:42 -08001620 /* No notification to libsas because this request is
1621 * already in the abort path.
Dan Williams6f231dd2011-07-02 22:56:22 -07001622 */
1623 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07001624 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07001625 __func__,
1626 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07001627 task->task_status.resp, response,
1628 task->task_status.stat, status);
Jeff Skirvina5fde222011-03-04 14:06:42 -08001629
1630 /* Wake up whatever process was waiting for this
1631 * request to complete.
1632 */
1633 WARN_ON(request->io_request_completion == NULL);
1634
1635 if (request->io_request_completion != NULL) {
1636
1637 /* Signal whoever is waiting that this
1638 * request is complete.
1639 */
1640 complete(request->io_request_completion);
1641 }
Dan Williams6f231dd2011-07-02 22:56:22 -07001642 break;
1643
1644 case isci_perform_error_io_completion:
1645 /* Use sas_task_abort */
1646 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07001647 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07001648 __func__,
1649 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07001650 task->task_status.resp, response,
1651 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07001652 /* Add to the aborted list. */
1653 list_add(&request->completed_node,
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001654 &host->requests_to_errorback);
Dan Williams6f231dd2011-07-02 22:56:22 -07001655 break;
1656
1657 default:
1658 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07001659 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07001660 __func__,
1661 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07001662 task->task_status.resp, response,
1663 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07001664
Jeff Skirvina5fde222011-03-04 14:06:42 -08001665 /* Add to the error to libsas list. */
Dan Williams6f231dd2011-07-02 22:56:22 -07001666 list_add(&request->completed_node,
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001667 &host->requests_to_errorback);
Dan Williams6f231dd2011-07-02 22:56:22 -07001668 break;
1669 }
1670}
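
/* A minimal drain sketch (an assumption, not the driver's actual code):
 * the lists populated above are presumably walked later by the host's
 * completion handling, which notifies libsas for each saved request,
 * roughly as follows.
 */
#if 0
	struct isci_request *request, *next_request;

	list_for_each_entry_safe(request, next_request,
				 &host->requests_to_complete,
				 completed_node) {
		struct sas_task *task = isci_request_access_task(request);

		list_del_init(&request->completed_node);
		task->task_done(task);	/* normal completion to libsas */
	}
#endif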
1671
Dan Williamsf1f52e72011-05-10 02:28:45 -07001672static void isci_request_io_request_complete(struct isci_host *isci_host,
1673 struct isci_request *request,
1674 enum sci_io_status completion_status)
Dan Williams6f231dd2011-07-02 22:56:22 -07001675{
1676 struct sas_task *task = isci_request_access_task(request);
1677 struct ssp_response_iu *resp_iu;
1678 void *resp_buf;
1679 unsigned long task_flags;
Dan Williams6f231dd2011-07-02 22:56:22 -07001680 struct isci_remote_device *isci_device = request->isci_device;
1681 enum service_response response = SAS_TASK_UNDELIVERED;
1682 enum exec_status status = SAS_ABORTED_TASK;
1683 enum isci_request_status request_status;
1684 enum isci_completion_selection complete_to_host
1685 = isci_perform_normal_io_completion;
1686
1687 dev_dbg(&isci_host->pdev->dev,
1688 "%s: request = %p, task = %p,\n"
1689 "task->data_dir = %d completion_status = 0x%x\n",
1690 __func__,
1691 request,
1692 task,
1693 task->data_dir,
1694 completion_status);
1695
Jeff Skirvina5fde222011-03-04 14:06:42 -08001696 spin_lock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07001697 request_status = isci_request_get_state(request);
Dan Williams6f231dd2011-07-02 22:56:22 -07001698
1699 /* Decode the request status. Note that if the request has been
1700 * aborted by a task management function, we don't care
1701 * what the status is.
1702 */
1703 switch (request_status) {
1704
1705 case aborted:
1706 /* "aborted" indicates that the request was aborted by a task
1707 * management function, since once a task management request is
1708	 * performed by the device, the request only completes because
1709 * of the subsequent driver terminate.
1710 *
1711 * Aborted also means an external thread is explicitly managing
1712 * this request, so that we do not complete it up the stack.
1713 *
1714 * The target is still there (since the TMF was successful).
1715 */
1716 request->complete_in_target = true;
1717 response = SAS_TASK_COMPLETE;
1718
1719 /* See if the device has been/is being stopped. Note
1720 * that we ignore the quiesce state, since we are
1721 * concerned about the actual device state.
1722 */
1723 if ((isci_device->status == isci_stopping)
1724 || (isci_device->status == isci_stopped)
1725 )
1726 status = SAS_DEVICE_UNKNOWN;
1727 else
1728 status = SAS_ABORTED_TASK;
1729
1730 complete_to_host = isci_perform_aborted_io_completion;
1731 /* This was an aborted request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08001732
1733 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07001734 break;
1735
1736 case aborting:
1737 /* aborting means that the task management function tried and
1738 * failed to abort the request. We need to note the request
1739 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
1740 * target as down.
1741 *
1742 * Aborting also means an external thread is explicitly managing
1743 * this request, so that we do not complete it up the stack.
1744 */
1745 request->complete_in_target = true;
1746 response = SAS_TASK_UNDELIVERED;
1747
1748 if ((isci_device->status == isci_stopping) ||
1749 (isci_device->status == isci_stopped))
1750			/* The device has been/is being stopped. Note that
1751 * we ignore the quiesce state, since we are
1752 * concerned about the actual device state.
1753 */
1754 status = SAS_DEVICE_UNKNOWN;
1755 else
1756 status = SAS_PHY_DOWN;
1757
1758 complete_to_host = isci_perform_aborted_io_completion;
1759
1760 /* This was an aborted request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08001761
1762 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07001763 break;
1764
1765 case terminating:
1766
1767	/* This was a terminated request. This happens when
1768 * the I/O is being terminated because of an action on
1769 * the device (reset, tear down, etc.), and the I/O needs
1770 * to be completed up the stack.
1771 */
1772 request->complete_in_target = true;
1773 response = SAS_TASK_UNDELIVERED;
1774
1775 /* See if the device has been/is being stopped. Note
1776 * that we ignore the quiesce state, since we are
1777 * concerned about the actual device state.
1778 */
1779 if ((isci_device->status == isci_stopping) ||
1780 (isci_device->status == isci_stopped))
1781 status = SAS_DEVICE_UNKNOWN;
1782 else
1783 status = SAS_ABORTED_TASK;
1784
Jeff Skirvina5fde222011-03-04 14:06:42 -08001785 complete_to_host = isci_perform_aborted_io_completion;
Dan Williams6f231dd2011-07-02 22:56:22 -07001786
1787 /* This was a terminated request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08001788
1789 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07001790 break;
1791
1792 default:
1793
Jeff Skirvina5fde222011-03-04 14:06:42 -08001794 /* The request is done from an SCU HW perspective. */
1795 request->status = completed;
1796
1797 spin_unlock(&request->state_lock);
1798
Dan Williams6f231dd2011-07-02 22:56:22 -07001799 /* This is an active request being completed from the core. */
1800 switch (completion_status) {
1801
1802 case SCI_IO_FAILURE_RESPONSE_VALID:
1803 dev_dbg(&isci_host->pdev->dev,
1804 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
1805 __func__,
1806 request,
1807 task);
1808
1809 if (sas_protocol_ata(task->task_proto)) {
Dan Williams67ea8382011-05-08 11:47:15 -07001810 resp_buf = &request->sci.stp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07001811 isci_request_process_stp_response(task,
Dan Williamsb7645812011-05-08 02:35:32 -07001812 resp_buf);
Dan Williams6f231dd2011-07-02 22:56:22 -07001813 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
1814
1815 /* crack the iu response buffer. */
Dan Williams67ea8382011-05-08 11:47:15 -07001816 resp_iu = &request->sci.ssp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07001817 isci_request_process_response_iu(task, resp_iu,
Dan Williamsb7645812011-05-08 02:35:32 -07001818 &isci_host->pdev->dev);
Dan Williams6f231dd2011-07-02 22:56:22 -07001819
1820 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
1821
1822 dev_err(&isci_host->pdev->dev,
1823 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
1824 "SAS_PROTOCOL_SMP protocol\n",
1825 __func__);
1826
1827 } else
1828 dev_err(&isci_host->pdev->dev,
1829 "%s: unknown protocol\n", __func__);
1830
1831 /* use the task status set in the task struct by the
1832 * isci_request_process_response_iu call.
1833 */
1834 request->complete_in_target = true;
1835 response = task->task_status.resp;
1836 status = task->task_status.stat;
1837 break;
1838
1839 case SCI_IO_SUCCESS:
1840 case SCI_IO_SUCCESS_IO_DONE_EARLY:
1841
1842 response = SAS_TASK_COMPLETE;
1843 status = SAM_STAT_GOOD;
1844 request->complete_in_target = true;
1845
1846 if (task->task_proto == SAS_PROTOCOL_SMP) {
Dan Williams67ea8382011-05-08 11:47:15 -07001847 void *rsp = &request->sci.smp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07001848
1849 dev_dbg(&isci_host->pdev->dev,
1850 "%s: SMP protocol completion\n",
1851 __func__);
1852
1853 sg_copy_from_buffer(
1854 &task->smp_task.smp_resp, 1,
Dan Williamsb7645812011-05-08 02:35:32 -07001855 rsp, sizeof(struct smp_resp));
Dan Williams6f231dd2011-07-02 22:56:22 -07001856 } else if (completion_status
1857 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
1858
1859 /* This was an SSP / STP / SATA transfer.
1860 * There is a possibility that less data than
1861 * the maximum was transferred.
1862 */
Dan Williamsf1f52e72011-05-10 02:28:45 -07001863 u32 transferred_length = sci_req_tx_bytes(&request->sci);
Dan Williams6f231dd2011-07-02 22:56:22 -07001864
1865 task->task_status.residual
1866 = task->total_xfer_len - transferred_length;
1867
1868 /* If there were residual bytes, call this an
1869 * underrun.
1870 */
1871 if (task->task_status.residual != 0)
1872 status = SAS_DATA_UNDERRUN;
1873
1874 dev_dbg(&isci_host->pdev->dev,
1875 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
1876 __func__,
1877 status);
1878
1879 } else
1880 dev_dbg(&isci_host->pdev->dev,
1881 "%s: SCI_IO_SUCCESS\n",
1882 __func__);
1883
1884 break;
1885
1886 case SCI_IO_FAILURE_TERMINATED:
1887 dev_dbg(&isci_host->pdev->dev,
1888 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
1889 __func__,
1890 request,
1891 task);
1892
1893 /* The request was terminated explicitly. No handling
1894 * is needed in the SCSI error handler path.
1895 */
1896 request->complete_in_target = true;
1897 response = SAS_TASK_UNDELIVERED;
1898
1899 /* See if the device has been/is being stopped. Note
1900 * that we ignore the quiesce state, since we are
1901 * concerned about the actual device state.
1902 */
1903 if ((isci_device->status == isci_stopping) ||
1904 (isci_device->status == isci_stopped))
1905 status = SAS_DEVICE_UNKNOWN;
1906 else
1907 status = SAS_ABORTED_TASK;
1908
1909 complete_to_host = isci_perform_normal_io_completion;
1910 break;
1911
1912 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
1913
1914 isci_request_handle_controller_specific_errors(
1915 isci_device, request, task, &response, &status,
1916 &complete_to_host);
1917
1918 break;
1919
1920 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
1921 /* This is a special case, in that the I/O completion
1922 * is telling us that the device needs a reset.
1923 * In order for the device reset condition to be
1924 * noticed, the I/O has to be handled in the error
1925 * handler. Set the reset flag and cause the
1926 * SCSI error thread to be scheduled.
1927 */
1928 spin_lock_irqsave(&task->task_state_lock, task_flags);
1929 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1930 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
1931
Jeff Skirvinaa145102011-03-07 16:40:47 -07001932 /* Fail the I/O. */
1933 response = SAS_TASK_UNDELIVERED;
1934 status = SAM_STAT_TASK_ABORTED;
1935
Dan Williams6f231dd2011-07-02 22:56:22 -07001936 complete_to_host = isci_perform_error_io_completion;
1937 request->complete_in_target = false;
1938 break;
1939
1940 default:
1941 /* Catch any otherwise unhandled error codes here. */
1942 dev_warn(&isci_host->pdev->dev,
1943 "%s: invalid completion code: 0x%x - "
1944 "isci_request = %p\n",
1945 __func__, completion_status, request);
1946
1947 response = SAS_TASK_UNDELIVERED;
1948
1949 /* See if the device has been/is being stopped. Note
1950 * that we ignore the quiesce state, since we are
1951 * concerned about the actual device state.
1952 */
1953 if ((isci_device->status == isci_stopping) ||
1954 (isci_device->status == isci_stopped))
1955 status = SAS_DEVICE_UNKNOWN;
1956 else
1957 status = SAS_ABORTED_TASK;
1958
1959 complete_to_host = isci_perform_error_io_completion;
1960 request->complete_in_target = false;
1961 break;
1962 }
1963 break;
1964 }
1965
1966 isci_request_unmap_sgl(request, isci_host->pdev);
1967
1968 /* Put the completed request on the correct list */
1969 isci_task_save_for_upper_layer_completion(isci_host, request, response,
1970 status, complete_to_host
1971 );
1972
1973 /* complete the io request to the core. */
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00001974 scic_controller_complete_io(&isci_host->sci,
Dan Williams57f20f42011-04-21 18:14:45 -07001975 &isci_device->sci,
Dan Williams67ea8382011-05-08 11:47:15 -07001976 &request->sci);
1977	/* Set the terminated flag so the request cannot be completed or
1978	 * terminated again, and so any calls into abort
1979	 * task recognize the already-completed case.
1980 */
Dan Williams67ea8382011-05-08 11:47:15 -07001981 request->terminated = true;
Dan Williams6f231dd2011-07-02 22:56:22 -07001982
Dan Williams6f231dd2011-07-02 22:56:22 -07001983 isci_host_can_dequeue(isci_host, 1);
1984}
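
/* Worked example (hypothetical byte counts) of the residual arithmetic
 * in the SCI_IO_SUCCESS_IO_DONE_EARLY branch above: an 8 KiB transfer
 * that moved only 4 KiB leaves a 4 KiB residual, which is reported as
 * a data underrun.
 */
#if 0
	u32 total_xfer_len = 8192;	/* hypothetical */
	u32 transferred_length = 4096;	/* hypothetical */
	u32 residual = total_xfer_len - transferred_length;

	if (residual != 0)		/* residual == 4096 here */
		status = SAS_DATA_UNDERRUN;
#endif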
Dan Williamsf1f52e72011-05-10 02:28:45 -07001985
1986/**
1987 * scic_sds_request_initial_state_enter() -
1988 * @object: This parameter specifies the base object for which the state
1989 * transition is occurring.
1990 *
1991 * This method implements the actions taken when entering the
1992 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
1993 * base request is constructed. Entry into the initial state sets all handlers
1994 * for the io request object to their default handlers.
1995 */
1996static void scic_sds_request_initial_state_enter(void *object)
1997{
1998 struct scic_sds_request *sci_req = object;
1999
2000 SET_STATE_HANDLER(
2001 sci_req,
2002 scic_sds_request_state_handler_table,
2003 SCI_BASE_REQUEST_STATE_INITIAL
2004 );
2005}
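
/* Presumed effect of SET_STATE_HANDLER() (an assumption about the
 * macro, not its verbatim definition): point the request at the
 * handler set for the named state in the handler table.
 */
#if 0
	sci_req->state_handlers =
		&scic_sds_request_state_handler_table[SCI_BASE_REQUEST_STATE_INITIAL];
#endif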
2006
2007/**
2008 * scic_sds_request_constructed_state_enter() -
2009 * @object: The io request object that is to enter the constructed state.
2010 *
2011 * This method implements the actions taken when entering the
2012 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
2013 * for the constructed state.
2014 */
2015static void scic_sds_request_constructed_state_enter(void *object)
2016{
2017 struct scic_sds_request *sci_req = object;
2018
2019 SET_STATE_HANDLER(
2020 sci_req,
2021 scic_sds_request_state_handler_table,
2022 SCI_BASE_REQUEST_STATE_CONSTRUCTED
2023 );
2024}
2025
2026/**
2027 * scic_sds_request_started_state_enter() -
2028 * @object: This parameter specifies the base object for which the state
2029 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
2030 *
2031 * This method implements the actions taken when entering the
2032 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
2033 * SCSI task request, we must enter the started substate machine.
2034 */
2035static void scic_sds_request_started_state_enter(void *object)
2036{
2037 struct scic_sds_request *sci_req = object;
Dan Williamsf1393032011-05-10 02:28:47 -07002038 struct sci_base_state_machine *sm = &sci_req->state_machine;
2039 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2040 struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
Dan Williamsf1f52e72011-05-10 02:28:45 -07002041
2042 SET_STATE_HANDLER(
2043 sci_req,
2044 scic_sds_request_state_handler_table,
2045 SCI_BASE_REQUEST_STATE_STARTED
2046 );
2047
Dan Williamsf1393032011-05-10 02:28:47 -07002048 if (ireq->ttype == tmf_task && dev->dev_type == SAS_END_DEV)
2049 sci_base_state_machine_change_state(sm,
2050 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
2051
2052	/* Most of the request state machines have a started substate machine,
2053	 * so start its execution on entry to the started state.
2054 */
Dan Williamsf1f52e72011-05-10 02:28:45 -07002055 if (sci_req->has_started_substate_machine == true)
2056 sci_base_state_machine_start(&sci_req->started_substate_machine);
2057}
2058
2059/**
2060 * scic_sds_request_started_state_exit() -
2061 * @object: This parameter specifies the base object for which the state
2062 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
2063 * object.
2064 *
2065 * This method implements the actions taken when exiting the
2066 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
2067 * to stop the started substate machine.
2068 */
2069static void scic_sds_request_started_state_exit(void *object)
2070{
2071 struct scic_sds_request *sci_req = object;
2072
2073 if (sci_req->has_started_substate_machine == true)
2074 sci_base_state_machine_stop(&sci_req->started_substate_machine);
2075}
2076
2077/**
2078 * scic_sds_request_completed_state_enter() -
2079 * @object: This parameter specifies the base object for which the state
2080 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
2081 * object.
2082 *
2083 * This method implements the actions taken when entering the
2084 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
2085 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
2086 * completion status and convert it to an enum sci_status to return in the
2087 * completion callback function.
2088 */
2089static void scic_sds_request_completed_state_enter(void *object)
2090{
2091 struct scic_sds_request *sci_req = object;
2092 struct scic_sds_controller *scic =
2093 scic_sds_request_get_controller(sci_req);
2094 struct isci_host *ihost = scic_to_ihost(scic);
2095 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2096
2097 SET_STATE_HANDLER(sci_req,
2098 scic_sds_request_state_handler_table,
2099 SCI_BASE_REQUEST_STATE_COMPLETED);
2100
2101 /* Tell the SCI_USER that the IO request is complete */
2102 if (sci_req->is_task_management_request == false)
2103 isci_request_io_request_complete(ihost, ireq,
2104 sci_req->sci_status);
2105 else
2106 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
2107}
2108
2109/**
2110 * scic_sds_request_aborting_state_enter() -
2111 * @object: This parameter specifies the base object for which the state
2112 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
2113 * object.
2114 *
2115 * This method implements the actions taken when entering the
2116 * SCI_BASE_REQUEST_STATE_ABORTING state.
2117 */
2118static void scic_sds_request_aborting_state_enter(void *object)
2119{
2120 struct scic_sds_request *sci_req = object;
2121
2122 /* Setting the abort bit in the Task Context is required by the silicon. */
2123 sci_req->task_context_buffer->abort = 1;
2124
2125 SET_STATE_HANDLER(
2126 sci_req,
2127 scic_sds_request_state_handler_table,
2128 SCI_BASE_REQUEST_STATE_ABORTING
2129 );
2130}
2131
2132/**
2133 * scic_sds_request_final_state_enter() -
2134 * @object: This parameter specifies the base object for which the state
2135 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
2136 *
2137 * This method implements the actions taken when entering the
2138 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
2139 * state handlers in place.
2140 */
2141static void scic_sds_request_final_state_enter(void *object)
2142{
2143 struct scic_sds_request *sci_req = object;
2144
2145 SET_STATE_HANDLER(
2146 sci_req,
2147 scic_sds_request_state_handler_table,
2148 SCI_BASE_REQUEST_STATE_FINAL
2149 );
2150}
2151
Dan Williamsf1393032011-05-10 02:28:47 -07002152static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter(
2153 void *object)
2154{
2155 struct scic_sds_request *sci_req = object;
2156
2157 SET_STATE_HANDLER(
2158 sci_req,
2159 scic_sds_request_state_handler_table,
2160 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
2161 );
2162}
2163
2164static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter(
2165 void *object)
2166{
2167 struct scic_sds_request *sci_req = object;
2168
2169 SET_STATE_HANDLER(
2170 sci_req,
2171 scic_sds_request_state_handler_table,
2172 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE
2173 );
2174}
2175
Dan Williamsf1f52e72011-05-10 02:28:45 -07002176static const struct sci_base_state scic_sds_request_state_table[] = {
2177 [SCI_BASE_REQUEST_STATE_INITIAL] = {
2178 .enter_state = scic_sds_request_initial_state_enter,
2179 },
2180 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
2181 .enter_state = scic_sds_request_constructed_state_enter,
2182 },
2183 [SCI_BASE_REQUEST_STATE_STARTED] = {
2184 .enter_state = scic_sds_request_started_state_enter,
2185 .exit_state = scic_sds_request_started_state_exit
2186 },
Dan Williamsf1393032011-05-10 02:28:47 -07002187 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
2188 .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter,
2189 },
2190 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
2191 .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter,
2192 },
Dan Williamsf1f52e72011-05-10 02:28:45 -07002193 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
2194 .enter_state = scic_sds_request_completed_state_enter,
2195 },
2196 [SCI_BASE_REQUEST_STATE_ABORTING] = {
2197 .enter_state = scic_sds_request_aborting_state_enter,
2198 },
2199 [SCI_BASE_REQUEST_STATE_FINAL] = {
2200 .enter_state = scic_sds_request_final_state_enter,
2201 },
2202};
2203
2204static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
2205 struct scic_sds_remote_device *sci_dev,
2206 u16 io_tag, struct scic_sds_request *sci_req)
2207{
2208 sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
2209 scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
2210 sci_base_state_machine_start(&sci_req->state_machine);
2211
2212 sci_req->io_tag = io_tag;
2213 sci_req->owning_controller = scic;
2214 sci_req->target_device = sci_dev;
2215 sci_req->has_started_substate_machine = false;
2216 sci_req->protocol = SCIC_NO_PROTOCOL;
2217 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2218 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
2219
2220 sci_req->sci_status = SCI_SUCCESS;
2221 sci_req->scu_status = 0;
2222 sci_req->post_context = 0xFFFFFFFF;
2223
2224 sci_req->is_task_management_request = false;
2225
2226 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
2227 sci_req->was_tag_assigned_by_user = false;
2228 sci_req->task_context_buffer = NULL;
2229 } else {
2230 sci_req->was_tag_assigned_by_user = true;
2231
2232 sci_req->task_context_buffer =
2233 scic_sds_controller_get_task_context_buffer(scic, io_tag);
2234 }
2235}
2236
2237static enum sci_status
2238scic_io_request_construct(struct scic_sds_controller *scic,
2239 struct scic_sds_remote_device *sci_dev,
2240 u16 io_tag, struct scic_sds_request *sci_req)
2241{
2242 struct domain_device *dev = sci_dev_to_domain(sci_dev);
2243 enum sci_status status = SCI_SUCCESS;
2244
2245 /* Build the common part of the request */
2246 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
2247
2248 if (sci_dev->rnc.remote_node_index ==
2249 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
2250 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
2251
2252 if (dev->dev_type == SAS_END_DEV)
2253 scic_sds_ssp_io_request_assign_buffers(sci_req);
2254 else if ((dev->dev_type == SATA_DEV) ||
2255 (dev->tproto & SAS_PROTOCOL_STP)) {
2256 scic_sds_stp_request_assign_buffers(sci_req);
2257 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
2258 } else if (dev_is_expander(dev)) {
2259 scic_sds_smp_request_assign_buffers(sci_req);
2260 memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
2261 } else
2262 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2263
2264 if (status == SCI_SUCCESS) {
2265 memset(sci_req->task_context_buffer, 0,
2266 offsetof(struct scu_task_context, sgl_pair_ab));
2267 }
2268
2269 return status;
2270}
2271
2272enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
2273 struct scic_sds_remote_device *sci_dev,
2274 u16 io_tag, struct scic_sds_request *sci_req)
2275{
2276 struct domain_device *dev = sci_dev_to_domain(sci_dev);
2277 enum sci_status status = SCI_SUCCESS;
2278
2279 /* Build the common part of the request */
2280 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
2281
Dan Williamsf1393032011-05-10 02:28:47 -07002282 if (dev->dev_type == SAS_END_DEV)
Dan Williamsf1f52e72011-05-10 02:28:45 -07002283 scic_sds_ssp_task_request_assign_buffers(sci_req);
Dan Williamsf1393032011-05-10 02:28:47 -07002284 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
Dan Williamsf1f52e72011-05-10 02:28:45 -07002285 scic_sds_stp_request_assign_buffers(sci_req);
2286 else
2287 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2288
2289 if (status == SCI_SUCCESS) {
2290 sci_req->is_task_management_request = true;
2291 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
2292 }
2293
2294 return status;
2295}
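
/* Hypothetical caller sketch for the TMF path (ihost, idev and ireq
 * are placeholder names): construct the task request and let the core
 * pick the tag, mirroring the I/O construction above.
 */
#if 0
	enum sci_status s;

	s = scic_task_request_construct(&ihost->sci, &idev->sci,
					SCI_CONTROLLER_INVALID_IO_TAG,
					&ireq->sci);
	if (s != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev,
			 "%s: task request construct failed\n", __func__);
#endif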
2296
2297static enum sci_status isci_request_ssp_request_construct(
2298 struct isci_request *request)
2299{
2300 enum sci_status status;
2301
2302 dev_dbg(&request->isci_host->pdev->dev,
2303 "%s: request = %p\n",
2304 __func__,
2305 request);
2306 status = scic_io_request_construct_basic_ssp(&request->sci);
2307 return status;
2308}
2309
2310static enum sci_status isci_request_stp_request_construct(
2311 struct isci_request *request)
2312{
2313 struct sas_task *task = isci_request_access_task(request);
2314 enum sci_status status;
2315 struct host_to_dev_fis *register_fis;
2316
2317 dev_dbg(&request->isci_host->pdev->dev,
2318 "%s: request = %p\n",
2319 __func__,
2320 request);
2321
2322 /* Get the host_to_dev_fis from the core and copy
2323 * the fis from the task into it.
2324 */
2325 register_fis = isci_sata_task_to_fis_copy(task);
2326
2327 status = scic_io_request_construct_basic_sata(&request->sci);
2328
2329 /* Set the ncq tag in the fis, from the queue
2330 * command in the task.
2331 */
2332 if (isci_sata_is_task_ncq(task)) {
2333
2334 isci_sata_set_ncq_tag(
2335 register_fis,
2336 task
2337 );
2338 }
2339
2340 return status;
2341}
2342
2343/*
2344 * isci_smp_request_build() - This function builds the smp request.
2345 * @ireq: This parameter points to the isci_request allocated in the
2346 * request construct function.
2347 *
2348 * SCI_SUCCESS on successful completion, or specific failure code.
2349 */
2350static enum sci_status isci_smp_request_build(struct isci_request *ireq)
2351{
2352 enum sci_status status = SCI_FAILURE;
2353 struct sas_task *task = isci_request_access_task(ireq);
2354 struct scic_sds_request *sci_req = &ireq->sci;
2355
2356 dev_dbg(&ireq->isci_host->pdev->dev,
2357 "%s: request = %p\n", __func__, ireq);
2358
2359 dev_dbg(&ireq->isci_host->pdev->dev,
2360 "%s: smp_req len = %d\n",
2361 __func__,
2362 task->smp_task.smp_req.length);
2363
2364	/* copy the smp command into the request's command buffer */
2365 sg_copy_to_buffer(&task->smp_task.smp_req, 1,
2366 &sci_req->smp.cmd,
2367 sizeof(struct smp_req));
2368
2369 status = scic_io_request_construct_smp(sci_req);
2370 if (status != SCI_SUCCESS)
2371 dev_warn(&ireq->isci_host->pdev->dev,
2372 "%s: failed with status = %d\n",
2373 __func__,
2374 status);
2375
2376 return status;
2377}
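
/* Directionality reminder (illustrative fragment): sg_copy_to_buffer()
 * above drains the scatterlist into a linear buffer (the SMP request
 * frame), while sg_copy_from_buffer() in the completion path fills the
 * scatterlist from a linear buffer (the SMP response).
 */
#if 0
	sg_copy_to_buffer(&task->smp_task.smp_req, 1,
			  &sci_req->smp.cmd, sizeof(struct smp_req));
	sg_copy_from_buffer(&task->smp_task.smp_resp, 1,
			    &sci_req->smp.rsp, sizeof(struct smp_resp));
#endif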
2378
2379/**
2380 * isci_io_request_build() - This function builds the io request object.
2381 * @isci_host: This parameter specifies the ISCI host object
2382 * @request: This parameter points to the isci_request object allocated in the
2383 * request construct function.
2384 * @isci_device: This parameter is the handle for the sci core's remote device
2385 * object that is the destination for this request.
2386 *
2387 * SCI_SUCCESS on successful completion, or specific failure code.
2388 */
2389static enum sci_status isci_io_request_build(
2390 struct isci_host *isci_host,
2391 struct isci_request *request,
2392 struct isci_remote_device *isci_device)
2393{
2394 enum sci_status status = SCI_SUCCESS;
2395 struct sas_task *task = isci_request_access_task(request);
2396 struct scic_sds_remote_device *sci_device = &isci_device->sci;
2397
2398 dev_dbg(&isci_host->pdev->dev,
2399 "%s: isci_device = 0x%p; request = %p, "
2400 "num_scatter = %d\n",
2401 __func__,
2402 isci_device,
2403 request,
2404 task->num_scatter);
2405
2406 /* map the sgl addresses, if present.
2407 * libata does the mapping for sata devices
2408 * before we get the request.
2409 */
2410 if (task->num_scatter &&
2411 !sas_protocol_ata(task->task_proto) &&
2412 !(SAS_PROTOCOL_SMP & task->task_proto)) {
2413
2414 request->num_sg_entries = dma_map_sg(
2415 &isci_host->pdev->dev,
2416 task->scatter,
2417 task->num_scatter,
2418 task->data_dir
2419 );
2420
2421 if (request->num_sg_entries == 0)
2422 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2423 }
2424
2425 /* build the common request object. For now,
2426 * we will let the core allocate the IO tag.
2427 */
2428 status = scic_io_request_construct(&isci_host->sci, sci_device,
2429 SCI_CONTROLLER_INVALID_IO_TAG,
2430 &request->sci);
2431
2432 if (status != SCI_SUCCESS) {
2433 dev_warn(&isci_host->pdev->dev,
2434 "%s: failed request construct\n",
2435 __func__);
2436 return SCI_FAILURE;
2437 }
2438
2439 switch (task->task_proto) {
2440 case SAS_PROTOCOL_SMP:
2441 status = isci_smp_request_build(request);
2442 break;
2443 case SAS_PROTOCOL_SSP:
2444 status = isci_request_ssp_request_construct(request);
2445 break;
2446 case SAS_PROTOCOL_SATA:
2447 case SAS_PROTOCOL_STP:
2448 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2449 status = isci_request_stp_request_construct(request);
2450 break;
2451 default:
2452 dev_warn(&isci_host->pdev->dev,
2453 "%s: unknown protocol\n", __func__);
2454 return SCI_FAILURE;
2455 }
2456
2457	return status;
2458}
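
/* A sketch of the unmap that must pair with the dma_map_sg() above
 * (an assumption about what isci_request_unmap_sgl() does in the
 * completion path): same device, scatterlist, entry count and
 * direction.
 */
#if 0
	if (request->num_sg_entries)
		dma_unmap_sg(&isci_host->pdev->dev, task->scatter,
			     task->num_scatter, task->data_dir);
#endif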
2459
2460/**
2461 * isci_request_alloc_core() - This function gets the request object from the
2462 * isci_host dma cache.
2463 * @isci_host: This parameter specifies the ISCI host object
2464 * @isci_request: This parameter will contain the pointer to the new
2465 * isci_request object.
2466 * @isci_device: This parameter is the pointer to the isci remote device object
2467 * that is the destination for this request.
2468 * @gfp_flags: This parameter specifies the os allocation flags.
2469 *
2470 * SCI_SUCCESS on successful completion, or specific failure code.
2471 */
2472static int isci_request_alloc_core(
2473 struct isci_host *isci_host,
2474 struct isci_request **isci_request,
2475 struct isci_remote_device *isci_device,
2476 gfp_t gfp_flags)
2477{
2478 int ret = 0;
2479 dma_addr_t handle;
2480 struct isci_request *request;
2481
2482
2483	/* get pointer to dma memory. This actually points
2484	 * to both the isci_request object and the
2485	 * sci object. The isci object is at the beginning
2486	 * of the memory allocated here.
2487 */
2488 request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
2489 if (!request) {
2490 dev_warn(&isci_host->pdev->dev,
2491 "%s: dma_pool_alloc returned NULL\n", __func__);
2492 return -ENOMEM;
2493 }
2494
2495 /* initialize the request object. */
2496 spin_lock_init(&request->state_lock);
2497 request->request_daddr = handle;
2498 request->isci_host = isci_host;
2499 request->isci_device = isci_device;
2500 request->io_request_completion = NULL;
2501 request->terminated = false;
2502
2503 request->num_sg_entries = 0;
2504
2505 request->complete_in_target = false;
2506
2507 INIT_LIST_HEAD(&request->completed_node);
2508 INIT_LIST_HEAD(&request->dev_node);
2509
2510 *isci_request = request;
2511 isci_request_change_state(request, allocated);
2512
2513 return ret;
2514}
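
/* A sketch of the matching release for the dma_pool_alloc() above (an
 * assumption about isci_request_free()): dma_pool_free() needs both
 * the CPU pointer and the dma handle saved in request->request_daddr.
 */
#if 0
	dma_pool_free(isci_host->dma_pool, request, request->request_daddr);
#endif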
2515
2516static int isci_request_alloc_io(
2517 struct isci_host *isci_host,
2518 struct sas_task *task,
2519 struct isci_request **isci_request,
2520 struct isci_remote_device *isci_device,
2521 gfp_t gfp_flags)
2522{
2523 int retval = isci_request_alloc_core(isci_host, isci_request,
2524 isci_device, gfp_flags);
2525
2526 if (!retval) {
2527 (*isci_request)->ttype_ptr.io_task_ptr = task;
2528 (*isci_request)->ttype = io_task;
2529
2530 task->lldd_task = *isci_request;
2531 }
2532 return retval;
2533}
2534
2535/**
2536 * isci_request_alloc_tmf() - This function gets the request object from the
2537 * isci_host dma cache and initializes the relevant fields for a TMF task.
2538 * @isci_host: This parameter specifies the ISCI host object
2539 * @isci_tmf: This parameter is the isci tmf struct for this request.
2540 * @isci_request: This parameter will contain the pointer to the new
2541 * isci_request object.
2542 * @isci_device: This parameter is the pointer to the isci remote device object
2543 * that is the destination for this request.
2544 * @gfp_flags: This parameter specifies the os allocation flags.
2545 *
2546 * SCI_SUCCESS on successful completion, or specific failure code.
2547 */
2548int isci_request_alloc_tmf(
2549 struct isci_host *isci_host,
2550 struct isci_tmf *isci_tmf,
2551 struct isci_request **isci_request,
2552 struct isci_remote_device *isci_device,
2553 gfp_t gfp_flags)
2554{
2555 int retval = isci_request_alloc_core(isci_host, isci_request,
2556 isci_device, gfp_flags);
2557
2558 if (!retval) {
2559
2560 (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
2561 (*isci_request)->ttype = tmf_task;
2562 }
2563 return retval;
2564}
2565
2566/**
2567 * isci_request_execute() - This function allocates the isci_request object,
2568 * and fills in some common fields.
2569 * @isci_host: This parameter specifies the ISCI host object
2570 * @sas_task: This parameter is the task struct from the upper layer driver.
2571 * @isci_request: This parameter will contain the pointer to the new
2572 * isci_request object.
2573 * @gfp_flags: This parameter specifies the os allocation flags.
2574 *
2575 * SCI_SUCCESS on successful completion, or specific failure code.
2576 */
2577int isci_request_execute(
2578 struct isci_host *isci_host,
2579 struct sas_task *task,
2580 struct isci_request **isci_request,
2581 gfp_t gfp_flags)
2582{
2583 int ret = 0;
2584 struct scic_sds_remote_device *sci_device;
2585 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2586 struct isci_remote_device *isci_device;
2587 struct isci_request *request;
2588 unsigned long flags;
2589
2590 isci_device = task->dev->lldd_dev;
2591 sci_device = &isci_device->sci;
2592
2593 /* do common allocation and init of request object. */
2594 ret = isci_request_alloc_io(
2595 isci_host,
2596 task,
2597 &request,
2598 isci_device,
2599 gfp_flags
2600 );
2601
2602 if (ret)
2603 goto out;
2604
2605 status = isci_io_request_build(isci_host, request, isci_device);
2606 if (status != SCI_SUCCESS) {
2607 dev_warn(&isci_host->pdev->dev,
2608 "%s: request_construct failed - status = 0x%x\n",
2609 __func__,
2610 status);
2611 goto out;
2612 }
2613
2614 spin_lock_irqsave(&isci_host->scic_lock, flags);
2615
2616 /* send the request, let the core assign the IO TAG. */
2617 status = scic_controller_start_io(&isci_host->sci, sci_device,
2618 &request->sci,
2619 SCI_CONTROLLER_INVALID_IO_TAG);
2620 if (status != SCI_SUCCESS &&
2621 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
2622 dev_warn(&isci_host->pdev->dev,
2623 "%s: failed request start (0x%x)\n",
2624 __func__, status);
2625 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
2626 goto out;
2627 }
2628
2629 /* Either I/O started OK, or the core has signaled that
2630 * the device needs a target reset.
2631 *
2632 * In either case, hold onto the I/O for later.
2633 *
2634	 * Update its status and add it to the list in the
2635 * remote device object.
2636 */
2637 isci_request_change_state(request, started);
2638 list_add(&request->dev_node, &isci_device->reqs_in_process);
2639
2640 if (status == SCI_SUCCESS) {
2641 /* Save the tag for possible task mgmt later. */
2642 request->io_tag = request->sci.io_tag;
2643 } else {
2644 /* The request did not really start in the
2645 * hardware, so clear the request handle
2646 * here so no terminations will be done.
2647 */
2648 request->terminated = true;
2649 }
2650 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
2651
2652 if (status ==
2653 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
2654 /* Signal libsas that we need the SCSI error
2655 * handler thread to work on this I/O and that
2656 * we want a device reset.
2657 */
2658 spin_lock_irqsave(&task->task_state_lock, flags);
2659 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2660 spin_unlock_irqrestore(&task->task_state_lock, flags);
2661
2662 /* Cause this task to be scheduled in the SCSI error
2663 * handler thread.
2664 */
2665 isci_execpath_callback(isci_host, task,
2666 sas_task_abort);
2667
2668 /* Change the status, since we are holding
2669 * the I/O until it is managed by the SCSI
2670 * error handler.
2671 */
2672 status = SCI_SUCCESS;
2673 }
2674
2675 out:
2676 if (status != SCI_SUCCESS) {
2677 /* release dma memory on failure. */
2678 isci_request_free(isci_host, request);
2679 request = NULL;
2680 ret = SCI_FAILURE;
2681 }
2682
2683 *isci_request = request;
2684 return ret;
2685}
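
/* Hypothetical submission sketch (ihost is a placeholder name): a
 * caller in the lldd execute path would hand each sas_task to
 * isci_request_execute() and treat a non-zero return as a failed
 * submission.
 */
#if 0
	struct isci_request *ireq = NULL;
	int err = isci_request_execute(ihost, task, &ireq, GFP_ATOMIC);

	if (err)
		task->lldd_task = NULL;	/* submission failed; no request */
#endif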
2686
2687
2688