/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR	(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE	32

struct scsi_host_sg_pool {
        size_t size;
        char *name;
        kmem_cache_t *slab;
        mempool_t *pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
        SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP
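
/*
 * Each pool above backs scatterlist allocations for commands whose
 * use_sg count falls in that pool's bracket; scsi_alloc_sgtable()
 * below selects the pool by rounding use_sg up to the next bracket.
 * Illustrative mapping (assuming SCSI_MAX_PHYS_SEGMENTS >= 64):
 *
 *	use_sg = 3   -> sglist_len 0, allocated from "sgpool-8"
 *	use_sg = 20  -> sglist_len 2, allocated from "sgpool-32"
 *	use_sg = 40  -> sglist_len 3, allocated from "sgpool-64"
 */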

/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq    - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
        /*
         * Because users of this function are apt to reuse requests with no
         * modification, we have to sanitise the request flags here
         */
        sreq->sr_request->flags &= ~REQ_DONTPREP;
        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
                           at_head, sreq);
        return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq    - command descriptor.
 *              cmnd    - actual SCSI command to be performed.
 *              buffer  - data buffer.
 *              bufflen - size of data buffer.
 *              done    - completion function to be run.
 *              timeout - how long to let it run before timeout.
 *              retries - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
                 void *buffer, unsigned bufflen,
                 void (*done)(struct scsi_cmnd *),
                 int timeout, int retries)
{
        /*
         * If the upper level driver is reusing these things, then
         * we should release the low-level block now.  Another one will
         * be allocated later when this request is getting queued.
         */
        __scsi_release_request(sreq);

        /*
         * Our own function scsi_done (which marks the host as not busy,
         * disables the timeout counter, etc) will be called by us or by
         * the queuecommand() completion path; it in turn is responsible
         * for calling the completion function of the high level driver.
         */
        memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
        sreq->sr_bufflen = bufflen;
        sreq->sr_buffer = buffer;
        sreq->sr_allowed = retries;
        sreq->sr_done = done;
        sreq->sr_timeout_per_command = timeout;

        if (sreq->sr_cmd_len == 0)
                sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);
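
/*
 * Illustrative sketch (not part of this file): an upper-level caller
 * issues an asynchronous command through scsi_do_req() with a private
 * completion routine.  The names my_done and MY_TIMEOUT are hypothetical.
 *
 *	static void my_done(struct scsi_cmnd *cmd)
 *	{
 *		// runs on completion; inspect cmd->sr_result / cmd->result
 *	}
 *
 *	static void my_issue(struct scsi_request *sreq)
 *	{
 *		unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *
 *		sreq->sr_data_direction = DMA_NONE;
 *		scsi_do_req(sreq, cmd, NULL, 0, my_done, MY_TIMEOUT, 3);
 *	}
 */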

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
        BUG_ON(!req->waiting);

        complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
                   unsigned bufflen, int timeout, int retries)
{
        DECLARE_COMPLETION(wait);
        int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
        struct request *req;

        req = blk_get_request(sreq->sr_device->request_queue, write,
                              __GFP_WAIT);
        if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
                                       buffer, bufflen, __GFP_WAIT)) {
                sreq->sr_result = DRIVER_ERROR << 24;
                blk_put_request(req);
                return;
        }

        req->flags |= REQ_NOMERGE;
        req->waiting = &wait;
        req->end_io = scsi_wait_req_end_io;
        req->cmd_len = COMMAND_SIZE(((u8 *) cmnd)[0]);
        req->sense = sreq->sr_sense_buffer;
        req->sense_len = 0;
        memcpy(req->cmd, cmnd, req->cmd_len);
        req->timeout = timeout;
        req->flags |= REQ_BLOCK_PC;
        req->rq_disk = NULL;
        blk_insert_request(sreq->sr_device->request_queue, req,
                           sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
        wait_for_completion(&wait);
        sreq->sr_request->waiting = NULL;
        sreq->sr_result = req->errors;
        if (req->errors)
                sreq->sr_result |= (DRIVER_ERROR << 24);

        blk_put_request(req);
}

EXPORT_SYMBOL(scsi_wait_req);
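
/*
 * Illustrative sketch (not part of this file): synchronous use of
 * scsi_wait_req() from process context, with scsi_allocate_request()/
 * scsi_release_request() as the usual allocation pair.  The timeout and
 * retry values shown are arbitrary examples.
 *
 *	struct scsi_request *sreq;
 *	int result;
 *
 *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	if (!sreq)
 *		return -ENOMEM;
 *	sreq->sr_data_direction = DMA_NONE;
 *	scsi_wait_req(sreq, cmd, NULL, 0, 30 * HZ, 3);
 *	result = sreq->sr_result;	// 0 on success
 *	scsi_release_request(sreq);
 */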

/**
 * scsi_execute - insert request and wait for the result
 * @sdev: scsi device
 * @cmd: scsi command
 * @data_direction: data direction
 * @buffer: data buffer
 * @bufflen: len of buffer
 * @sense: optional sense buffer
 * @timeout: request timeout in jiffies
 * @retries: number of times to retry request
 * @flags: or into request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                       buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->timeout = timeout;
        req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(scsi_execute);
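
/*
 * Illustrative sketch (not part of this file): reading a device's
 * standard INQUIRY data synchronously with scsi_execute().  Buffer
 * size and timeout are arbitrary example values.
 *
 *	unsigned char inq[36];
 *	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, sizeof(inq), 0 };
 *	int result;
 *
 *	result = scsi_execute(sdev, cmd, DMA_FROM_DEVICE, inq, sizeof(inq),
 *			      NULL, 30 * HZ, 3, 0);
 *	if (result == 0)
 *		// inq[] now holds the INQUIRY response
 */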

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
        char *sense = NULL;
        int result;

        if (sshdr) {
                sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
                if (!sense)
                        return DRIVER_ERROR << 24;
                memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
                              sense, timeout, retries, 0);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

        kfree(sense);
        return result;
}
EXPORT_SYMBOL(scsi_execute_req);
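
/*
 * Illustrative sketch (not part of this file): as the scsi_execute()
 * example above, but letting scsi_execute_req() collect and decode the
 * sense data on our behalf.
 *
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
 *				  &sshdr, 30 * HZ, 3);
 *	if (result && scsi_sense_valid(&sshdr))
 *		// sshdr.sense_key/asc/ascq describe the failure
 */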

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;

        memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        cmd->old_use_sg = cmd->use_sg;
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;
        memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
        cmd->buffer = cmd->request_buffer;
        cmd->bufflen = cmd->request_bufflen;

        return 1;
}

/*
 * Function:    scsi_setup_cmd_retry()
 *
 * Purpose:     Restore the command state for a retry
 *
 * Arguments:   cmd     - command to be restored
 *
 * Returns:     Nothing
 *
 * Notes:       Immediately prior to retrying a command, we need
 *              to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
        memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
        cmd->request_buffer = cmd->buffer;
        cmd->request_bufflen = cmd->bufflen;
        cmd->use_sg = cmd->old_use_sg;
        cmd->cmd_len = cmd->old_cmd_len;
        cmd->sc_data_direction = cmd->sc_old_data_direction;
        cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        if (sdev->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        while (!list_empty(&shost->starved_list) &&
               !shost->host_blocked && !shost->host_self_blocked &&
               !((shost->can_queue > 0) &&
                 (shost->host_busy >= shost->can_queue))) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                sdev = list_entry(shost->starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(sdev->request_queue);

                spin_lock_irqsave(shost->host_lock, flags);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
                         * starved list. This is unlikely but without this
                         * in theory we could loop forever.
                         */
                        break;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can happen for a number of reasons - the main one
 *              is I/O errors in the middle of the request, in which
 *              case we need to request the blocks that come after the
 *              bad sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        cmd->request->flags &= ~REQ_DONTPREP;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct request_queue *q = cmd->device->request_queue;

        scsi_put_command(cmd);
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                          int bytes, int requeue)
{
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (end_that_request_chunk(req, uptodate, bytes)) {
                int leftover = (req->hard_nr_sectors << 9);

                if (blk_pc_request(req))
                        leftover = req->data_len;

                /* kill remainder if no retries */
                if (!uptodate && blk_noretry_request(req))
                        end_that_request_chunk(req, 0, leftover);
                else {
                        if (requeue)
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_requeue_command(q, cmd);

                        return cmd;
                }
        }

        add_disk_randomness(req->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        end_that_request_last(req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        scsi_next_command(cmd);
        return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;

        BUG_ON(!cmd->use_sg);

        switch (cmd->use_sg) {
        case 1 ... 8:
                cmd->sglist_len = 0;
                break;
        case 9 ... 16:
                cmd->sglist_len = 1;
                break;
        case 17 ... 32:
                cmd->sglist_len = 2;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        case 33 ... 64:
                cmd->sglist_len = 3;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        case 65 ... 128:
                cmd->sglist_len = 4;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        case 129 ... 256:
                cmd->sglist_len = 5;
                break;
#endif
#endif
#endif
        default:
                return NULL;
        }

        sgp = scsi_sg_pools + cmd->sglist_len;
        sgl = mempool_alloc(sgp->pool, gfp_mask);
        return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
        struct scsi_host_sg_pool *sgp;

        BUG_ON(index >= SG_MEMPOOL_NR);

        sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
        else if (cmd->request_buffer != req->buffer)
                kfree(cmd->request_buffer);

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must do one of several things here:
 *
 *              a) Call scsi_end_request.  This will finish off the
 *                 specified number of sectors.  If we are done, the
 *                 command block will be released, and the queue
 *                 function will be goosed.  If we are not done, then
 *                 scsi_end_request will directly goose the queue.
 *
 *              b) We can just use scsi_requeue_command() here.  This would
 *                 be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                        unsigned int block_bytes)
{
        int result = cmd->result;
        int this_count = cmd->bufflen;
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int clear_errors = 1;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;

        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
                return;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
        else if (cmd->buffer != req->buffer) {
                if (rq_data_dir(req) == READ) {
                        unsigned long flags;
                        char *to = bio_kmap_irq(req->bio, &flags);
                        memcpy(to, cmd->buffer, cmd->bufflen);
                        bio_kunmap_irq(to, &flags);
                }
                kfree(cmd->buffer);
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                } else
                        req->data_len = cmd->resid;
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_bytes >= 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
                                              req->nr_sectors, good_bytes));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

                if (clear_errors)
                        req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_bytes != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */
                cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

                /*
                 * If the command completed without error, then either finish
                 * off the rest of the command, or start a new one.
                 */
                if (result == 0 || cmd == NULL) {
                        return;
                }
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* detected disc change.  set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                cmd = scsi_end_request(cmd, 0,
                                                       this_count, 1);
                                return;
                        } else {
                                /*
                                 * Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * request and see what happens.
                                 */
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /*
                         * If we had an ILLEGAL REQUEST returned, then we may
                         * have performed an unsupported command.  The only
                         * thing this should be would be a ten byte read where
                         * only a six byte read was supported.  Also, on a
                         * system where READ CAPACITY failed, we may have read
                         * past the end of the disk.
                         */
                        if (cmd->device->use_10_for_rw &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                cmd->device->use_10_for_rw = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_requeue_command(q, cmd);
                                result = 0;
                        } else {
                                cmd = scsi_end_request(cmd, 0, this_count, 1);
                                return;
                        }
                        break;
                case NOT_READY:
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        printk(KERN_INFO "Device %s not ready.\n",
                               req->rq_disk ? req->rq_disk->disk_name : "");
                        cmd = scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
                        printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
                               cmd->device->host->host_no,
                               (int)cmd->device->channel,
                               (int)cmd->device->id, (int)cmd->device->lun);
                        __scsi_print_command(cmd->data_cmnd);
                        scsi_print_sense("", cmd);
                        cmd = scsi_end_request(cmd, 0, block_bytes, 1);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_requeue_command(q, cmd);
                return;
        }
        if (result) {
                if (!(req->flags & REQ_SPECIAL))
                        printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
                               "= 0x%x\n", cmd->device->host->host_no,
                               cmd->device->channel,
                               cmd->device->id,
                               cmd->device->lun, result);

                if (driver_byte(result) & DRIVER_SENSE)
                        scsi_print_sense("", cmd);
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                block_bytes = req->hard_cur_sectors << 9;
                if (!block_bytes)
                        block_bytes = req->data_len;
                cmd = scsi_end_request(cmd, 0, block_bytes, 1);
        }
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        struct scatterlist *sgpnt;
        int count;

        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
        if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
                cmd->use_sg = 0;
                return 0;
        }

        /*
         * we used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;

        /*
         * if sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt))
                return BLKPREP_DEFER;

        cmd->request_buffer = (char *) sgpnt;
        cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

        /*
         * mapped well, send it off
         */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
                return 0;
        }

        printk(KERN_ERR "Incorrect number of segments after building list\n");
        printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
        printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
               req->current_nr_sectors);

        /* release the command and kill it */
        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
        return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;

                if (drv->prepare_flush)
                        return drv->prepare_flush(q, rq);
        }

        return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct request *flush_rq = rq->end_io_data;
        struct scsi_driver *drv;

        if (flush_rq->errors) {
                printk("scsi: barrier error, disabling flush support\n");
                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
        }

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
                drv->end_flush(q, rq);
        }
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                               sector_t *error_sector)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state != SDEV_RUNNING)
                return -ENXIO;

        drv = *(struct scsi_driver **) disk->private_data;
        if (drv->issue_flush)
                return drv->issue_flush(&sdev->sdev_gendev, error_sector);

        return -EOPNOTSUPP;
}

static void scsi_generic_done(struct scsi_cmnd *cmd)
{
        BUG_ON(!blk_pc_request(cmd->request));
        scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
        int specials_only = 0;

        /*
         * Just check to see if the device is online.  If it isn't, we
         * refuse to process any commands.  The device must be brought
         * online before trying any recovery commands
         */
        if (unlikely(!scsi_device_online(sdev))) {
                printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                       sdev->host->host_no, sdev->id, sdev->lun);
                return BLKPREP_KILL;
        }
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                /* OK, we're not in a running state don't prep
                 * user commands */
                if (sdev->sdev_state == SDEV_DEL) {
                        /* Device is fully deleted, no commands
                         * at all allowed down */
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }
                /* OK, we only allow special commands (i.e. not
                 * user initiated ones */
                specials_only = sdev->sdev_state;
        }

        /*
         * Find the actual device driver associated with this command.
         * The SPECIAL requests are things like character device or
         * ioctls, which did not originate from ll_rw_blk.  Note that
         * the special field is also used to indicate the cmd for
         * the remainder of a partially fulfilled request that can
         * come up when there is a medium error.  We have to treat
         * these two cases differently.  We differentiate by looking
         * at request->cmd, as this tells us the real story.
         */
        if (req->flags & REQ_SPECIAL && req->special) {
                struct scsi_request *sreq = req->special;

                if (sreq->sr_magic == SCSI_REQ_MAGIC) {
                        cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                        scsi_init_cmd_from_req(cmd, sreq);
                } else
                        cmd = req->special;
        } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

                if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
                        if (specials_only == SDEV_QUIESCE ||
                            specials_only == SDEV_BLOCK)
                                return BLKPREP_DEFER;

                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }

                /*
                 * Now try and find a command block that we can use.
                 */
                if (!req->special) {
                        cmd = scsi_get_command(sdev, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                } else
                        cmd = req->special;

                /* pull a tag out of the request if we have one */
                cmd->tag = req->tag;
        } else {
                blk_dump_rq_flags(req, "SCSI bad req");
                return BLKPREP_KILL;
        }

        /* note the overloading of req->special.  When the tag
         * is active it always means cmd.  If the tag goes
         * back for re-queueing, it may be reset */
        req->special = cmd;
        cmd->request = req;

        /*
         * FIXME: drop the lock here because the functions below
         * expect to be called without the queue lock held.  Also,
         * previously, we dequeued the request before dropping the
         * lock.  We hope REQ_STARTED prevents anything untoward from
         * happening now.
         */
        if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
                struct scsi_driver *drv;
                int ret;

                /*
                 * This will do a couple of things:
                 *  1) Fill in the actual SCSI command.
                 *  2) Fill in any other upper-level specific fields
                 *     (timeout).
                 *
                 * If this returns 0, it means that the request failed
                 * (reading past end of disk, reading offline device,
                 * etc).  This won't actually talk to the device, but
                 * some kinds of consistency checking may cause the
                 * request to be rejected immediately.
                 */

                /*
                 * This sets up the scatter-gather table (allocating if
                 * required).
                 */
                ret = scsi_init_io(cmd);
                if (ret)        /* BLKPREP_KILL return also releases the command */
                        return ret;

                /*
                 * Initialize the actual SCSI command for this request.
                 */
                if (req->rq_disk) {
                        drv = *(struct scsi_driver **)req->rq_disk->private_data;
                        if (unlikely(!drv->init_command(cmd))) {
                                scsi_release_buffers(cmd);
                                scsi_put_command(cmd);
                                return BLKPREP_KILL;
                        }
                } else {
                        memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
                        if (rq_data_dir(req) == WRITE)
                                cmd->sc_data_direction = DMA_TO_DEVICE;
                        else if (req->data_len)
                                cmd->sc_data_direction = DMA_FROM_DEVICE;
                        else
                                cmd->sc_data_direction = DMA_NONE;

                        cmd->transfersize = req->data_len;
                        cmd->allowed = 3;
                        cmd->timeout_per_command = req->timeout;
                        cmd->done = scsi_generic_done;
                }
        }

        /*
         * The request is now prepped, no need to come back here
         */
        req->flags |= REQ_DONTPREP;
        return BLKPREP_OK;

 defer:
        /* If we defer, the elv_next_request() returns NULL, but the
         * queue must be restarted, so we plug here if no returning
         * command will automatically do that. */
        if (sdev->device_busy == 0)
                blk_plug_device(q);
        return BLKPREP_DEFER;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                       struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth)
                return 0;
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d (%d:%d) unblocking device at"
                                       " zero depth\n", sdev->host->host_no,
                                       sdev->id, sdev->lun));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if (sdev->device_blocked)
                return 0;

        return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                        struct Scsi_Host *shost,
                                        struct scsi_device *sdev)
{
        if (shost->shost_state == SHOST_RECOVERY)
                return 0;
        if (shost->host_busy == 0 && shost->host_blocked) {
                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (--shost->host_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d unblocking host at zero depth\n",
                                       shost->host_no));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry,
                                      &shost->starved_list);
                return 0;
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);

        return 1;
}

/*
 * Kill requests for a dead device
 */
static void scsi_kill_requests(request_queue_t *q)
{
        struct request *req;

        while ((req = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(req);
                req->flags |= REQ_QUIET;
                while (end_that_request_first(req, 0, req->nr_sectors))
                        ;
                end_that_request_last(req);
        }
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_cmnd *cmd;
        struct request *req;

        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
                scsi_kill_requests(q);
                return;
        }

        if (!get_device(&sdev->sdev_gendev))
                /* We must be tearing the block queue down already */
                return;

        /*
         * To start with, we keep looping until the queue is empty, or until
         * the host is no longer able to accept any more requests.
         */
        shost = sdev->host;
        while (!blk_queue_plugged(q)) {
                int rtn;
                /*
                 * get next queueable request.  We do this early to make sure
                 * that the request is fully prepared even if we cannot
                 * accept it.
                 */
                req = elv_next_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;

                if (unlikely(!scsi_device_online(sdev))) {
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        blkdev_dequeue_request(req);
                        req->flags |= REQ_QUIET;
                        while (end_that_request_first(req, 0, req->nr_sectors))
                                ;
                        end_that_request_last(req);
                        continue;
                }

                /*
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
                        blkdev_dequeue_request(req);
                sdev->device_busy++;

                spin_unlock(q->queue_lock);
                spin_lock(shost->host_lock);

                if (!scsi_host_queue_ready(q, shost, sdev))
                        goto not_ready;
                if (sdev->single_lun) {
                        if (scsi_target(sdev)->starget_sdev_user &&
                            scsi_target(sdev)->starget_sdev_user != sdev)
                                goto not_ready;
                        scsi_target(sdev)->starget_sdev_user = sdev;
                }
                shost->host_busy++;

                /*
                 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
                 *           take the lock again.
                 */
                spin_unlock_irq(shost->host_lock);

                cmd = req->special;
                if (unlikely(cmd == NULL)) {
                        printk(KERN_CRIT "impossible request in %s.\n"
                                         "please mail a stack trace to "
                                         "linux-scsi@vger.kernel.org",
                                         __FUNCTION__);
                        BUG();
                }

                /*
                 * Finally, initialize any error handling parameters, and set up
                 * the timers for timeouts.
                 */
                scsi_init_cmd_errh(cmd);

                /*
                 * Dispatch the command to the low-level driver.
                 */
                rtn = scsi_dispatch_cmd(cmd);
                spin_lock_irq(q->queue_lock);
                if (rtn) {
                        /* we're refusing the command; because of
                         * the way locks get dropped, we need to
                         * check here if plugging is required */
                        if (sdev->device_busy == 0)
                                blk_plug_device(q);

                        break;
                }
        }

        goto out;

 not_ready:
        spin_unlock_irq(shost->host_lock);

        /*
         * lock q, handle tag, requeue req, and decrement device_busy. We
         * must return with queue_lock held.
         *
         * Decrementing device_busy without checking it is OK, as all such
         * cases (host limits or settings) should run the queue at some
         * later time.
         */
        spin_lock_irq(q->queue_lock);
        blk_requeue_request(q, req);
        sdev->device_busy--;
        if (sdev->device_busy == 0)
                blk_plug_device(q);
 out:
        /* must be careful here...if we trigger the ->remove() function
         * we cannot be holding the q lock */
        spin_unlock_irq(q->queue_lock);
        put_device(&sdev->sdev_gendev);
        spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
        struct device *host_dev;
        u64 bounce_limit = 0xffffffff;

        if (shost->unchecked_isa_dma)
                return BLK_BOUNCE_ISA;
        /*
         * Platforms with virtual-DMA translation
         * hardware have no practical limit.
         */
        if (!PCI_DMA_BUS_IS_PHYS)
                return BLK_BOUNCE_ANY;

        host_dev = scsi_get_device(shost);
        if (host_dev && host_dev->dma_mask)
                bounce_limit = *host_dev->dma_mask;

        return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct request_queue *q;

        q = blk_init_queue(scsi_request_fn, NULL);
        if (!q)
                return NULL;

        blk_queue_prep_rq(q, scsi_prep_fn);

        blk_queue_max_hw_segments(q, shost->sg_tablesize);
        blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
        blk_queue_max_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
        blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

        /*
         * ordered tags are superior to flush ordering
         */
        if (shost->ordered_tag)
                blk_queue_ordered(q, QUEUE_ORDERED_TAG);
        else if (shost->ordered_flush) {
                blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
                q->prepare_flush_fn = scsi_prepare_flush_fn;
                q->end_flush_fn = scsi_end_flush_fn;
        }

        if (!shost->use_clustering)
                clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        return q;
}

void scsi_free_queue(struct request_queue *q)
{
        blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *              commands from being queued to the device.
 *
 * Arguments:   shost   - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *              commands to be queued to the device.
 *
 * Arguments:   shost   - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 *
 *              This is done as an API function so that changes to the
 *              internals of the scsi mid-layer won't require wholesale
 *              changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 0;
        scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
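
/*
 * Illustrative sketch (not part of this file): a low-level driver
 * typically brackets an internal reset with these calls so that the
 * midlayer stops feeding it commands while the hardware is unusable.
 * my_hw_reset() is hypothetical.
 *
 *	scsi_block_requests(shost);
 *	my_hw_reset(shost);
 *	scsi_unblock_requests(shost);
 */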

int __init scsi_init_queue(void)
{
        int i;

        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                int size = sgp->size * sizeof(struct scatterlist);

                sgp->slab = kmem_cache_create(sgp->name, size, 0,
                                SLAB_HWCACHE_ALIGN, NULL, NULL);
                if (!sgp->slab) {
                        printk(KERN_ERR "SCSI: can't init sg slab %s\n",
                               sgp->name);
                }

                sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
                                mempool_alloc_slab, mempool_free_slab,
                                sgp->slab);
                if (!sgp->pool) {
                        printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
                               sgp->name);
                }
        }

        return 0;
}

void scsi_exit_queue(void)
{
        int i;

        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                mempool_destroy(sgp->pool);
                kmem_cache_destroy(sgp->slab);
        }
}

/**
 * scsi_mode_sense - issue a mode sense, falling back from a ten byte
 *	to a six byte command if necessary.
 * @sdev: SCSI device to be queried
 * @dbd: set if mode sense will allow block descriptors to be returned
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *	must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful; data->header_length then holds the
 * header offset (either 4 or 8 depending on whether a six or ten
 * byte command was issued).
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
                unsigned char *buffer, int len, int timeout, int retries,
                struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
        unsigned char cmd[12];
        int use_10_for_ms;
        int header_length;
        int result;
        struct scsi_sense_hdr my_sshdr;

        memset(data, 0, sizeof(*data));
        memset(&cmd[0], 0, 12);
        cmd[1] = dbd & 0x18;    /* allows DBD and LLBA bits */
        cmd[2] = modepage;

        /* caller might not be interested in sense, but we need it */
        if (!sshdr)
                sshdr = &my_sshdr;

 retry:
        use_10_for_ms = sdev->use_10_for_ms;

        if (use_10_for_ms) {
                if (len < 8)
                        len = 8;

                cmd[0] = MODE_SENSE_10;
                cmd[8] = len;
                header_length = 8;
        } else {
                if (len < 4)
                        len = 4;

                cmd[0] = MODE_SENSE;
                cmd[4] = len;
                header_length = 4;
        }

        memset(buffer, 0, len);

        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
                                  sshdr, timeout, retries);

        /* This code looks awful: what it's doing is making sure an
         * ILLEGAL REQUEST sense return identifies the actual command
         * byte as the problem.  MODE_SENSE commands can return
         * ILLEGAL REQUEST if the code page isn't supported */

        if (use_10_for_ms && !scsi_status_is_good(result) &&
            (driver_byte(result) & DRIVER_SENSE)) {
                if (scsi_sense_valid(sshdr)) {
                        if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
                            (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
                                /*
                                 * Invalid command operation code
                                 */
                                sdev->use_10_for_ms = 0;
                                goto retry;
                        }
                }
        }

        if (scsi_status_is_good(result)) {
                data->header_length = header_length;
                if (use_10_for_ms) {
                        data->length = buffer[0]*256 + buffer[1] + 2;
                        data->medium_type = buffer[2];
                        data->device_specific = buffer[3];
                        data->longlba = buffer[4] & 0x01;
                        data->block_descriptor_length = buffer[6]*256
                                + buffer[7];
                } else {
                        data->length = buffer[0] + 1;
                        data->medium_type = buffer[1];
                        data->device_specific = buffer[2];
                        data->block_descriptor_length = buffer[3];
                }
        }

        return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
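
/*
 * Illustrative sketch (not part of this file): querying the caching
 * mode page (0x08) of a disk.  Buffer size, timeout and retry count
 * are arbitrary example values.
 *
 *	unsigned char buffer[64];
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
 *			      30 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res))
 *		// the mode page starts at buffer + data.header_length +
 *		// data.block_descriptor_length
 */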

/**
 *	scsi_test_unit_ready - test if a device is ready
 *	@sdev:	scsi device to test.
 *	@timeout: command timeout.
 *	@retries: number of retries before giving up.
 *
 *	Returns zero if the device reports ready.  For a removable
 *	device, UNIT ATTENTION or NOT READY sense is taken to mean the
 *	medium changed: @sdev->changed is set and zero is returned.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  timeout, retries);

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
		if (scsi_sense_valid(&sshdr) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
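
/*
 * Illustrative sketch (not built as part of this file): polling a
 * device with scsi_test_unit_ready() until it comes ready, giving up
 * after a few attempts.  The attempt count, timeout and retry values
 * are arbitrary choices for the sketch.
 */
#if 0
static int example_wait_until_ready(struct scsi_device *sdev)
{
	int i;

	for (i = 0; i < 5; i++) {
		if (scsi_test_unit_ready(sdev, 10 * HZ, 3) == 0)
			return 0;
		msleep(1000);
	}
	return -ENODEV;
}
#endif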

/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				dev_printk(KERN_ERR, &sdev->sdev_gendev,
					   "Illegal state transition %s->%s\n",
					   scsi_device_state_name(oldstate),
					   scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
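
/*
 * Summary of the transitions accepted above (each requested state and
 * the old states it may legally be entered from):
 *
 *	SDEV_RUNNING	<- CREATED, OFFLINE, QUIESCE, BLOCK
 *	SDEV_QUIESCE	<- RUNNING, OFFLINE
 *	SDEV_OFFLINE	<- CREATED, RUNNING, QUIESCE, BLOCK
 *	SDEV_BLOCK	<- CREATED, RUNNING
 *	SDEV_CANCEL	<- CREATED, RUNNING, OFFLINE, BLOCK
 *	SDEV_DEL	<- CANCEL
 *	SDEV_CREATED	<- (none: initial state only)
 */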

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
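
/*
 * Illustrative sketch (not built as part of this file): the expected
 * pairing of quiesce and resume around a maintenance operation such
 * as a firmware download.  do_maintenance() is a placeholder for the
 * caller's own work, not a real function.
 */
#if 0
static int example_quiesced_maintenance(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;

	err = do_maintenance(sdev);	/* placeholder */

	scsi_device_resume(sdev);
	return err;
}
#endif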

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Handles a block request made by a scsi lld to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or an error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or an error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
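
/*
 * Illustrative sketch (not built as part of this file): an lld
 * blocking a device while holding the host_lock, as the notes above
 * assume, then unblocking it once its reset work is done.  The reset
 * step is a placeholder for the lld's own logic.
 */
#if 0
static void example_block_during_reset(struct Scsi_Host *shost,
				       struct scsi_device *sdev)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_internal_device_block(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* ... reset the hardware here (placeholder) ... */

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_internal_device_unblock(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
#endif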

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
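
/*
 * Illustrative sketch (not built as part of this file): a transport
 * class suspending all I/O below a target while its link is down and
 * resuming it when the link returns.  The callers of these two
 * helpers are hypothetical.
 */
#if 0
static void example_link_down(struct scsi_target *starget)
{
	scsi_target_block(&starget->dev);
}

static void example_link_up(struct scsi_target *starget)
{
	scsi_target_unblock(&starget->dev);
}
#endif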