1/*
2 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
3 *
4 * SCSI queueing library.
5 * Initial versions: Eric Youngdale (eric@andante.org).
6 * Based upon conversations with large numbers
7 * of people at Linux Expo.
8 */
9
10#include <linux/bio.h>
11#include <linux/blkdev.h>
12#include <linux/completion.h>
13#include <linux/kernel.h>
14#include <linux/mempool.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17#include <linux/pci.h>
18#include <linux/delay.h>
19
20#include <scsi/scsi.h>
21#include <scsi/scsi_dbg.h>
22#include <scsi/scsi_device.h>
23#include <scsi/scsi_driver.h>
24#include <scsi/scsi_eh.h>
25#include <scsi/scsi_host.h>
26#include <scsi/scsi_request.h>
27
28#include "scsi_priv.h"
29#include "scsi_logging.h"
30
31
32#define SG_MEMPOOL_NR (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
33#define SG_MEMPOOL_SIZE 32
34
35struct scsi_host_sg_pool {
36 size_t size;
37 char *name;
38 kmem_cache_t *slab;
39 mempool_t *pool;
40};
41
42#if (SCSI_MAX_PHYS_SEGMENTS < 32)
43#error SCSI_MAX_PHYS_SEGMENTS is too small
44#endif
45
46#define SP(x) { x, "sgpool-" #x }
47static struct scsi_host_sg_pool scsi_sg_pools[] = {
48 SP(8),
49 SP(16),
50 SP(32),
51#if (SCSI_MAX_PHYS_SEGMENTS > 32)
52 SP(64),
53#if (SCSI_MAX_PHYS_SEGMENTS > 64)
54 SP(128),
55#if (SCSI_MAX_PHYS_SEGMENTS > 128)
56 SP(256),
57#if (SCSI_MAX_PHYS_SEGMENTS > 256)
58#error SCSI_MAX_PHYS_SEGMENTS is too large
59#endif
60#endif
61#endif
62#endif
63};
64#undef SP
65
66
67/*
68 * Function: scsi_insert_special_req()
69 *
70 * Purpose: Insert pre-formed request into request queue.
71 *
72 * Arguments: sreq - request that is ready to be queued.
73 * at_head - boolean. True if we should insert at head
74 * of queue, false if we should insert at tail.
75 *
76 * Lock status: Assumed that lock is not held upon entry.
77 *
78 * Returns: Nothing
79 *
80 * Notes: This function is called from character device and from
81 * ioctl types of functions where the caller knows exactly
82 * what SCSI command needs to be issued. The idea is that
83 * we merely inject the command into the queue (at the head
84 * for now), and then call the queue request function to actually
85 * process it.
86 */
87int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
88{
89 /*
90 * Because users of this function are apt to reuse requests with no
91 * modification, we have to sanitise the request flags here
92 */
93 sreq->sr_request->flags &= ~REQ_DONTPREP;
94 blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
95 at_head, sreq);
96 return 0;
97}
98
99static void scsi_run_queue(struct request_queue *q);
100
101/*
102 * Function: scsi_queue_insert()
103 *
104 * Purpose: Insert a command in the midlevel queue.
105 *
106 * Arguments: cmd - command that we are adding to queue.
107 * reason - why we are inserting command to queue.
108 *
109 * Lock status: Assumed that lock is not held upon entry.
110 *
111 * Returns: Nothing.
112 *
113 * Notes: We do this for one of two cases. Either the host is busy
114 * and it cannot accept any more commands for the time being,
115 * or the device returned QUEUE_FULL and can accept no more
116 * commands.
117 * Notes: This could be called either from an interrupt context or a
118 * normal process context.
119 */
120int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
121{
122 struct Scsi_Host *host = cmd->device->host;
123 struct scsi_device *device = cmd->device;
124 struct request_queue *q = device->request_queue;
125 unsigned long flags;
126
127 SCSI_LOG_MLQUEUE(1,
128 printk("Inserting command %p into mlqueue\n", cmd));
129
130 /*
131 * Set the appropriate busy bit for the device/host.
132 *
133 * If the host/device isn't busy, assume that something actually
134 * completed, and that we should be able to queue a command now.
135 *
136 * Note that the prior mid-layer assumption that any host could
137 * always queue at least one command is now broken. The mid-layer
138 * will implement a user specifiable stall (see
139 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
140 * if a command is requeued with no other commands outstanding
141 * either for the device or for the host.
142 */
143 if (reason == SCSI_MLQUEUE_HOST_BUSY)
144 host->host_blocked = host->max_host_blocked;
145 else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
146 device->device_blocked = device->max_device_blocked;
147
148 /*
149 * Decrement the counters, since these commands are no longer
150 * active on the host/device.
151 */
152 scsi_device_unbusy(device);
153
154 /*
155 * Requeue this command. It will go before all other commands
156 * that are already in the queue.
157 *
158 * NOTE: there is magic here about the way the queue is plugged if
159 * we have no outstanding commands.
160 *
161 * Although we *don't* plug the queue, we call the request
162 * function. The SCSI request function detects the blocked condition
163 * and plugs the queue appropriately.
164 */
165 spin_lock_irqsave(q->queue_lock, flags);
166 blk_requeue_request(q, cmd->request);
167 spin_unlock_irqrestore(q->queue_lock, flags);
168
169 scsi_run_queue(q);
170
171 return 0;
172}
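/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * normally does not call scsi_queue_insert() itself.  Its queuecommand()
 * hook simply returns one of the SCSI_MLQUEUE_* codes and the mid-layer
 * requeues the command on its behalf, roughly like:
 *
 *	static int example_queuecommand(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		if (example_adapter_full(cmd->device->host))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *		return 0;
 *	}
 *
 * example_queuecommand() and example_adapter_full() are hypothetical names
 * used only for illustration.
 */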
173
174/*
175 * Function: scsi_do_req
176 *
177 * Purpose: Queue a SCSI request
178 *
179 * Arguments: sreq - command descriptor.
180 * cmnd - actual SCSI command to be performed.
181 * buffer - data buffer.
182 * bufflen - size of data buffer.
183 * done - completion function to be run.
184 * timeout - how long to let it run before timeout.
185 * retries - number of retries we allow.
186 *
187 * Lock status: No locks held upon entry.
188 *
189 * Returns: Nothing.
190 *
191 * Notes: This function is only used for queueing requests for things
192 * like ioctls and character device requests - this is because
193 * we essentially just inject a request into the queue for the
194 * device.
195 *
196 * In order to support the scsi_device_quiesce function, we
197 * now inject requests on the *head* of the device queue
198 * rather than the tail.
199 */
200void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
201 void *buffer, unsigned bufflen,
202 void (*done)(struct scsi_cmnd *),
203 int timeout, int retries)
204{
205 /*
206 * If the upper level driver is reusing these things, then
207 * we should release the low-level block now. Another one will
208 * be allocated later when this request is getting queued.
209 */
210 __scsi_release_request(sreq);
211
212 /*
213 * Our own function scsi_done (which marks the host as not busy,
214 * disables the timeout counter, etc) will be called by us or by the
215 * scsi_hosts[host].queuecommand() function. It needs to also call
216 * the completion function for the high level driver.
217 */
218 memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
219 sreq->sr_bufflen = bufflen;
220 sreq->sr_buffer = buffer;
221 sreq->sr_allowed = retries;
222 sreq->sr_done = done;
223 sreq->sr_timeout_per_command = timeout;
224
225 if (sreq->sr_cmd_len == 0)
226 sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
227
228 /*
229 * head injection *required* here otherwise quiesce won't work
230 */
231 scsi_insert_special_req(sreq, 1);
232}
233EXPORT_SYMBOL(scsi_do_req);
234
235/* This is the end routine we get to if a command was never attached
236 * to the request. Simply complete the request without changing
237 * rq_status; this will cause a DRIVER_ERROR. */
238static void scsi_wait_req_end_io(struct request *req)
239{
240 BUG_ON(!req->waiting);
241
242 complete(req->waiting);
243}
244
245void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
246 unsigned bufflen, int timeout, int retries)
247{
248 DECLARE_COMPLETION(wait);
249 int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
250 struct request *req;
251
252 req = blk_get_request(sreq->sr_device->request_queue, write,
253 __GFP_WAIT);
254 if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
255 buffer, bufflen, __GFP_WAIT)) {
256 sreq->sr_result = DRIVER_ERROR << 24;
257 blk_put_request(req);
258 return;
259 }
260
261 req->flags |= REQ_NOMERGE;
262 req->waiting = &wait;
263 req->end_io = scsi_wait_req_end_io;
264 req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
265 req->sense = sreq->sr_sense_buffer;
266 req->sense_len = 0;
267 memcpy(req->cmd, cmnd, req->cmd_len);
268 req->timeout = timeout;
269 req->flags |= REQ_BLOCK_PC;
270 req->rq_disk = NULL;
271 blk_insert_request(sreq->sr_device->request_queue, req,
272 sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
273 wait_for_completion(&wait);
274 sreq->sr_request->waiting = NULL;
275 sreq->sr_result = req->errors;
276 if (req->errors)
277 sreq->sr_result |= (DRIVER_ERROR << 24);
278
279 blk_put_request(req);
280}
281
282EXPORT_SYMBOL(scsi_wait_req);
283
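/*
 * Illustrative sketch (not part of the original file): a caller of the
 * older scsi_request interface allocates a request, sets the data
 * direction and issues it synchronously with scsi_wait_req(), e.g.:
 *
 *	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *
 *	if (sreq) {
 *		sreq->sr_data_direction = DMA_NONE;
 *		scsi_wait_req(sreq, cmd, NULL, 0, 30 * HZ, 3);
 *		if (sreq->sr_result)
 *			... inspect sreq->sr_sense_buffer ...
 *		scsi_release_request(sreq);
 *	}
 *
 * The timeout and retry counts above are arbitrary example values.
 */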
284/**
285 * scsi_execute - insert request and wait for the result
286 * @sdev: scsi device
287 * @cmd: scsi command
288 * @data_direction: data direction
289 * @buffer: data buffer
290 * @bufflen: len of buffer
291 * @sense: optional sense buffer
292 * @timeout: request timeout in seconds
293 * @retries: number of times to retry request
294 * @flags: or into request flags
295 *
296 * returns the req->errors value which is the scsi_cmnd result
297 * field.
298 **/
299int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
300 int data_direction, void *buffer, unsigned bufflen,
301 unsigned char *sense, int timeout, int retries, int flags)
302{
303 struct request *req;
304 int write = (data_direction == DMA_TO_DEVICE);
305 int ret = DRIVER_ERROR << 24;
306
307 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
308
309 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
310 buffer, bufflen, __GFP_WAIT))
311 goto out;
312
313 req->cmd_len = COMMAND_SIZE(cmd[0]);
314 memcpy(req->cmd, cmd, req->cmd_len);
315 req->sense = sense;
316 req->sense_len = 0;
317 req->timeout = timeout;
318 req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL;
319
320 /*
321 * head injection *required* here otherwise quiesce won't work
322 */
323 blk_execute_rq(req->q, NULL, req, 1);
324
325 ret = req->errors;
326 out:
327 blk_put_request(req);
328
329 return ret;
330}
331EXPORT_SYMBOL(scsi_execute);
332
333
334int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
335 int data_direction, void *buffer, unsigned bufflen,
336 struct scsi_sense_hdr *sshdr, int timeout, int retries)
337{
338 char *sense = NULL;
339 int result;
340 if (sshdr) {
341 sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
342 if (!sense)
343 return DRIVER_ERROR << 24;
344 memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
345 }
346 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
347 sense, timeout, retries, 0);
348 if (sshdr)
349 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
350
351 kfree(sense);
352 return result;
353}
354EXPORT_SYMBOL(scsi_execute_req);
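/*
 * Illustrative sketch (not part of the original file): the block-layer
 * based helpers above are typically used by upper-level drivers like this:
 *
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
 *				  30 * HZ, 3);
 *	if (!scsi_status_is_good(result) && scsi_sense_valid(&sshdr))
 *		... decode sshdr.sense_key / sshdr.asc / sshdr.ascq ...
 *
 * The timeout and retry counts are example values only.
 */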
355
356/*
357 * Function: scsi_init_cmd_errh()
358 *
359 * Purpose: Initialize cmd fields related to error handling.
360 *
361 * Arguments: cmd - command that is ready to be queued.
362 *
363 * Returns: Nothing
364 *
365 * Notes: This function has the job of initializing a number of
366 * fields related to error handling. Typically this will
367 * be called once for each command, as required.
368 */
369static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
370{
371 cmd->serial_number = 0;
372
373 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
374
375 if (cmd->cmd_len == 0)
376 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
377
378 /*
379 * We need saved copies of a number of fields - this is because
380 * error handling may need to overwrite these with different values
381 * to run different commands, and once error handling is complete,
382 * we will need to restore these values prior to running the actual
383 * command.
384 */
385 cmd->old_use_sg = cmd->use_sg;
386 cmd->old_cmd_len = cmd->cmd_len;
387 cmd->sc_old_data_direction = cmd->sc_data_direction;
388 cmd->old_underflow = cmd->underflow;
389 memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
390 cmd->buffer = cmd->request_buffer;
391 cmd->bufflen = cmd->request_bufflen;
392
393 return 1;
394}
395
396/*
397 * Function: scsi_setup_cmd_retry()
398 *
399 * Purpose: Restore the command state for a retry
400 *
401 * Arguments: cmd - command to be restored
402 *
403 * Returns: Nothing
404 *
405 * Notes: Immediately prior to retrying a command, we need
406 * to restore certain fields that we saved above.
407 */
408void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
409{
410 memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
411 cmd->request_buffer = cmd->buffer;
412 cmd->request_bufflen = cmd->bufflen;
413 cmd->use_sg = cmd->old_use_sg;
414 cmd->cmd_len = cmd->old_cmd_len;
415 cmd->sc_data_direction = cmd->sc_old_data_direction;
416 cmd->underflow = cmd->old_underflow;
417}
418
419void scsi_device_unbusy(struct scsi_device *sdev)
420{
421 struct Scsi_Host *shost = sdev->host;
422 unsigned long flags;
423
424 spin_lock_irqsave(shost->host_lock, flags);
425 shost->host_busy--;
426 if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
427 shost->host_failed))
428 scsi_eh_wakeup(shost);
429 spin_unlock(shost->host_lock);
430 spin_lock(sdev->request_queue->queue_lock);
431 sdev->device_busy--;
432 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
433}
434
435/*
436 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
437 * and call blk_run_queue for all the scsi_devices on the target -
438 * including current_sdev first.
439 *
440 * Called with *no* scsi locks held.
441 */
442static void scsi_single_lun_run(struct scsi_device *current_sdev)
443{
444 struct Scsi_Host *shost = current_sdev->host;
445 struct scsi_device *sdev, *tmp;
446 struct scsi_target *starget = scsi_target(current_sdev);
447 unsigned long flags;
448
449 spin_lock_irqsave(shost->host_lock, flags);
450 starget->starget_sdev_user = NULL;
451 spin_unlock_irqrestore(shost->host_lock, flags);
452
453 /*
454 * Call blk_run_queue for all LUNs on the target, starting with
455 * current_sdev. We race with others (to set starget_sdev_user),
456 * but in most cases, we will be first. Ideally, each LU on the
457 * target would get some limited time or requests on the target.
458 */
459 blk_run_queue(current_sdev->request_queue);
460
461 spin_lock_irqsave(shost->host_lock, flags);
462 if (starget->starget_sdev_user)
463 goto out;
464 list_for_each_entry_safe(sdev, tmp, &starget->devices,
465 same_target_siblings) {
466 if (sdev == current_sdev)
467 continue;
468 if (scsi_device_get(sdev))
469 continue;
470
471 spin_unlock_irqrestore(shost->host_lock, flags);
472 blk_run_queue(sdev->request_queue);
473 spin_lock_irqsave(shost->host_lock, flags);
474
475 scsi_device_put(sdev);
476 }
477 out:
478 spin_unlock_irqrestore(shost->host_lock, flags);
479}
480
481/*
482 * Function: scsi_run_queue()
483 *
484 * Purpose: Select a proper request queue to serve next
485 *
486 * Arguments: q - last request's queue
487 *
488 * Returns: Nothing
489 *
490 * Notes: The previous command was completely finished, start
491 * a new one if possible.
492 */
493static void scsi_run_queue(struct request_queue *q)
494{
495 struct scsi_device *sdev = q->queuedata;
496 struct Scsi_Host *shost = sdev->host;
497 unsigned long flags;
498
499 if (sdev->single_lun)
500 scsi_single_lun_run(sdev);
501
502 spin_lock_irqsave(shost->host_lock, flags);
503 while (!list_empty(&shost->starved_list) &&
504 !shost->host_blocked && !shost->host_self_blocked &&
505 !((shost->can_queue > 0) &&
506 (shost->host_busy >= shost->can_queue))) {
507 /*
508 * As long as shost is accepting commands and we have
509 * starved queues, call blk_run_queue. scsi_request_fn
510 * drops the queue_lock and can add us back to the
511 * starved_list.
512 *
513 * host_lock protects the starved_list and starved_entry.
514 * scsi_request_fn must get the host_lock before checking
515 * or modifying starved_list or starved_entry.
516 */
517 sdev = list_entry(shost->starved_list.next,
518 struct scsi_device, starved_entry);
519 list_del_init(&sdev->starved_entry);
520 spin_unlock_irqrestore(shost->host_lock, flags);
521
522 blk_run_queue(sdev->request_queue);
523
524 spin_lock_irqsave(shost->host_lock, flags);
525 if (unlikely(!list_empty(&sdev->starved_entry)))
526 /*
527 * sdev lost a race, and was put back on the
528 * starved list. This is unlikely but without this
529 * in theory we could loop forever.
530 */
531 break;
532 }
533 spin_unlock_irqrestore(shost->host_lock, flags);
534
535 blk_run_queue(q);
536}
537
538/*
539 * Function: scsi_requeue_command()
540 *
541 * Purpose: Handle post-processing of completed commands.
542 *
543 * Arguments: q - queue to operate on
544 * cmd - command that may need to be requeued.
545 *
546 * Returns: Nothing
547 *
548 * Notes: After command completion, there may be blocks left
549 * over which weren't finished by the previous command
550 * this can be for a number of reasons - the main one is
551 * I/O errors in the middle of the request, in which case
552 * we need to request the blocks that come after the bad
553 * sector.
554 */
555static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
556{
557 unsigned long flags;
558
559 cmd->request->flags &= ~REQ_DONTPREP;
560
561 spin_lock_irqsave(q->queue_lock, flags);
562 blk_requeue_request(q, cmd->request);
563 spin_unlock_irqrestore(q->queue_lock, flags);
564
565 scsi_run_queue(q);
566}
567
568void scsi_next_command(struct scsi_cmnd *cmd)
569{
570 struct request_queue *q = cmd->device->request_queue;
571
572 scsi_put_command(cmd);
573 scsi_run_queue(q);
574}
575
576void scsi_run_host_queues(struct Scsi_Host *shost)
577{
578 struct scsi_device *sdev;
579
580 shost_for_each_device(sdev, shost)
581 scsi_run_queue(sdev->request_queue);
582}
583
584/*
585 * Function: scsi_end_request()
586 *
587 * Purpose: Post-processing of completed commands (usually invoked at end
588 * of upper level post-processing and scsi_io_completion).
589 *
590 * Arguments: cmd - command that is complete.
591 * uptodate - 1 if I/O indicates success, <= 0 for I/O error.
592 * bytes - number of bytes of completed I/O
593 * requeue - indicates whether we should requeue leftovers.
594 *
595 * Lock status: Assumed that lock is not held upon entry.
596 *
597 * Returns: cmd if requeue done or required, NULL otherwise
598 *
599 * Notes: This is called for block device requests in order to
600 * mark some number of sectors as complete.
601 *
602 * We are guaranteeing that the request queue will be goosed
603 * at some point during this call.
604 */
605static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
606 int bytes, int requeue)
607{
608 request_queue_t *q = cmd->device->request_queue;
609 struct request *req = cmd->request;
610 unsigned long flags;
611
612 /*
613 * If there are blocks left over at the end, set up the command
614 * to queue the remainder of them.
615 */
616 if (end_that_request_chunk(req, uptodate, bytes)) {
617 int leftover = (req->hard_nr_sectors << 9);
618
619 if (blk_pc_request(req))
620 leftover = req->data_len;
621
622 /* kill remainder if no retries */
623 if (!uptodate && blk_noretry_request(req))
624 end_that_request_chunk(req, 0, leftover);
625 else {
626 if (requeue)
627 /*
628 * Bleah. Leftovers again. Stick the
629 * leftovers in the front of the
630 * queue, and goose the queue again.
631 */
632 scsi_requeue_command(q, cmd);
633
634 return cmd;
635 }
636 }
637
638 add_disk_randomness(req->rq_disk);
639
640 spin_lock_irqsave(q->queue_lock, flags);
641 if (blk_rq_tagged(req))
642 blk_queue_end_tag(q, req);
643 end_that_request_last(req);
644 spin_unlock_irqrestore(q->queue_lock, flags);
645
646 /*
647 * This will goose the queue request function at the end, so we don't
648 * need to worry about launching another command.
649 */
650 scsi_next_command(cmd);
651 return NULL;
652}
653
654static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
655{
656 struct scsi_host_sg_pool *sgp;
657 struct scatterlist *sgl;
658
659 BUG_ON(!cmd->use_sg);
660
661 switch (cmd->use_sg) {
662 case 1 ... 8:
663 cmd->sglist_len = 0;
664 break;
665 case 9 ... 16:
666 cmd->sglist_len = 1;
667 break;
668 case 17 ... 32:
669 cmd->sglist_len = 2;
670 break;
671#if (SCSI_MAX_PHYS_SEGMENTS > 32)
672 case 33 ... 64:
673 cmd->sglist_len = 3;
674 break;
675#if (SCSI_MAX_PHYS_SEGMENTS > 64)
676 case 65 ... 128:
677 cmd->sglist_len = 4;
678 break;
679#if (SCSI_MAX_PHYS_SEGMENTS > 128)
680 case 129 ... 256:
681 cmd->sglist_len = 5;
682 break;
683#endif
684#endif
685#endif
686 default:
687 return NULL;
688 }
689
690 sgp = scsi_sg_pools + cmd->sglist_len;
691 sgl = mempool_alloc(sgp->pool, gfp_mask);
692 return sgl;
693}
694
695static void scsi_free_sgtable(struct scatterlist *sgl, int index)
696{
697 struct scsi_host_sg_pool *sgp;
698
699 BUG_ON(index >= SG_MEMPOOL_NR);
700
701 sgp = scsi_sg_pools + index;
702 mempool_free(sgl, sgp->pool);
703}
704
705/*
706 * Function: scsi_release_buffers()
707 *
708 * Purpose: Completion processing for block device I/O requests.
709 *
710 * Arguments: cmd - command that we are bailing.
711 *
712 * Lock status: Assumed that no lock is held upon entry.
713 *
714 * Returns: Nothing
715 *
716 * Notes: In the event that an upper level driver rejects a
717 * command, we must release resources allocated during
718 * the __init_io() function. Primarily this would involve
719 * the scatter-gather table, and potentially any bounce
720 * buffers.
721 */
722static void scsi_release_buffers(struct scsi_cmnd *cmd)
723{
724 struct request *req = cmd->request;
725
726 /*
727 * Free up any indirection buffers we allocated for DMA purposes.
728 */
729 if (cmd->use_sg)
730 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
731 else if (cmd->request_buffer != req->buffer)
732 kfree(cmd->request_buffer);
733
734 /*
735 * Zero these out. They now point to freed memory, and it is
736 * dangerous to hang onto the pointers.
737 */
738 cmd->buffer = NULL;
739 cmd->bufflen = 0;
740 cmd->request_buffer = NULL;
741 cmd->request_bufflen = 0;
742}
743
744/*
745 * Function: scsi_io_completion()
746 *
747 * Purpose: Completion processing for block device I/O requests.
748 *
749 * Arguments: cmd - command that is finished.
750 *
751 * Lock status: Assumed that no lock is held upon entry.
752 *
753 * Returns: Nothing
754 *
755 * Notes: This function is matched in terms of capabilities to
756 * the function that created the scatter-gather list.
757 * In other words, if there are no bounce buffers
758 * (the normal case for most drivers), we don't need
759 * the logic to deal with cleaning up afterwards.
760 *
761 * We must do one of several things here:
762 *
763 * a) Call scsi_end_request. This will finish off the
764 * specified number of sectors. If we are done, the
765 * command block will be released, and the queue
766 * function will be goosed. If we are not done, then
767 * scsi_end_request will directly goose the queue.
768 *
769 * b) We can just use scsi_requeue_command() here. This would
770 * be used if we just wanted to retry, for example.
771 */
772void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
773 unsigned int block_bytes)
774{
775 int result = cmd->result;
776 int this_count = cmd->bufflen;
777 request_queue_t *q = cmd->device->request_queue;
778 struct request *req = cmd->request;
779 int clear_errors = 1;
780 struct scsi_sense_hdr sshdr;
781 int sense_valid = 0;
782 int sense_deferred = 0;
783
784 if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
785 return;
786
787 /*
788 * Free up any indirection buffers we allocated for DMA purposes.
789 * For the case of a READ, we need to copy the data out of the
790 * bounce buffer and into the real buffer.
791 */
792 if (cmd->use_sg)
793 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
794 else if (cmd->buffer != req->buffer) {
795 if (rq_data_dir(req) == READ) {
796 unsigned long flags;
797 char *to = bio_kmap_irq(req->bio, &flags);
798 memcpy(to, cmd->buffer, cmd->bufflen);
799 bio_kunmap_irq(to, &flags);
800 }
801 kfree(cmd->buffer);
802 }
803
804 if (result) {
805 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
806 if (sense_valid)
807 sense_deferred = scsi_sense_is_deferred(&sshdr);
808 }
809 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
810 req->errors = result;
811 if (result) {
812 clear_errors = 0;
813 if (sense_valid && req->sense) {
814 /*
815 * SG_IO wants current and deferred errors
816 */
817 int len = 8 + cmd->sense_buffer[7];
818
819 if (len > SCSI_SENSE_BUFFERSIZE)
820 len = SCSI_SENSE_BUFFERSIZE;
821 memcpy(req->sense, cmd->sense_buffer, len);
822 req->sense_len = len;
823 }
824 } else
825 req->data_len = cmd->resid;
826 }
827
828 /*
829 * Zero these out. They now point to freed memory, and it is
830 * dangerous to hang onto the pointers.
831 */
832 cmd->buffer = NULL;
833 cmd->bufflen = 0;
834 cmd->request_buffer = NULL;
835 cmd->request_bufflen = 0;
836
837 /*
838 * Next deal with any sectors which we were able to correctly
839 * handle.
840 */
841 if (good_bytes >= 0) {
842 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
843 req->nr_sectors, good_bytes));
844 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
845
846 if (clear_errors)
847 req->errors = 0;
848 /*
849 * If multiple sectors are requested in one buffer, then
850 * they will have been finished off by the first command.
851 * If not, then we have a multi-buffer command.
852 *
853 * If block_bytes != 0, it means we had a medium error
854 * of some sort, and that we want to mark some number of
855 * sectors as not uptodate. Thus we want to inhibit
856 * requeueing right here - we will requeue down below
857 * when we handle the bad sectors.
858 */
859 cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
860
861 /*
862 * If the command completed without error, then either finish off the
863 * rest of the command, or start a new one.
864 */
865 if (result == 0 || cmd == NULL ) {
866 return;
867 }
868 }
869 /*
870 * Now, if we were good little boys and girls, Santa left us a request
871 * sense buffer. We can extract information from this, so we
872 * can choose a block to remap, etc.
873 */
874 if (sense_valid && !sense_deferred) {
875 switch (sshdr.sense_key) {
876 case UNIT_ATTENTION:
877 if (cmd->device->removable) {
878 /* detected disc change. set a bit
879 * and quietly refuse further access.
880 */
881 cmd->device->changed = 1;
882 cmd = scsi_end_request(cmd, 0,
883 this_count, 1);
884 return;
885 } else {
886 /*
887 * Must have been a power glitch, or a
888 * bus reset. Could not have been a
889 * media change, so we just retry the
890 * request and see what happens.
891 */
892 scsi_requeue_command(q, cmd);
893 return;
894 }
895 break;
896 case ILLEGAL_REQUEST:
897 /*
898 * If we had an ILLEGAL REQUEST returned, then we may
899 * have performed an unsupported command. The only
900 * thing this should be would be a ten byte read where
901 * only a six byte read was supported. Also, on a
902 * system where READ CAPACITY failed, we may have read
903 * past the end of the disk.
904 */
905 if (cmd->device->use_10_for_rw &&
906 (cmd->cmnd[0] == READ_10 ||
907 cmd->cmnd[0] == WRITE_10)) {
908 cmd->device->use_10_for_rw = 0;
909 /*
910 * This will cause a retry with a 6-byte
911 * command.
912 */
913 scsi_requeue_command(q, cmd);
914 result = 0;
915 } else {
916 cmd = scsi_end_request(cmd, 0, this_count, 1);
917 return;
918 }
919 break;
920 case NOT_READY:
921 /*
922 * If the device is in the process of becoming ready,
923 * retry.
924 */
925 if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
926 scsi_requeue_command(q, cmd);
927 return;
928 }
929 printk(KERN_INFO "Device %s not ready.\n",
930 req->rq_disk ? req->rq_disk->disk_name : "");
931 cmd = scsi_end_request(cmd, 0, this_count, 1);
932 return;
933 case VOLUME_OVERFLOW:
934 printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
935 cmd->device->host->host_no,
936 (int)cmd->device->channel,
937 (int)cmd->device->id, (int)cmd->device->lun);
938 __scsi_print_command(cmd->data_cmnd);
939 scsi_print_sense("", cmd);
940 cmd = scsi_end_request(cmd, 0, block_bytes, 1);
941 return;
942 default:
943 break;
944 }
945 } /* driver byte != 0 */
946 if (host_byte(result) == DID_RESET) {
947 /*
948 * Third party bus reset or reset for error
949 * recovery reasons. Just retry the request
950 * and see what happens.
951 */
952 scsi_requeue_command(q, cmd);
953 return;
954 }
955 if (result) {
956 if (!(req->flags & REQ_SPECIAL))
957 printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
958 "= 0x%x\n", cmd->device->host->host_no,
959 cmd->device->channel,
960 cmd->device->id,
961 cmd->device->lun, result);
962
963 if (driver_byte(result) & DRIVER_SENSE)
964 scsi_print_sense("", cmd);
965 /*
966 * Mark a single buffer as not uptodate. Queue the remainder.
967 * We sometimes get this cruft in the event that a medium error
968 * isn't properly reported.
969 */
970 block_bytes = req->hard_cur_sectors << 9;
971 if (!block_bytes)
972 block_bytes = req->data_len;
973 cmd = scsi_end_request(cmd, 0, block_bytes, 1);
974 }
975}
976EXPORT_SYMBOL(scsi_io_completion);
977
978/*
979 * Function: scsi_init_io()
980 *
981 * Purpose: SCSI I/O initialize function.
982 *
983 * Arguments: cmd - Command descriptor we wish to initialize
984 *
985 * Returns: 0 on success
986 * BLKPREP_DEFER if the failure is retryable
987 * BLKPREP_KILL if the failure is fatal
988 */
989static int scsi_init_io(struct scsi_cmnd *cmd)
990{
991 struct request *req = cmd->request;
992 struct scatterlist *sgpnt;
993 int count;
994
995 /*
996 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
997 */
998 if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
999 cmd->request_bufflen = req->data_len;
1000 cmd->request_buffer = req->data;
1001 req->buffer = req->data;
1002 cmd->use_sg = 0;
1003 return 0;
1004 }
1005
1006 /*
1007 * we used to not use scatter-gather for single segment request,
1008 * but now we do (it makes highmem I/O easier to support without
1009 * kmapping pages)
1010 */
1011 cmd->use_sg = req->nr_phys_segments;
1012
1013 /*
1014 * if sg table allocation fails, requeue request later.
1015 */
1016 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
1017 if (unlikely(!sgpnt))
1018 return BLKPREP_DEFER;
1019
1020 cmd->request_buffer = (char *) sgpnt;
1021 cmd->request_bufflen = req->nr_sectors << 9;
1022 if (blk_pc_request(req))
1023 cmd->request_bufflen = req->data_len;
1024 req->buffer = NULL;
1025
1026 /*
1027 * Next, walk the list, and fill in the addresses and sizes of
1028 * each segment.
1029 */
1030 count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
1031
1032 /*
1033 * mapped well, send it off
1034 */
1035 if (likely(count <= cmd->use_sg)) {
1036 cmd->use_sg = count;
1037 return 0;
1038 }
1039
1040 printk(KERN_ERR "Incorrect number of segments after building list\n");
1041 printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
1042 printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
1043 req->current_nr_sectors);
1044
1045 /* release the command and kill it */
1046 scsi_release_buffers(cmd);
1047 scsi_put_command(cmd);
1048 return BLKPREP_KILL;
1049}
1050
1051static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
1052{
1053 struct scsi_device *sdev = q->queuedata;
1054 struct scsi_driver *drv;
1055
1056 if (sdev->sdev_state == SDEV_RUNNING) {
1057 drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1058
1059 if (drv->prepare_flush)
1060 return drv->prepare_flush(q, rq);
1061 }
1062
1063 return 0;
1064}
1065
1066static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
1067{
1068 struct scsi_device *sdev = q->queuedata;
1069 struct request *flush_rq = rq->end_io_data;
1070 struct scsi_driver *drv;
1071
1072 if (flush_rq->errors) {
1073 printk("scsi: barrier error, disabling flush support\n");
1074 blk_queue_ordered(q, QUEUE_ORDERED_NONE);
1075 }
1076
1077 if (sdev->sdev_state == SDEV_RUNNING) {
1078 drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1079 drv->end_flush(q, rq);
1080 }
1081}
1082
1083static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1084 sector_t *error_sector)
1085{
1086 struct scsi_device *sdev = q->queuedata;
1087 struct scsi_driver *drv;
1088
1089 if (sdev->sdev_state != SDEV_RUNNING)
1090 return -ENXIO;
1091
1092 drv = *(struct scsi_driver **) disk->private_data;
1093 if (drv->issue_flush)
1094 return drv->issue_flush(&sdev->sdev_gendev, error_sector);
1095
1096 return -EOPNOTSUPP;
1097}
1098
1099static void scsi_generic_done(struct scsi_cmnd *cmd)
1100{
1101 BUG_ON(!blk_pc_request(cmd->request));
1102 scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
1103}
1104
1105static int scsi_prep_fn(struct request_queue *q, struct request *req)
1106{
1107 struct scsi_device *sdev = q->queuedata;
1108 struct scsi_cmnd *cmd;
1109 int specials_only = 0;
1110
1111 /*
1112 * Just check to see if the device is online. If it isn't, we
1113 * refuse to process any commands. The device must be brought
1114 * online before trying any recovery commands
1115 */
1116 if (unlikely(!scsi_device_online(sdev))) {
1117 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1118 sdev->host->host_no, sdev->id, sdev->lun);
1119 return BLKPREP_KILL;
1120 }
1121 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1122 /* OK, we're not in a running state don't prep
1123 * user commands */
1124 if (sdev->sdev_state == SDEV_DEL) {
1125 /* Device is fully deleted, no commands
1126 * at all allowed down */
1127 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
1128 sdev->host->host_no, sdev->id, sdev->lun);
1129 return BLKPREP_KILL;
1130 }
1131 /* OK, we only allow special commands (i.e. not
1132 * user initiated ones */
1133 specials_only = sdev->sdev_state;
1134 }
1135
1136 /*
1137 * Find the actual device driver associated with this command.
1138 * The SPECIAL requests are things like character device or
1139 * ioctls, which did not originate from ll_rw_blk. Note that
1140 * the special field is also used to indicate the cmd for
1141 * the remainder of a partially fulfilled request that can
1142 * come up when there is a medium error. We have to treat
1143 * these two cases differently. We differentiate by looking
1144 * at request->cmd, as this tells us the real story.
1145 */
1146 if (req->flags & REQ_SPECIAL && req->special) {
1147 struct scsi_request *sreq = req->special;
1148
1149 if (sreq->sr_magic == SCSI_REQ_MAGIC) {
1150 cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
1151 if (unlikely(!cmd))
1152 goto defer;
1153 scsi_init_cmd_from_req(cmd, sreq);
1154 } else
1155 cmd = req->special;
1156 } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1157
1158 if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
1159 if(specials_only == SDEV_QUIESCE ||
1160 specials_only == SDEV_BLOCK)
1161 return BLKPREP_DEFER;
1162
1163 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
1164 sdev->host->host_no, sdev->id, sdev->lun);
1165 return BLKPREP_KILL;
1166 }
1167
1168
1169 /*
1170 * Now try and find a command block that we can use.
1171 */
1172 if (!req->special) {
1173 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1174 if (unlikely(!cmd))
1175 goto defer;
1176 } else
1177 cmd = req->special;
1178
1179 /* pull a tag out of the request if we have one */
1180 cmd->tag = req->tag;
1181 } else {
1182 blk_dump_rq_flags(req, "SCSI bad req");
1183 return BLKPREP_KILL;
1184 }
1185
1186 /* note the overloading of req->special. When the tag
1187 * is active it always means cmd. If the tag goes
1188 * back for re-queueing, it may be reset */
1189 req->special = cmd;
1190 cmd->request = req;
1191
1192 /*
1193 * FIXME: drop the lock here because the functions below
1194 * expect to be called without the queue lock held. Also,
1195 * previously, we dequeued the request before dropping the
1196 * lock. We hope REQ_STARTED prevents anything untoward from
1197 * happening now.
1198 */
1199 if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1200 struct scsi_driver *drv;
1201 int ret;
1202
1203 /*
1204 * This will do a couple of things:
1205 * 1) Fill in the actual SCSI command.
1206 * 2) Fill in any other upper-level specific fields
1207 * (timeout).
1208 *
1209 * If this returns 0, it means that the request failed
1210 * (reading past end of disk, reading offline device,
1211 * etc). This won't actually talk to the device, but
1212 * some kinds of consistency checking may cause the
1213 * request to be rejected immediately.
1214 */
1215
1216 /*
1217 * This sets up the scatter-gather table (allocating if
1218 * required).
1219 */
1220 ret = scsi_init_io(cmd);
1221 if (ret) /* BLKPREP_KILL return also releases the command */
1222 return ret;
1223
1224 /*
1225 * Initialize the actual SCSI command for this request.
1226 */
1227 if (req->rq_disk) {
1228 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1229 if (unlikely(!drv->init_command(cmd))) {
1230 scsi_release_buffers(cmd);
1231 scsi_put_command(cmd);
1232 return BLKPREP_KILL;
1233 }
1234 } else {
1235 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1236 if (rq_data_dir(req) == WRITE)
1237 cmd->sc_data_direction = DMA_TO_DEVICE;
1238 else if (req->data_len)
1239 cmd->sc_data_direction = DMA_FROM_DEVICE;
1240 else
1241 cmd->sc_data_direction = DMA_NONE;
1242
1243 cmd->transfersize = req->data_len;
1244 cmd->allowed = 3;
1245 cmd->timeout_per_command = req->timeout;
1246 cmd->done = scsi_generic_done;
1247 }
1248 }
1249
1250 /*
1251 * The request is now prepped, no need to come back here
1252 */
1253 req->flags |= REQ_DONTPREP;
1254 return BLKPREP_OK;
1255
1256 defer:
1257 /* If we defer, the elv_next_request() returns NULL, but the
1258 * queue must be restarted, so we plug here if no returning
1259 * command will automatically do that. */
1260 if (sdev->device_busy == 0)
1261 blk_plug_device(q);
1262 return BLKPREP_DEFER;
1263}
1264
1265/*
1266 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1267 * return 0.
1268 *
1269 * Called with the queue_lock held.
1270 */
1271static inline int scsi_dev_queue_ready(struct request_queue *q,
1272 struct scsi_device *sdev)
1273{
1274 if (sdev->device_busy >= sdev->queue_depth)
1275 return 0;
1276 if (sdev->device_busy == 0 && sdev->device_blocked) {
1277 /*
1278 * unblock after device_blocked iterates to zero
1279 */
1280 if (--sdev->device_blocked == 0) {
1281 SCSI_LOG_MLQUEUE(3,
1282 printk("scsi%d (%d:%d) unblocking device at"
1283 " zero depth\n", sdev->host->host_no,
1284 sdev->id, sdev->lun));
1285 } else {
1286 blk_plug_device(q);
1287 return 0;
1288 }
1289 }
1290 if (sdev->device_blocked)
1291 return 0;
1292
1293 return 1;
1294}
1295
1296/*
1297 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1298 * return 0. We must end up running the queue again whenever 0 is
1299 * returned, else IO can hang.
1300 *
1301 * Called with host_lock held.
1302 */
1303static inline int scsi_host_queue_ready(struct request_queue *q,
1304 struct Scsi_Host *shost,
1305 struct scsi_device *sdev)
1306{
1307 if (shost->shost_state == SHOST_RECOVERY)
1308 return 0;
1309 if (shost->host_busy == 0 && shost->host_blocked) {
1310 /*
1311 * unblock after host_blocked iterates to zero
1312 */
1313 if (--shost->host_blocked == 0) {
1314 SCSI_LOG_MLQUEUE(3,
1315 printk("scsi%d unblocking host at zero depth\n",
1316 shost->host_no));
1317 } else {
1318 blk_plug_device(q);
1319 return 0;
1320 }
1321 }
1322 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1323 shost->host_blocked || shost->host_self_blocked) {
1324 if (list_empty(&sdev->starved_entry))
1325 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1326 return 0;
1327 }
1328
1329 /* We're OK to process the command, so we can't be starved */
1330 if (!list_empty(&sdev->starved_entry))
1331 list_del_init(&sdev->starved_entry);
1332
1333 return 1;
1334}
1335
1336/*
1337 * Kill requests for a dead device
1338 */
1339static void scsi_kill_requests(request_queue_t *q)
1340{
1341 struct request *req;
1342
1343 while ((req = elv_next_request(q)) != NULL) {
1344 blkdev_dequeue_request(req);
1345 req->flags |= REQ_QUIET;
1346 while (end_that_request_first(req, 0, req->nr_sectors))
1347 ;
1348 end_that_request_last(req);
1349 }
1350}
1351
1352/*
1353 * Function: scsi_request_fn()
1354 *
1355 * Purpose: Main strategy routine for SCSI.
1356 *
1357 * Arguments: q - Pointer to actual queue.
1358 *
1359 * Returns: Nothing
1360 *
1361 * Lock status: IO request lock assumed to be held when called.
1362 */
1363static void scsi_request_fn(struct request_queue *q)
1364{
1365 struct scsi_device *sdev = q->queuedata;
1366 struct Scsi_Host *shost;
1367 struct scsi_cmnd *cmd;
1368 struct request *req;
1369
1370 if (!sdev) {
1371 printk("scsi: killing requests for dead queue\n");
1372 scsi_kill_requests(q);
1373 return;
1374 }
1375
1376 if(!get_device(&sdev->sdev_gendev))
1377 /* We must be tearing the block queue down already */
1378 return;
1379
1380 /*
1381 * To start with, we keep looping until the queue is empty, or until
1382 * the host is no longer able to accept any more requests.
1383 */
1384 shost = sdev->host;
1385 while (!blk_queue_plugged(q)) {
1386 int rtn;
1387 /*
1388 * get next queueable request. We do this early to make sure
1389 * that the request is fully prepared even if we cannot
1390 * accept it.
1391 */
1392 req = elv_next_request(q);
1393 if (!req || !scsi_dev_queue_ready(q, sdev))
1394 break;
1395
1396 if (unlikely(!scsi_device_online(sdev))) {
1397 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1398 sdev->host->host_no, sdev->id, sdev->lun);
1399 blkdev_dequeue_request(req);
1400 req->flags |= REQ_QUIET;
1401 while (end_that_request_first(req, 0, req->nr_sectors))
1402 ;
1403 end_that_request_last(req);
1404 continue;
1405 }
1406
1407
1408 /*
1409 * Remove the request from the request list.
1410 */
1411 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1412 blkdev_dequeue_request(req);
1413 sdev->device_busy++;
1414
1415 spin_unlock(q->queue_lock);
1416 spin_lock(shost->host_lock);
1417
1418 if (!scsi_host_queue_ready(q, shost, sdev))
1419 goto not_ready;
1420 if (sdev->single_lun) {
1421 if (scsi_target(sdev)->starget_sdev_user &&
1422 scsi_target(sdev)->starget_sdev_user != sdev)
1423 goto not_ready;
1424 scsi_target(sdev)->starget_sdev_user = sdev;
1425 }
1426 shost->host_busy++;
1427
1428 /*
1429 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1430 * take the lock again.
1431 */
1432 spin_unlock_irq(shost->host_lock);
1433
1434 cmd = req->special;
1435 if (unlikely(cmd == NULL)) {
1436 printk(KERN_CRIT "impossible request in %s.\n"
1437 "please mail a stack trace to "
1438 "linux-scsi@vger.kernel.org",
1439 __FUNCTION__);
1440 BUG();
1441 }
1442
1443 /*
1444 * Finally, initialize any error handling parameters, and set up
1445 * the timers for timeouts.
1446 */
1447 scsi_init_cmd_errh(cmd);
1448
1449 /*
1450 * Dispatch the command to the low-level driver.
1451 */
1452 rtn = scsi_dispatch_cmd(cmd);
1453 spin_lock_irq(q->queue_lock);
1454 if(rtn) {
1455 /* we're refusing the command; because of
1456 * the way locks get dropped, we need to
1457 * check here if plugging is required */
1458 if(sdev->device_busy == 0)
1459 blk_plug_device(q);
1460
1461 break;
1462 }
1463 }
1464
1465 goto out;
1466
1467 not_ready:
1468 spin_unlock_irq(shost->host_lock);
1469
1470 /*
1471 * lock q, handle tag, requeue req, and decrement device_busy. We
1472 * must return with queue_lock held.
1473 *
1474 * Decrementing device_busy without checking it is OK, as all such
1475 * cases (host limits or settings) should run the queue at some
1476 * later time.
1477 */
1478 spin_lock_irq(q->queue_lock);
1479 blk_requeue_request(q, req);
1480 sdev->device_busy--;
1481 if(sdev->device_busy == 0)
1482 blk_plug_device(q);
1483 out:
1484 /* must be careful here...if we trigger the ->remove() function
1485 * we cannot be holding the q lock */
1486 spin_unlock_irq(q->queue_lock);
1487 put_device(&sdev->sdev_gendev);
1488 spin_lock_irq(q->queue_lock);
1489}
1490
1491u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1492{
1493 struct device *host_dev;
1494 u64 bounce_limit = 0xffffffff;
1495
1496 if (shost->unchecked_isa_dma)
1497 return BLK_BOUNCE_ISA;
1498 /*
1499 * Platforms with virtual-DMA translation
1500 * hardware have no practical limit.
1501 */
1502 if (!PCI_DMA_BUS_IS_PHYS)
1503 return BLK_BOUNCE_ANY;
1504
1505 host_dev = scsi_get_device(shost);
1506 if (host_dev && host_dev->dma_mask)
1507 bounce_limit = *host_dev->dma_mask;
1508
1509 return bounce_limit;
1510}
1511EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1512
1513struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1514{
1515 struct Scsi_Host *shost = sdev->host;
1516 struct request_queue *q;
1517
1518 q = blk_init_queue(scsi_request_fn, NULL);
1519 if (!q)
1520 return NULL;
1521
1522 blk_queue_prep_rq(q, scsi_prep_fn);
1523
1524 blk_queue_max_hw_segments(q, shost->sg_tablesize);
1525 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1526 blk_queue_max_sectors(q, shost->max_sectors);
1527 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1528 blk_queue_segment_boundary(q, shost->dma_boundary);
1529 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1530
1531 /*
1532 * ordered tags are superior to flush ordering
1533 */
1534 if (shost->ordered_tag)
1535 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1536 else if (shost->ordered_flush) {
1537 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1538 q->prepare_flush_fn = scsi_prepare_flush_fn;
1539 q->end_flush_fn = scsi_end_flush_fn;
1540 }
1541
1542 if (!shost->use_clustering)
1543 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1544 return q;
1545}
1546
1547void scsi_free_queue(struct request_queue *q)
1548{
1549 blk_cleanup_queue(q);
1550}
1551
1552/*
1553 * Function: scsi_block_requests()
1554 *
1555 * Purpose: Utility function used by low-level drivers to prevent further
1556 * commands from being queued to the device.
1557 *
1558 * Arguments: shost - Host in question
1559 *
1560 * Returns: Nothing
1561 *
1562 * Lock status: No locks are assumed held.
1563 *
1564 * Notes: There is no timer nor any other means by which the requests
1565 * get unblocked other than the low-level driver calling
1566 * scsi_unblock_requests().
1567 */
1568void scsi_block_requests(struct Scsi_Host *shost)
1569{
1570 shost->host_self_blocked = 1;
1571}
1572EXPORT_SYMBOL(scsi_block_requests);
1573
1574/*
1575 * Function: scsi_unblock_requests()
1576 *
1577 * Purpose: Utility function used by low-level drivers to allow further
1578 * commands from being queued to the device.
1579 *
1580 * Arguments: shost - Host in question
1581 *
1582 * Returns: Nothing
1583 *
1584 * Lock status: No locks are assumed held.
1585 *
1586 * Notes: There is no timer nor any other means by which the requests
1587 * get unblocked other than the low-level driver calling
1588 * scsi_unblock_requests().
1589 *
1590 * This is done as an API function so that changes to the
1591 * internals of the scsi mid-layer won't require wholesale
1592 * changes to drivers that use this feature.
1593 */
1594void scsi_unblock_requests(struct Scsi_Host *shost)
1595{
1596 shost->host_self_blocked = 0;
1597 scsi_run_host_queues(shost);
1598}
1599EXPORT_SYMBOL(scsi_unblock_requests);
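/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * typically brackets an internal adapter reset with these helpers so that
 * no new commands are queued while the hardware is reinitialised:
 *
 *	scsi_block_requests(shost);
 *	example_reset_adapter(shost);	(hypothetical LLD routine)
 *	scsi_unblock_requests(shost);
 *
 * scsi_unblock_requests() also reruns the per-device queues, so any I/O
 * held back while blocked is restarted automatically.
 */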
1600
1601int __init scsi_init_queue(void)
1602{
1603 int i;
1604
1605 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1606 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1607 int size = sgp->size * sizeof(struct scatterlist);
1608
1609 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1610 SLAB_HWCACHE_ALIGN, NULL, NULL);
1611 if (!sgp->slab) {
1612 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1613 sgp->name);
1614 }
1615
1616 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1617 mempool_alloc_slab, mempool_free_slab,
1618 sgp->slab);
1619 if (!sgp->pool) {
1620 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1621 sgp->name);
1622 }
1623 }
1624
1625 return 0;
1626}
1627
1628void scsi_exit_queue(void)
1629{
1630 int i;
1631
1632 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1633 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1634 mempool_destroy(sgp->pool);
1635 kmem_cache_destroy(sgp->slab);
1636 }
1637}
1638/**
1639 * scsi_mode_sense - issue a mode sense, falling back from 10 to
1640 * six bytes if necessary.
1641 * @sdev: SCSI device to be queried
1642 * @dbd: set if mode sense will allow block descriptors to be returned
1643 * @modepage: mode page being requested
1644 * @buffer: request buffer (may not be smaller than eight bytes)
1645 * @len: length of request buffer.
1646 * @timeout: command timeout
1647 * @retries: number of retries before failing
1648 * @data: returns a structure abstracting the mode header data
1649 * @sshdr: place to put decoded sense data (or NULL if no sense is to
1650 * be collected).
1651 *
1652 * Returns zero if unsuccessful, or the header offset (either 4
1653 * or 8 depending on whether a six or ten byte command was
1654 * issued) if successful.
1655 **/
1656int
1657scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1658 unsigned char *buffer, int len, int timeout, int retries,
1659 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
1660 unsigned char cmd[12];
1661 int use_10_for_ms;
1662 int header_length;
1663 int result;
1664 struct scsi_sense_hdr my_sshdr;
1665
1666 memset(data, 0, sizeof(*data));
1667 memset(&cmd[0], 0, 12);
1668 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
1669 cmd[2] = modepage;
1670
1671 /* caller might not be interested in sense, but we need it */
1672 if (!sshdr)
1673 sshdr = &my_sshdr;
1674
1675 retry:
1676 use_10_for_ms = sdev->use_10_for_ms;
1677
1678 if (use_10_for_ms) {
1679 if (len < 8)
1680 len = 8;
1681
1682 cmd[0] = MODE_SENSE_10;
1683 cmd[8] = len;
1684 header_length = 8;
1685 } else {
1686 if (len < 4)
1687 len = 4;
1688
1689 cmd[0] = MODE_SENSE;
1690 cmd[4] = len;
1691 header_length = 4;
1692 }
1693
1694 memset(buffer, 0, len);
1695
1696 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1697 sshdr, timeout, retries);
1698
1699 /* This code looks awful: what it's doing is making sure an
1700 * ILLEGAL REQUEST sense return identifies the actual command
1701 * byte as the problem. MODE_SENSE commands can return
1702 * ILLEGAL REQUEST if the code page isn't supported */
1703
1704 if (use_10_for_ms && !scsi_status_is_good(result) &&
1705 (driver_byte(result) & DRIVER_SENSE)) {
1706 if (scsi_sense_valid(sshdr)) {
1707 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1708 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1709 /*
1710 * Invalid command operation code
1711 */
1712 sdev->use_10_for_ms = 0;
1713 goto retry;
1714 }
1715 }
1716 }
1717
1718 if(scsi_status_is_good(result)) {
1719 data->header_length = header_length;
1720 if(use_10_for_ms) {
1721 data->length = buffer[0]*256 + buffer[1] + 2;
1722 data->medium_type = buffer[2];
1723 data->device_specific = buffer[3];
1724 data->longlba = buffer[4] & 0x01;
1725 data->block_descriptor_length = buffer[6]*256
1726 + buffer[7];
1727 } else {
1728 data->length = buffer[0] + 1;
1729 data->medium_type = buffer[1];
1730 data->device_specific = buffer[2];
1731 data->block_descriptor_length = buffer[3];
1732 }
1733 }
1734
1735 return result;
1736}
1737EXPORT_SYMBOL(scsi_mode_sense);
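/*
 * Illustrative sketch (not part of the original file): a typical caller
 * requesting the caching mode page (0x08) might do:
 *
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char buffer[64];
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
 *			      30 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res))
 *		... mode page starts at buffer + data.header_length +
 *		    data.block_descriptor_length ...
 *
 * Page number, buffer size, timeout and retries are example values only.
 */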
1738
1739int
1740scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1741{
1742 char cmd[] = {
1743 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1744 };
1745 struct scsi_sense_hdr sshdr;
1746 int result;
1747
1748 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1749 timeout, retries);
1750
1751 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1752
1753 if ((scsi_sense_valid(&sshdr)) &&
1754 ((sshdr.sense_key == UNIT_ATTENTION) ||
1755 (sshdr.sense_key == NOT_READY))) {
1756 sdev->changed = 1;
James Bottomley1cf72692005-08-28 11:27:01 -05001757 result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 }
1759 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 return result;
1761}
1762EXPORT_SYMBOL(scsi_test_unit_ready);
1763
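/*
 * Illustrative sketch only (not built): polling a device with
 * scsi_test_unit_ready() until it reports ready or a small retry budget is
 * exhausted.  The function name, timeout, retry count and back-off interval
 * are assumptions made for this example, not midlayer policy.
 */
#if 0
static int example_wait_for_ready(struct scsi_device *sdev)
{
	int i;

	for (i = 0; i < 5; i++) {
		if (scsi_test_unit_ready(sdev, 10 * HZ, 3) == 0)
			return 0;	/* good status: unit is ready */
		msleep(1000);		/* back off before retrying */
	}
	return -ENODEV;
}
#endif
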
/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				dev_printk(KERN_ERR, &sdev->sdev_gendev,
					   "Illegal state transition %s->%s\n",
					   scsi_device_state_name(oldstate),
					   scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

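/*
 * Illustrative sketch only (not built): taking a device to SDEV_OFFLINE via
 * scsi_device_set_state(), e.g. from error handling.  The wrapper name is an
 * assumption made for this example; callers must be prepared for -EINVAL
 * when the state model above forbids the transition.
 */
#if 0
static void example_offline_device(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		dev_printk(KERN_ERR, &sdev->sdev_gendev,
			   "cannot transition device to offline\n");
}
#endif
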
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

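/*
 * Illustrative sketch only (not built): the quiesce/resume pairing a caller
 * might use around work that must not compete with user I/O (the work itself
 * is elided).  The wrapper name is an assumption made for this example; both
 * calls require user context and may sleep.
 */
#if 0
static int example_quiesced_operation(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;

	/* ... issue internal/special requests while user I/O is deferred ... */

	scsi_device_resume(sdev);
	return 0;
}
#endif
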
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_internal_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

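/*
 * Illustrative sketch only (not built): how an LLD might block a device
 * around a transient fault and unblock it afterwards.  Per the comments
 * above, both helpers assume the host_lock is held, so the example takes it
 * explicitly; the wrapper name is an assumption made for this example.
 */
#if 0
static void example_set_device_blocked(struct scsi_device *sdev, int block)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	if (block)
		scsi_internal_device_block(sdev);
	else
		scsi_internal_device_unblock(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
#endif
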
static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
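
/*
 * Illustrative sketch only (not built): how a transport class might suspend
 * every device under a target while its link is down and resume them on
 * recovery.  The wrapper name and link-state flag are assumptions made for
 * this example; &starget->dev is the generic device embedded in scsi_target.
 */
#if 0
static void example_target_link_event(struct scsi_target *starget, int link_up)
{
	if (link_up)
		scsi_target_unblock(&starget->dev);
	else
		scsi_target_block(&starget->dev);
}
#endif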