/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq);
	return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
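
/*
 * Illustrative sketch, not part of the original file (compiled out with
 * #if 0): how the requeue path above is typically reached.  A low-level
 * driver's queuecommand() returns SCSI_MLQUEUE_HOST_BUSY (or
 * SCSI_MLQUEUE_DEVICE_BUSY) when it cannot take the command, and
 * scsi_dispatch_cmd() then feeds the command back through
 * scsi_queue_insert().  "exampledrv", its host structure and its busy
 * test are assumptions made up for the example.
 */
#if 0
static int exampledrv_queuecommand(struct scsi_cmnd *cmd,
				   void (*done)(struct scsi_cmnd *))
{
	struct exampledrv_host *eh =		/* hypothetical private data */
		(struct exampledrv_host *)cmd->device->host->hostdata;

	if (eh->slots_in_use >= eh->max_slots)
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer requeues via scsi_queue_insert() */

	cmd->scsi_done = done;			/* remember completion callback */
	exampledrv_start_io(eh, cmd);		/* hypothetical hardware kick */
	return 0;
}
#endif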

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called either by us
	 * or by the low-level driver's queuecommand() path; it must also
	 * call the completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
	BUG_ON(!req->waiting);

	complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);
	int write = sreq->sr_data_direction == DMA_TO_DEVICE;
	struct request *req;

	req = blk_get_request(sreq->sr_device->request_queue, write,
			      __GFP_WAIT);
	if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
				       buffer, bufflen, __GFP_WAIT)) {
		sreq->sr_result = DRIVER_ERROR << 24;
		blk_put_request(req);
		return;
	}

	req->flags |= REQ_NOMERGE;
	req->waiting = &wait;
	req->end_io = scsi_wait_req_end_io;
	req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
	req->sense = sreq->sr_sense_buffer;
	req->sense_len = 0;
	memcpy(req->cmd, cmnd, req->cmd_len);
	req->timeout = timeout;
	req->flags |= REQ_BLOCK_PC;
	req->rq_disk = NULL;
	blk_insert_request(sreq->sr_device->request_queue, req,
			   sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	sreq->sr_result = req->errors;
	if (req->errors)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	blk_put_request(req);
}

EXPORT_SYMBOL(scsi_wait_req);
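
/*
 * Illustrative sketch, not part of the original file (compiled out with
 * #if 0): a caller issuing a simple INQUIRY through scsi_wait_req().
 * The 36-byte response size, timeout and retry count are assumptions
 * for the example; real callers in this file (__scsi_mode_sense,
 * scsi_test_unit_ready) follow the same allocate/wait/release pattern.
 */
#if 0
static int example_inquiry(struct scsi_device *sdev, unsigned char *buf)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
	int result;

	if (!sreq)
		return -ENOMEM;

	sreq->sr_data_direction = DMA_FROM_DEVICE;
	scsi_wait_req(sreq, cmd, buf, 36, 5 * HZ, 3);	/* blocks until done */

	result = sreq->sr_result;
	scsi_release_request(sreq);
	return result;
}
#endif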

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     1 (the return value is not currently checked by callers)
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	cmd->request->flags &= ~REQ_DONTPREP;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue)
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);

			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
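
/*
 * Illustrative sketch, not part of the original file (compiled out with
 * #if 0): the size-class mapping above pairs each allocation with the
 * matching sgpool.  For example, a command with use_sg == 20 falls in
 * the "17 ... 32" case, so sglist_len becomes 2 and the scatterlist
 * comes from "sgpool-32"; the same index must later be handed back to
 * scsi_free_sgtable().  The segment count below is an assumed value.
 */
#if 0
static void example_sg_roundtrip(struct scsi_cmnd *cmd)
{
	struct scatterlist *sgl;

	cmd->use_sg = 20;			/* assumed segment count */
	sgl = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (sgl)				/* cmd->sglist_len is now 2 */
		scsi_free_sgtable(sgl, cmd->sglist_len);
}
#endif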

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
		return;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */
		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

		/*
		 * If the command completed without error, then either finish off the
		 * rest of the command, or start a new one.
		 */
		if (result == 0 || cmd == NULL) {
			return;
		}
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				cmd = scsi_end_request(cmd, 0,
						this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if (cmd->device->use_10_for_rw &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				cmd = scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			printk(KERN_INFO "Device %s not ready.\n",
			       req->rq_disk ? req->rq_disk->disk_name : "");
			cmd = scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
			       cmd->device->host->host_no,
			       (int)cmd->device->channel,
			       (int)cmd->device->id, (int)cmd->device->lun);
			__scsi_print_command(cmd->data_cmnd);
			scsi_print_sense("", cmd);
			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	} /* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->flags & REQ_SPECIAL))
			printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
			       "= 0x%x\n", cmd->device->host->host_no,
			       cmd->device->channel,
			       cmd->device->id,
			       cmd->device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			scsi_print_sense("", cmd);
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		cmd = scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);
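
/*
 * Illustrative note, not part of the original file: the sense-copy
 * length computed above (len = 8 + cmd->sense_buffer[7]) follows the
 * fixed-format sense layout, where byte 7 holds the "additional sense
 * length".  As a worked example with an assumed value: if a device
 * reports an additional sense length of 10, the total copied is
 * 8 + 10 = 18 bytes, clamped to SCSI_SENSE_BUFFERSIZE if the device
 * claims more than the buffer can hold.
 */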

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt))
		return BLKPREP_DEFER;

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;

		if (drv->prepare_flush)
			return drv->prepare_flush(q, rq);
	}

	return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *flush_rq = rq->end_io_data;
	struct scsi_driver *drv;

	if (flush_rq->errors) {
		printk("scsi: barrier error, disabling flush support\n");
		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
	}

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
		drv->end_flush(q, rq);
	}
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static void scsi_generic_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
		       sdev->host->host_no, sdev->id, sdev->lun);
		return BLKPREP_KILL;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state; don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL && req->special) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
			if (specials_only == SDEV_QUIESCE ||
			    specials_only == SDEV_BLOCK)
				return BLKPREP_DEFER;

			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}

		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		return BLKPREP_KILL;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 *     (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).  This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		if (ret)	/* BLKPREP_KILL return also releases the command */
			return ret;

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (req->rq_disk) {
			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				return BLKPREP_KILL;
			}
		} else {
			memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
			if (rq_data_dir(req) == WRITE)
				cmd->sc_data_direction = DMA_TO_DEVICE;
			else if (req->data_len)
				cmd->sc_data_direction = DMA_FROM_DEVICE;
			else
				cmd->sc_data_direction = DMA_NONE;

			cmd->transfersize = req->data_len;
			cmd->allowed = 3;
			cmd->timeout_per_command = req->timeout;
			cmd->done = scsi_generic_done;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
}
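
/*
 * Illustrative sketch, not part of the original file (compiled out with
 * #if 0): the shape of the init_command() hook that scsi_prep_fn()
 * invokes for disk-backed requests.  The minimal READ_10 CDB below and
 * the 512-byte sector size are assumptions made up for the example;
 * real implementations (e.g. sd_init_command in sd.c) handle many more
 * cases and also set a completion routine in cmd->done.
 */
#if 0
static int example_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;

	if (rq_data_dir(rq) != READ)
		return 0;			/* 0 => prep fails, BLKPREP_KILL */

	cmd->cmnd[0] = READ_10;
	cmd->cmnd[2] = (rq->sector >> 24) & 0xff;	/* LBA, big-endian */
	cmd->cmnd[3] = (rq->sector >> 16) & 0xff;
	cmd->cmnd[4] = (rq->sector >> 8) & 0xff;
	cmd->cmnd[5] = rq->sector & 0xff;
	cmd->cmnd[7] = (rq->nr_sectors >> 8) & 0xff;	/* transfer length */
	cmd->cmnd[8] = rq->nr_sectors & 0xff;
	cmd->cmd_len = 10;
	cmd->sc_data_direction = DMA_FROM_DEVICE;
	cmd->transfersize = 512;		/* assumed sector size */
	cmd->allowed = 3;			/* retries, as in scsi_prep_fn */
	cmd->timeout_per_command = rq->timeout;
	return 1;				/* success */
}
#endif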

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d (%d:%d) unblocking device at"
				       " zero depth\n", sdev->host->host_no,
				       sdev->id, sdev->lun));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (test_bit(SHOST_RECOVERY, &shost->shost_state))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill requests for a dead device
 */
static void scsi_kill_requests(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(req);
		req->flags |= REQ_QUIET;
		while (end_that_request_first(req, 0, req->nr_sectors))
			;
		end_that_request_last(req);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		scsi_kill_requests(q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			blkdev_dequeue_request(req);
			req->flags |= REQ_QUIET;
			while (end_that_request_first(req, 0, req->nr_sectors))
				;
			end_that_request_last(req);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/*
	 * ordered tags are superior to flush ordering
	 */
	if (shost->ordered_tag)
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	else if (shost->ordered_flush) {
		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
		q->prepare_flush_fn = scsi_prepare_flush_fn;
		q->end_flush_fn = scsi_end_flush_fn;
	}

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	__scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sreq:	SCSI request to fill in with the MODE_SENSE
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *
 *	Returns the command's result code (zero indicates success); on
 *	success the header length (4 for a six-byte command, 8 for a
 *	ten-byte command) is stored in data->header_length.
 **/
int
__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data) {
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

 retry:
	use_10_for_ms = sreq->sr_device->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	sreq->sr_cmd_len = 0;
	memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
	sreq->sr_data_direction = DMA_FROM_DEVICE;

	memset(buffer, 0, len);

	scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
	    (driver_byte(sreq->sr_result) & DRIVER_SENSE)) {
		struct scsi_sense_hdr sshdr;

		if (scsi_request_normalize_sense(sreq, &sshdr)) {
			if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
			    (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sreq->sr_device->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(sreq->sr_result)) {
		data->header_length = header_length;
		if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
	}

	return sreq->sr_result;
}
EXPORT_SYMBOL(__scsi_mode_sense);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sdev:	scsi device to send command to.
 *	@dbd:	set if mode sense will disable block descriptors in the return
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *
 *	Returns the command's result code (zero indicates success, -1 if
 *	no request could be allocated); on success the header length is
 *	stored in data->header_length, as for __scsi_mode_sense.
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	int ret;

	if (!sreq)
		return -1;

	ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
				timeout, retries, data);

	scsi_release_request(sreq);

	return ret;
}
EXPORT_SYMBOL(scsi_mode_sense);
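
/*
 * Illustrative sketch, not part of the original file (compiled out with
 * #if 0): a caller reading the Caching mode page (page code 0x08) with
 * scsi_mode_sense().  The buffer size, timeout and retry count are
 * assumptions for the example; data.header_length plus the block
 * descriptor length locate the start of the page data within buf, and
 * the WCE (write cache enable) bit lives in byte 2, bit 2 of that page.
 */
#if 0
static void example_read_cache_page(struct scsi_device *sdev)
{
	unsigned char buf[64];
	struct scsi_mode_data data;
	int res;

	res = scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
			      5 * HZ, 3, &data);
	if (scsi_status_is_good(res)) {
		unsigned char *page = buf + data.header_length +
			data.block_descriptor_length;
		printk("WCE: %d\n", (page[2] & 0x04) != 0);
	}
}
#endif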

int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	struct scsi_request *sreq;
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	sreq->sr_data_direction = DMA_NONE;
	scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);

	if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) {
		struct scsi_sense_hdr sshdr;

		if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			sreq->sr_result = 0;
		}
	}
	result = sreq->sr_result;
	scsi_release_request(sreq);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				dev_printk(KERN_ERR, &sdev->sdev_gendev,
					   "Illegal state transition %s->%s\n",
					   scsi_device_state_name(oldstate),
					   scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
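
/*
 * Illustrative summary, not part of the original file: the legal
 * transitions encoded in scsi_device_set_state() above, read as
 * "new state <- allowed old states".
 *
 *	CREATED  <- (none; it is the manually initialised start state)
 *	RUNNING  <- CREATED, OFFLINE, QUIESCE, BLOCK
 *	QUIESCE  <- RUNNING, OFFLINE
 *	OFFLINE  <- CREATED, RUNNING, QUIESCE, BLOCK
 *	BLOCK    <- CREATED, RUNNING
 *	CANCEL   <- CREATED, RUNNING, OFFLINE, BLOCK
 *	DEL      <- CANCEL
 */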

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
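
/*
 * Illustrative sketch, not part of the original file (compiled out with
 * #if 0): how a transport class or low-level driver might bracket a
 * temporary link outage with the target-level block/unblock helpers
 * above.  The "link event" entry point is an assumption made up for the
 * example; while a target is blocked, commands are deferred rather than
 * failed, so a brief outage does not error out in-flight I/O.
 */
#if 0
static void example_link_event(struct scsi_target *starget, int link_up)
{
	if (link_up)
		scsi_target_unblock(&starget->dev);
	else
		scsi_target_block(&starget->dev);
}
#endif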