#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct block_device;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;


/*
 * The various choices mean:
 * NONE: Self evident.	Host adapter is not capable of scatter-gather.
 * ALL:	 Means that the host adapter module can do scatter-gather,
 *	 and that there is no limit to the size of the table to which
 *	 we scatter/gather data.
 * Anything else:  Indicates the maximum number of chains that can be
 *	 used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL 0xff


#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1

enum scsi_eh_timer_return {
	EH_NOT_HANDLED,
	EH_HANDLED,
	EH_RESET_TIMER,
};


struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * Used to initialize old-style drivers.  For new-style drivers
	 * just perform all work in your module initialization function.
	 *
	 * Status: OBSOLETE
	 */
	int (* detect)(struct scsi_host_template *);

	/*
	 * Used as unload callback for hosts with old-style drivers.
	 *
	 * Status: OBSOLETE
	 */
	int (* release)(struct Scsi_Host *);

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);


#ifdef CONFIG_COMPAT
	/*
	 * Compat handler.  Handles the 32-bit ABI.  When an unknown
	 * ioctl is passed, return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the HBA has accepted the
	 * command.  The done() function must be called on the command
	 * when the driver has finished with it. (you may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand).
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * Status: REQUIRED
	 */
	int (* queuecommand)(struct scsi_cmnd *,
			     void (*done)(struct scsi_cmnd *));
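
	/*
	 * Illustrative queuecommand sketch (the mydrv_* names and the
	 * per-adapter slot check are hypothetical, not part of this
	 * interface).  The rejection path returns without touching the
	 * command and without calling done():
	 *
	 *	static int mydrv_queuecommand(struct scsi_cmnd *cmd,
	 *				      void (*done)(struct scsi_cmnd *))
	 *	{
	 *		struct mydrv_adapter *a =
	 *			(struct mydrv_adapter *)cmd->device->host->hostdata;
	 *
	 *		if (!mydrv_slot_available(a))
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		cmd->scsi_done = done;
	 *		mydrv_start_io(a, cmd);
	 *		return 0;
	 *	}
	 *
	 * done(cmd) is then invoked from the driver's completion path,
	 * typically the interrupt handler, once the hardware finishes.
	 */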

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine present that should work in most cases.  For those
	 * driver authors who have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine.  When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (* eh_strategy_handler)(struct Scsi_Host *);
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);
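
	/*
	 * Illustrative abort handler sketch (the mydrv_* helper is
	 * hypothetical; SUCCESS and FAILED are the midlayer's eh return
	 * codes).  These handlers run from the eh thread, so they may
	 * sleep while waiting for the hardware to acknowledge:
	 *
	 *	static int mydrv_eh_abort_handler(struct scsi_cmnd *cmd)
	 *	{
	 *		if (mydrv_abort_outstanding_cmd(cmd) == 0)
	 *			return SUCCESS;
	 *		return FAILED;
	 *	}
	 */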

	/*
	 * This is an optional routine to notify the host that the scsi
	 * timer just fired.  The returns tell the timer routine what to
	 * do about this:
	 *
	 * EH_HANDLED:		I fixed the error, please complete the command
	 * EH_RESET_TIMER:	I need more time, reset the timer and
	 *			begin counting again
	 * EH_NOT_HANDLED:	Begin normal error recovery
	 *
	 * Status: OPTIONAL
	 */
	enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
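
	/*
	 * Illustrative sketch (hypothetical mydrv_* helper): a driver that
	 * knows a long-running operation is still making progress can ask
	 * for more time instead of triggering error recovery:
	 *
	 *	static enum scsi_eh_timer_return
	 *	mydrv_eh_timed_out(struct scsi_cmnd *cmd)
	 *	{
	 *		if (mydrv_cmd_still_in_flight(cmd))
	 *			return EH_RESET_TIMER;
	 *		return EH_NOT_HANDLED;
	 *	}
	 */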

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation:  If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_adjust_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, e.g.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around, clean
	 *     up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);
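
	/*
	 * Illustrative sketch of the slave_alloc/slave_configure/
	 * slave_destroy trio (the mydrv_* and struct mydrv_lun names are
	 * hypothetical): per-device data is allocated in slave_alloc, the
	 * queue depth is set in slave_configure via the midlayer helper
	 * scsi_adjust_queue_depth() mentioned above, and the allocation is
	 * released in slave_destroy so nothing leaks:
	 *
	 *	static int mydrv_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		sdev->hostdata = kmalloc(sizeof(struct mydrv_lun),
	 *					 GFP_KERNEL);
	 *		return sdev->hostdata ? 0 : -ENOMEM;
	 *	}
	 *
	 *	static int mydrv_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 16);
	 *		return 0;
	 *	}
	 *
	 *	static void mydrv_slave_destroy(struct scsi_device *sdev)
	 *	{
	 *		kfree(sdev->hostdata);
	 *		sdev->hostdata = NULL;
	 *	}
	 */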

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 */
	int (* change_queue_depth)(struct scsi_device *, int);
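
	/*
	 * Illustrative sketch (the MYDRV_MAX_QUEUE limit is hypothetical):
	 * clamp an out-of-range request to the nearest legal depth rather
	 * than returning an error, then report what was actually set:
	 *
	 *	static int mydrv_change_queue_depth(struct scsi_device *sdev,
	 *					    int depth)
	 *	{
	 *		if (depth < 1)
	 *			depth = 1;
	 *		if (depth > MYDRV_MAX_QUEUE)
	 *			depth = MYDRV_MAX_QUEUE;
	 *		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	 *		return sdev->queue_depth;
	 *	}
	 */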

	/*
	 * Fill in this function to allow the changing of tag types
	 * (this also allows the enabling/disabling of tag command
	 * queueing).  An error should only be returned if something
	 * went wrong in the driver while trying to set the tag type.
	 * If the driver doesn't support the requested tag type, then
	 * it should set the closest type it does support without
	 * returning an error.  Returns the actual tag type set.
	 */
	int (* change_queue_type)(struct scsi_device *, int);
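
	/*
	 * Illustrative sketch, assuming the scsi_tcq.h tagging helpers of
	 * this era (scsi_set_tag_type(), scsi_activate_tcq(),
	 * scsi_deactivate_tcq()); the mydrv_* name is hypothetical:
	 *
	 *	static int mydrv_change_queue_type(struct scsi_device *sdev,
	 *					   int tag_type)
	 *	{
	 *		if (!sdev->tagged_supported)
	 *			return 0;
	 *
	 *		scsi_set_tag_type(sdev, tag_type);
	 *		if (tag_type)
	 *			scsi_activate_tcq(sdev, sdev->queue_depth);
	 *		else
	 *			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	 *		return tag_type;
	 *	}
	 */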

	/*
	 * This function determines the BIOS parameters for a given
	 * hard disk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);
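
	/*
	 * Illustrative sketch: a commonly made-up geometry is 64 heads and
	 * 32 sectors per track, with the cylinder count derived from the
	 * capacity passed in (ignoring the >1024-cylinder fixups real
	 * drivers apply).  The ip[] order is heads, sectors, cylinders:
	 *
	 *	static int mydrv_bios_param(struct scsi_device *sdev,
	 *				    struct block_device *bdev,
	 *				    sector_t capacity, int ip[])
	 *	{
	 *		ip[0] = 64;
	 *		ip[1] = 32;
	 *		ip[2] = (unsigned long)capacity / (64 * 32);
	 *		return 0;
	 *	}
	 */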

	/*
	 * Can be used to export driver statistics and other infos to the
	 * world outside the kernel, i.e. userspace, and it also provides an
	 * interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);

	/*
	 * Name of proc directory
	 */
	char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * proc_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a given host adapter will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;

	/*
	 * If the host adapter has limitations besides the segment count.
	 */
	unsigned short max_sectors;

	/*
	 * DMA scatter gather segment boundary limit.  A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains a counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True if this host adapter can make good use of clustering.
	 * I originally thought that if the tablesize was large that it
	 * was a waste of CPU cycles to prepare a cluster list, but
	 * it works out that the Buslogic is faster if you use a smaller
	 * number of segments (i.e. use clustering).  I guess it is
	 * inefficient.
	 */
	unsigned use_clustering:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/*
	 * Ordered write support.
	 */
	unsigned ordered_flush:1;
	unsigned ordered_tag:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations when zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the sysfs class properties for this host, NULL terminated.
	 */
	struct class_device_attribute **shost_attrs;

	/*
	 * Pointer to the SCSI device properties for this host, NULL terminated.
	 */
	struct device_attribute **sdev_attrs;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head legacy_hosts;
};
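
/*
 * Typical usage sketch for a new-style driver (illustrative only; the
 * mydrv_* names, struct mydrv_adapter and pdev parent device are
 * hypothetical): fill in a static template, allocate a host with
 * scsi_host_alloc(), whose second argument reserves room for the
 * driver's private data in shost->hostdata, register it with
 * scsi_add_host() and then trigger scanning:
 *
 *	static struct scsi_host_template mydrv_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "mydrv",
 *		.proc_name		= "mydrv",
 *		.queuecommand		= mydrv_queuecommand,
 *		.eh_host_reset_handler	= mydrv_host_reset,
 *		.can_queue		= 32,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.cmd_per_lun		= 1,
 *		.use_clustering		= ENABLE_CLUSTERING,
 *	};
 *
 *	shost = scsi_host_alloc(&mydrv_template,
 *				sizeof(struct mydrv_adapter));
 *	if (!shost)
 *		return -ENOMEM;
 *	err = scsi_add_host(shost, &pdev->dev);
 *	if (err) {
 *		scsi_host_put(shost);
 *		return err;
 *	}
 *	scsi_scan_host(shost);
 */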

/*
 * shost states
 */
enum {
	SHOST_ADD,
	SHOST_DEL,
	SHOST_CANCEL,
	SHOST_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held.  NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct scsi_host_cmd_pool *cmd_pool;
	spinlock_t		free_list_lock;
	struct list_head	free_list; /* backup store of cmd structs */
	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;

	struct semaphore	scan_mutex;	/* serialize scanning activity */

	struct list_head	eh_cmd_q;
	struct task_struct    * ehandler;	/* Error recovery thread. */
	struct semaphore      * eh_wait;	/* The error recovery thread
						   waits on this. */
	struct completion     * eh_notify;	/* wait for eh to begin or end */
	struct semaphore      * eh_action;	/* Wait for specific actions on
						   the host. */
	unsigned int            eh_active:1;	/* Indicates the eh thread is
						   awake and active if this is
						   true. */
	unsigned int            eh_kill:1;	/* set when killing the eh thread */
	wait_queue_head_t       host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/*
	 * The following two fields are protected with host_lock;
	 * however, eh routines can safely access during eh processing
	 * without acquiring the lock.
	 */
	unsigned int host_busy;		   /* commands actually active on low-level */
	unsigned int host_failed;	   /* commands that failed. */

	unsigned short host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
	int resetting; /* if set, it means that last_reset is a valid value */
	unsigned long last_reset;

	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The first two should be set to 1 more than the actual max id
	 * or lun (i.e. 8 for normal systems).
	 */
	unsigned int max_id;
	unsigned int max_lun;
	unsigned int max_channel;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.  I am leaving this as a number rather than a bit
	 * because you never know what subsequent SCSI standards might do
	 * (i.e. could there be a 20-byte or a 24-byte command a few years
	 * down the road?).
	 */
	unsigned char max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int max_sectors;
	unsigned long dma_boundary;
	/*
	 * Used to assign serial numbers to the cmds.
	 * Protected by the host lock.
	 */
	unsigned long cmd_serial_number, cmd_pid;

	unsigned unchecked_isa_dma:1;
	unsigned use_clustering:1;
	unsigned use_blk_tcq:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;)
	 */
	unsigned reverse_ordering:1;

	/*
	 * Ordered write support.
	 */
	unsigned ordered_flush:1;
	unsigned ordered_tag:1;

	/*
	 * Optional work queue to be utilized by the transport.
	 */
	char work_q_name[KOBJ_NAME_LEN];
	struct workqueue_struct *work_q;

	/*
	 * Host has rejected a command because it was busy.
	 */
	unsigned int host_blocked;

	/*
	 * Value host_blocked counts down from.
	 */
	unsigned int max_host_blocked;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int  irq;


	unsigned long shost_state;

	/* ldm bits */
	struct device		shost_gendev;
	struct class_device	shost_classdev;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head sht_legacy_list;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately.
	 */
	void *shost_data;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};
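
/*
 * The hostdata[] area above is the usual home of a driver's per-host
 * private structure; its size is the second argument passed to
 * scsi_host_alloc().  Illustrative accessor (struct mydrv_adapter is a
 * hypothetical driver type):
 *
 *	static inline struct mydrv_adapter *
 *	mydrv_adapter(struct Scsi_Host *shost)
 *	{
 *		return (struct mydrv_adapter *)shost->hostdata;
 *	}
 */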

#define		class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_classdev)

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_scan_single_target(struct Scsi_Host *, unsigned int,
				    unsigned int);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);

extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);

static inline void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
{
	shost->host_lock = lock;
}

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This pseudo-device isn't a real physical device and won't
 * be available from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
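
/*
 * Illustrative use of the pseudo-device pair above, e.g. to direct a
 * command at the adapter itself rather than at an attached disk (the
 * mydrv_send_internal_cmd helper is hypothetical):
 *
 *	sdev = scsi_get_host_dev(shost);
 *	if (sdev) {
 *		mydrv_send_internal_cmd(sdev);
 *		scsi_free_host_dev(sdev);
 *	}
 */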

/* legacy interfaces */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
extern void scsi_unregister(struct Scsi_Host *);

#endif /* _SCSI_SCSI_HOST_H */