| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  * I2O kernel space accessible structures/APIs | 
 | 3 |  * | 
 | 4 |  * (c) Copyright 1999, 2000 Red Hat Software | 
 | 5 |  * | 
 | 6 |  * This program is free software; you can redistribute it and/or | 
 | 7 |  * modify it under the terms of the GNU General Public License | 
 | 8 |  * as published by the Free Software Foundation; either version | 
 | 9 |  * 2 of the License, or (at your option) any later version. | 
 | 10 |  * | 
 | 11 |  ************************************************************************* | 
 | 12 |  * | 
 * This header file defines the I2O APIs/structures for use by
 | 14 |  * the I2O kernel modules. | 
 | 15 |  * | 
 | 16 |  */ | 
 | 17 |  | 
 | 18 | #ifndef _I2O_H | 
 | 19 | #define _I2O_H | 
 | 20 |  | 
 | 21 | #ifdef __KERNEL__		/* This file to be included by kernel only */ | 
 | 22 |  | 
 | 23 | #include <linux/i2o-dev.h> | 
 | 24 |  | 
 | 25 | /* How many different OSM's are we allowing */ | 
 | 26 | #define I2O_MAX_DRIVERS		8 | 
 | 27 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 28 | #include <linux/pci.h> | 
 | 29 | #include <linux/dma-mapping.h> | 
| Tim Schmielau | 4e57b68 | 2005-10-30 15:03:48 -0800 | [diff] [blame] | 30 | #include <linux/string.h> | 
 | 31 | #include <linux/slab.h> | 
 | 32 | #include <linux/workqueue.h>	/* work_struct */ | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 33 | #include <linux/mempool.h> | 
| Matthias Kaehlcke | 9ac1625 | 2007-07-15 23:39:49 -0700 | [diff] [blame] | 34 | #include <linux/mutex.h> | 
| Jens Axboe | ba2da2f | 2007-07-24 14:42:11 +0200 | [diff] [blame] | 35 | #include <linux/scatterlist.h> | 
| Tim Schmielau | 4e57b68 | 2005-10-30 15:03:48 -0800 | [diff] [blame] | 36 |  | 
 | 37 | #include <asm/io.h> | 
 | 38 | #include <asm/semaphore.h>	/* Needed for MUTEX init macros */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 39 |  | 
 | 40 | /* message queue empty */ | 
 | 41 | #define I2O_QUEUE_EMPTY		0xffffffff | 
 | 42 |  | 
 | 43 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 44 |  *	Cache strategies | 
 | 45 |  */ | 
 | 46 |  | 
 | 47 | /*	The NULL strategy leaves everything up to the controller. This tends to be a | 
 | 48 |  *	pessimal but functional choice. | 
 | 49 |  */ | 
 | 50 | #define CACHE_NULL		0 | 
 | 51 | /*	Prefetch data when reading. We continually attempt to load the next 32 sectors | 
 | 52 |  *	into the controller cache. | 
 | 53 |  */ | 
 | 54 | #define CACHE_PREFETCH		1 | 
/*	Prefetch data when reading. We sometimes attempt to load the next 32 sectors
 *	into the controller cache. When an I/O is <= 8K we assume it is probably
 *	not sequential and don't prefetch (default)
 */
#define CACHE_SMARTFETCH	2
 | 59 | #define CACHE_SMARTFETCH	2 | 
 | 60 | /*	Data is written to the cache and then out on to the disk. The I/O must be | 
 | 61 |  *	physically on the medium before the write is acknowledged (default without | 
 | 62 |  *	NVRAM) | 
 | 63 |  */ | 
 | 64 | #define CACHE_WRITETHROUGH	17 | 
 | 65 | /*	Data is written to the cache and then out on to the disk. The controller | 
 | 66 |  *	is permitted to write back the cache any way it wants. (default if battery | 
 | 67 |  *	backed NVRAM is present). It can be useful to set this for swap regardless of | 
 | 68 |  *	battery state. | 
 | 69 |  */ | 
 | 70 | #define CACHE_WRITEBACK		18 | 
 | 71 | /*	Optimise for under powered controllers, especially on RAID1 and RAID0. We | 
 | 72 |  *	write large I/O's directly to disk bypassing the cache to avoid the extra | 
 | 73 |  *	memory copy hits. Small writes are writeback cached | 
 | 74 |  */ | 
 | 75 | #define CACHE_SMARTBACK		19 | 
 | 76 | /*	Optimise for under powered controllers, especially on RAID1 and RAID0. We | 
 | 77 |  *	write large I/O's directly to disk bypassing the cache to avoid the extra | 
 | 78 |  *	memory copy hits. Small writes are writethrough cached. Suitable for devices | 
 | 79 |  *	lacking battery backup | 
 | 80 |  */ | 
 | 81 | #define CACHE_SMARTTHROUGH	20 | 
 | 82 |  | 
 | 83 | /* | 
 | 84 |  *	Ioctl structures | 
 | 85 |  */ | 
 | 86 |  | 
 | 87 | #define 	BLKI2OGRSTRAT	_IOR('2', 1, int) | 
 | 88 | #define 	BLKI2OGWSTRAT	_IOR('2', 2, int) | 
 | 89 | #define 	BLKI2OSRSTRAT	_IOW('2', 3, int) | 
 | 90 | #define 	BLKI2OSWSTRAT	_IOW('2', 4, int) | 
 | 91 |  | 
 | 92 | /* | 
 | 93 |  *	I2O Function codes | 
 | 94 |  */ | 
 | 95 |  | 
 | 96 | /* | 
 | 97 |  *	Executive Class | 
 | 98 |  */ | 
 | 99 | #define	I2O_CMD_ADAPTER_ASSIGN		0xB3 | 
 | 100 | #define	I2O_CMD_ADAPTER_READ		0xB2 | 
 | 101 | #define	I2O_CMD_ADAPTER_RELEASE		0xB5 | 
 | 102 | #define	I2O_CMD_BIOS_INFO_SET		0xA5 | 
 | 103 | #define	I2O_CMD_BOOT_DEVICE_SET		0xA7 | 
 | 104 | #define	I2O_CMD_CONFIG_VALIDATE		0xBB | 
 | 105 | #define	I2O_CMD_CONN_SETUP		0xCA | 
 | 106 | #define	I2O_CMD_DDM_DESTROY		0xB1 | 
 | 107 | #define	I2O_CMD_DDM_ENABLE		0xD5 | 
 | 108 | #define	I2O_CMD_DDM_QUIESCE		0xC7 | 
 | 109 | #define	I2O_CMD_DDM_RESET		0xD9 | 
 | 110 | #define	I2O_CMD_DDM_SUSPEND		0xAF | 
 | 111 | #define	I2O_CMD_DEVICE_ASSIGN		0xB7 | 
 | 112 | #define	I2O_CMD_DEVICE_RELEASE		0xB9 | 
 | 113 | #define	I2O_CMD_HRT_GET			0xA8 | 
 | 114 | #define	I2O_CMD_ADAPTER_CLEAR		0xBE | 
 | 115 | #define	I2O_CMD_ADAPTER_CONNECT		0xC9 | 
 | 116 | #define	I2O_CMD_ADAPTER_RESET		0xBD | 
 | 117 | #define	I2O_CMD_LCT_NOTIFY		0xA2 | 
 | 118 | #define	I2O_CMD_OUTBOUND_INIT		0xA1 | 
 | 119 | #define	I2O_CMD_PATH_ENABLE		0xD3 | 
 | 120 | #define	I2O_CMD_PATH_QUIESCE		0xC5 | 
 | 121 | #define	I2O_CMD_PATH_RESET		0xD7 | 
 | 122 | #define	I2O_CMD_STATIC_MF_CREATE	0xDD | 
 | 123 | #define	I2O_CMD_STATIC_MF_RELEASE	0xDF | 
 | 124 | #define	I2O_CMD_STATUS_GET		0xA0 | 
 | 125 | #define	I2O_CMD_SW_DOWNLOAD		0xA9 | 
 | 126 | #define	I2O_CMD_SW_UPLOAD		0xAB | 
 | 127 | #define	I2O_CMD_SW_REMOVE		0xAD | 
 | 128 | #define	I2O_CMD_SYS_ENABLE		0xD1 | 
 | 129 | #define	I2O_CMD_SYS_MODIFY		0xC1 | 
 | 130 | #define	I2O_CMD_SYS_QUIESCE		0xC3 | 
 | 131 | #define	I2O_CMD_SYS_TAB_SET		0xA3 | 
 | 132 |  | 
 | 133 | /* | 
 | 134 |  * Utility Class | 
 | 135 |  */ | 
 | 136 | #define I2O_CMD_UTIL_NOP		0x00 | 
 | 137 | #define I2O_CMD_UTIL_ABORT		0x01 | 
 | 138 | #define I2O_CMD_UTIL_CLAIM		0x09 | 
 | 139 | #define I2O_CMD_UTIL_RELEASE		0x0B | 
 | 140 | #define I2O_CMD_UTIL_PARAMS_GET		0x06 | 
 | 141 | #define I2O_CMD_UTIL_PARAMS_SET		0x05 | 
 | 142 | #define I2O_CMD_UTIL_EVT_REGISTER	0x13 | 
 | 143 | #define I2O_CMD_UTIL_EVT_ACK		0x14 | 
 | 144 | #define I2O_CMD_UTIL_CONFIG_DIALOG	0x10 | 
 | 145 | #define I2O_CMD_UTIL_DEVICE_RESERVE	0x0D | 
 | 146 | #define I2O_CMD_UTIL_DEVICE_RELEASE	0x0F | 
 | 147 | #define I2O_CMD_UTIL_LOCK		0x17 | 
 | 148 | #define I2O_CMD_UTIL_LOCK_RELEASE	0x19 | 
 | 149 | #define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY	0x15 | 
 | 150 |  | 
 | 151 | /* | 
 | 152 |  * SCSI Host Bus Adapter Class | 
 | 153 |  */ | 
 | 154 | #define I2O_CMD_SCSI_EXEC		0x81 | 
 | 155 | #define I2O_CMD_SCSI_ABORT		0x83 | 
 | 156 | #define I2O_CMD_SCSI_BUSRESET		0x27 | 
 | 157 |  | 
 | 158 | /* | 
| Markus Lidel | f10378f | 2005-06-23 22:02:16 -0700 | [diff] [blame] | 159 |  * Bus Adapter Class | 
 | 160 |  */ | 
 | 161 | #define I2O_CMD_BUS_ADAPTER_RESET	0x85 | 
 | 162 | #define I2O_CMD_BUS_RESET		0x87 | 
 | 163 | #define I2O_CMD_BUS_SCAN		0x89 | 
 | 164 | #define I2O_CMD_BUS_QUIESCE		0x8b | 
 | 165 |  | 
 | 166 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 167 |  * Random Block Storage Class | 
 | 168 |  */ | 
 | 169 | #define I2O_CMD_BLOCK_READ		0x30 | 
 | 170 | #define I2O_CMD_BLOCK_WRITE		0x31 | 
 | 171 | #define I2O_CMD_BLOCK_CFLUSH		0x37 | 
 | 172 | #define I2O_CMD_BLOCK_MLOCK		0x49 | 
 | 173 | #define I2O_CMD_BLOCK_MUNLOCK		0x4B | 
 | 174 | #define I2O_CMD_BLOCK_MMOUNT		0x41 | 
 | 175 | #define I2O_CMD_BLOCK_MEJECT		0x43 | 
 | 176 | #define I2O_CMD_BLOCK_POWER		0x70 | 
 | 177 |  | 
| Markus Lidel | b2aaee3 | 2005-06-23 22:02:19 -0700 | [diff] [blame] | 178 | #define I2O_CMD_PRIVATE			0xFF | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 179 |  | 
 | 180 | /* Command status values  */ | 
 | 181 |  | 
 | 182 | #define I2O_CMD_IN_PROGRESS	0x01 | 
 | 183 | #define I2O_CMD_REJECTED	0x02 | 
 | 184 | #define I2O_CMD_FAILED		0x03 | 
 | 185 | #define I2O_CMD_COMPLETED	0x04 | 
 | 186 |  | 
 | 187 | /* I2O API function return values */ | 
 | 188 |  | 
 | 189 | #define I2O_RTN_NO_ERROR			0 | 
 | 190 | #define I2O_RTN_NOT_INIT			1 | 
 | 191 | #define I2O_RTN_FREE_Q_EMPTY			2 | 
 | 192 | #define I2O_RTN_TCB_ERROR			3 | 
 | 193 | #define I2O_RTN_TRANSACTION_ERROR		4 | 
 | 194 | #define I2O_RTN_ADAPTER_ALREADY_INIT		5 | 
 | 195 | #define I2O_RTN_MALLOC_ERROR			6 | 
 | 196 | #define I2O_RTN_ADPTR_NOT_REGISTERED		7 | 
 | 197 | #define I2O_RTN_MSG_REPLY_TIMEOUT		8 | 
 | 198 | #define I2O_RTN_NO_STATUS			9 | 
 | 199 | #define I2O_RTN_NO_FIRM_VER			10 | 
 | 200 | #define	I2O_RTN_NO_LINK_SPEED			11 | 
 | 201 |  | 
 | 202 | /* Reply message status defines for all messages */ | 
 | 203 |  | 
 | 204 | #define I2O_REPLY_STATUS_SUCCESS                    	0x00 | 
 | 205 | #define I2O_REPLY_STATUS_ABORT_DIRTY                	0x01 | 
 | 206 | #define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER     	0x02 | 
 | 207 | #define	I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER		0x03 | 
 | 208 | #define	I2O_REPLY_STATUS_ERROR_DIRTY			0x04 | 
 | 209 | #define	I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER		0x05 | 
 | 210 | #define	I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER		0x06 | 
 | 211 | #define	I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY		0x08 | 
 | 212 | #define	I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER	0x09 | 
 | 213 | #define	I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER	0x0A | 
 | 214 | #define	I2O_REPLY_STATUS_TRANSACTION_ERROR		0x0B | 
 | 215 | #define	I2O_REPLY_STATUS_PROGRESS_REPORT		0x80 | 
 | 216 |  | 
 | 217 | /* Status codes and Error Information for Parameter functions */ | 
 | 218 |  | 
 | 219 | #define I2O_PARAMS_STATUS_SUCCESS		0x00 | 
 | 220 | #define I2O_PARAMS_STATUS_BAD_KEY_ABORT		0x01 | 
 | 221 | #define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE   	0x02 | 
 | 222 | #define I2O_PARAMS_STATUS_BUFFER_FULL		0x03 | 
 | 223 | #define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL	0x04 | 
 | 224 | #define I2O_PARAMS_STATUS_FIELD_UNREADABLE	0x05 | 
 | 225 | #define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE	0x06 | 
 | 226 | #define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS	0x07 | 
 | 227 | #define I2O_PARAMS_STATUS_INVALID_GROUP_ID	0x08 | 
 | 228 | #define I2O_PARAMS_STATUS_INVALID_OPERATION	0x09 | 
 | 229 | #define I2O_PARAMS_STATUS_NO_KEY_FIELD		0x0A | 
 | 230 | #define I2O_PARAMS_STATUS_NO_SUCH_FIELD		0x0B | 
 | 231 | #define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP	0x0C | 
 | 232 | #define I2O_PARAMS_STATUS_OPERATION_ERROR	0x0D | 
 | 233 | #define I2O_PARAMS_STATUS_SCALAR_ERROR		0x0E | 
 | 234 | #define I2O_PARAMS_STATUS_TABLE_ERROR		0x0F | 
 | 235 | #define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE	0x10 | 
 | 236 |  | 
 | 237 | /* DetailedStatusCode defines for Executive, DDM, Util and Transaction error | 
 | 238 |  * messages: Table 3-2 Detailed Status Codes.*/ | 
 | 239 |  | 
 | 240 | #define I2O_DSC_SUCCESS                        0x0000 | 
 | 241 | #define I2O_DSC_BAD_KEY                        0x0002 | 
 | 242 | #define I2O_DSC_TCL_ERROR                      0x0003 | 
 | 243 | #define I2O_DSC_REPLY_BUFFER_FULL              0x0004 | 
 | 244 | #define I2O_DSC_NO_SUCH_PAGE                   0x0005 | 
 | 245 | #define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT     0x0006 | 
 | 246 | #define I2O_DSC_INSUFFICIENT_RESOURCE_HARD     0x0007 | 
 | 247 | #define I2O_DSC_CHAIN_BUFFER_TOO_LARGE         0x0009 | 
 | 248 | #define I2O_DSC_UNSUPPORTED_FUNCTION           0x000A | 
 | 249 | #define I2O_DSC_DEVICE_LOCKED                  0x000B | 
 | 250 | #define I2O_DSC_DEVICE_RESET                   0x000C | 
 | 251 | #define I2O_DSC_INAPPROPRIATE_FUNCTION         0x000D | 
 | 252 | #define I2O_DSC_INVALID_INITIATOR_ADDRESS      0x000E | 
 | 253 | #define I2O_DSC_INVALID_MESSAGE_FLAGS          0x000F | 
 | 254 | #define I2O_DSC_INVALID_OFFSET                 0x0010 | 
 | 255 | #define I2O_DSC_INVALID_PARAMETER              0x0011 | 
 | 256 | #define I2O_DSC_INVALID_REQUEST                0x0012 | 
 | 257 | #define I2O_DSC_INVALID_TARGET_ADDRESS         0x0013 | 
 | 258 | #define I2O_DSC_MESSAGE_TOO_LARGE              0x0014 | 
 | 259 | #define I2O_DSC_MESSAGE_TOO_SMALL              0x0015 | 
 | 260 | #define I2O_DSC_MISSING_PARAMETER              0x0016 | 
 | 261 | #define I2O_DSC_TIMEOUT                        0x0017 | 
 | 262 | #define I2O_DSC_UNKNOWN_ERROR                  0x0018 | 
 | 263 | #define I2O_DSC_UNKNOWN_FUNCTION               0x0019 | 
 | 264 | #define I2O_DSC_UNSUPPORTED_VERSION            0x001A | 
 | 265 | #define I2O_DSC_DEVICE_BUSY                    0x001B | 
 | 266 | #define I2O_DSC_DEVICE_NOT_AVAILABLE           0x001C | 
 | 267 |  | 
 | 268 | /* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed | 
 | 269 |    Status Codes.*/ | 
 | 270 |  | 
 | 271 | #define I2O_BSA_DSC_SUCCESS               0x0000 | 
 | 272 | #define I2O_BSA_DSC_MEDIA_ERROR           0x0001 | 
 | 273 | #define I2O_BSA_DSC_ACCESS_ERROR          0x0002 | 
 | 274 | #define I2O_BSA_DSC_DEVICE_FAILURE        0x0003 | 
 | 275 | #define I2O_BSA_DSC_DEVICE_NOT_READY      0x0004 | 
 | 276 | #define I2O_BSA_DSC_MEDIA_NOT_PRESENT     0x0005 | 
 | 277 | #define I2O_BSA_DSC_MEDIA_LOCKED          0x0006 | 
 | 278 | #define I2O_BSA_DSC_MEDIA_FAILURE         0x0007 | 
 | 279 | #define I2O_BSA_DSC_PROTOCOL_FAILURE      0x0008 | 
 | 280 | #define I2O_BSA_DSC_BUS_FAILURE           0x0009 | 
 | 281 | #define I2O_BSA_DSC_ACCESS_VIOLATION      0x000A | 
 | 282 | #define I2O_BSA_DSC_WRITE_PROTECTED       0x000B | 
 | 283 | #define I2O_BSA_DSC_DEVICE_RESET          0x000C | 
 | 284 | #define I2O_BSA_DSC_VOLUME_CHANGED        0x000D | 
 | 285 | #define I2O_BSA_DSC_TIMEOUT               0x000E | 
 | 286 |  | 
 | 287 | /* FailureStatusCodes, Table 3-3 Message Failure Codes */ | 
 | 288 |  | 
 | 289 | #define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED             0x81 | 
 | 290 | #define I2O_FSC_TRANSPORT_SERVICE_TERMINATED            0x82 | 
 | 291 | #define I2O_FSC_TRANSPORT_CONGESTION                    0x83 | 
 | 292 | #define I2O_FSC_TRANSPORT_FAILURE                       0x84 | 
 | 293 | #define I2O_FSC_TRANSPORT_STATE_ERROR                   0x85 | 
 | 294 | #define I2O_FSC_TRANSPORT_TIME_OUT                      0x86 | 
 | 295 | #define I2O_FSC_TRANSPORT_ROUTING_FAILURE               0x87 | 
 | 296 | #define I2O_FSC_TRANSPORT_INVALID_VERSION               0x88 | 
 | 297 | #define I2O_FSC_TRANSPORT_INVALID_OFFSET                0x89 | 
 | 298 | #define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS             0x8A | 
 | 299 | #define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL               0x8B | 
 | 300 | #define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE               0x8C | 
 | 301 | #define I2O_FSC_TRANSPORT_INVALID_TARGET_ID             0x8D | 
 | 302 | #define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID          0x8E | 
 | 303 | #define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT     0x8F | 
 | 304 | #define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE               0xFF | 
 | 305 |  | 
 | 306 | /* Device Claim Types */ | 
 | 307 | #define	I2O_CLAIM_PRIMARY					0x01000000 | 
 | 308 | #define	I2O_CLAIM_MANAGEMENT					0x02000000 | 
 | 309 | #define	I2O_CLAIM_AUTHORIZED					0x03000000 | 
 | 310 | #define	I2O_CLAIM_SECONDARY					0x04000000 | 
 | 311 |  | 
 | 312 | /* Message header defines for VersionOffset */ | 
 | 313 | #define I2OVER15	0x0001 | 
 | 314 | #define I2OVER20	0x0002 | 
 | 315 |  | 
| Markus Lidel | f88e119 | 2005-06-23 22:02:14 -0700 | [diff] [blame] | 316 | /* Default is 1.5 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 317 | #define I2OVERSION	I2OVER15 | 
 | 318 |  | 
 | 319 | #define SGL_OFFSET_0    I2OVERSION | 
 | 320 | #define SGL_OFFSET_4    (0x0040 | I2OVERSION) | 
 | 321 | #define SGL_OFFSET_5    (0x0050 | I2OVERSION) | 
 | 322 | #define SGL_OFFSET_6    (0x0060 | I2OVERSION) | 
 | 323 | #define SGL_OFFSET_7    (0x0070 | I2OVERSION) | 
 | 324 | #define SGL_OFFSET_8    (0x0080 | I2OVERSION) | 
 | 325 | #define SGL_OFFSET_9    (0x0090 | I2OVERSION) | 
 | 326 | #define SGL_OFFSET_10   (0x00A0 | I2OVERSION) | 
| Markus Lidel | b2aaee3 | 2005-06-23 22:02:19 -0700 | [diff] [blame] | 327 | #define SGL_OFFSET_11   (0x00B0 | I2OVERSION) | 
 | 328 | #define SGL_OFFSET_12   (0x00C0 | I2OVERSION) | 
 | 329 | #define SGL_OFFSET(x)   (((x)<<4) | I2OVERSION) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 330 |  | 
 | 331 | /* Transaction Reply Lists (TRL) Control Word structure */ | 
 | 332 | #define TRL_SINGLE_FIXED_LENGTH		0x00 | 
 | 333 | #define TRL_SINGLE_VARIABLE_LENGTH	0x40 | 
 | 334 | #define TRL_MULTIPLE_FIXED_LENGTH	0x80 | 
 | 335 |  | 
 | 336 |  /* msg header defines for MsgFlags */ | 
 | 337 | #define MSG_STATIC	0x0100 | 
 | 338 | #define MSG_64BIT_CNTXT	0x0200 | 
 | 339 | #define MSG_MULTI_TRANS	0x1000 | 
 | 340 | #define MSG_FAIL	0x2000 | 
 | 341 | #define MSG_FINAL	0x4000 | 
 | 342 | #define MSG_REPLY	0x8000 | 
 | 343 |  | 
 | 344 |  /* minimum size msg */ | 
 | 345 | #define THREE_WORD_MSG_SIZE	0x00030000 | 
 | 346 | #define FOUR_WORD_MSG_SIZE	0x00040000 | 
 | 347 | #define FIVE_WORD_MSG_SIZE	0x00050000 | 
 | 348 | #define SIX_WORD_MSG_SIZE	0x00060000 | 
 | 349 | #define SEVEN_WORD_MSG_SIZE	0x00070000 | 
 | 350 | #define EIGHT_WORD_MSG_SIZE	0x00080000 | 
 | 351 | #define NINE_WORD_MSG_SIZE	0x00090000 | 
 | 352 | #define TEN_WORD_MSG_SIZE	0x000A0000 | 
 | 353 | #define ELEVEN_WORD_MSG_SIZE	0x000B0000 | 
 | 354 | #define I2O_MESSAGE_SIZE(x)	((x)<<16) | 
 | 355 |  | 
| Markus Lidel | 9e87545 | 2005-06-23 22:02:21 -0700 | [diff] [blame] | 356 | /* special TID assignments */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 357 | #define ADAPTER_TID		0 | 
 | 358 | #define HOST_TID		1 | 
 | 359 |  | 
| Markus Lidel | 9e87545 | 2005-06-23 22:02:21 -0700 | [diff] [blame] | 360 | /* outbound queue defines */ | 
 | 361 | #define I2O_MAX_OUTBOUND_MSG_FRAMES	128 | 
 | 362 | #define I2O_OUTBOUND_MSG_FRAME_SIZE	128	/* in 32-bit words */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 363 |  | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 364 | /* inbound queue definitions */ | 
 | 365 | #define I2O_MSG_INPOOL_MIN		32 | 
 | 366 | #define I2O_INBOUND_MSG_FRAME_SIZE	128	/* in 32-bit words */ | 
 | 367 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 368 | #define I2O_POST_WAIT_OK	0 | 
 | 369 | #define I2O_POST_WAIT_TIMEOUT	-ETIMEDOUT | 
 | 370 |  | 
 | 371 | #define I2O_CONTEXT_LIST_MIN_LENGTH	15 | 
 | 372 | #define I2O_CONTEXT_LIST_USED		0x01 | 
 | 373 | #define I2O_CONTEXT_LIST_DELETED	0x02 | 
 | 374 |  | 
 | 375 | /* timeouts */ | 
 | 376 | #define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE	15 | 
 | 377 | #define I2O_TIMEOUT_MESSAGE_GET		5 | 
 | 378 | #define I2O_TIMEOUT_RESET		30 | 
 | 379 | #define I2O_TIMEOUT_STATUS_GET		5 | 
 | 380 | #define I2O_TIMEOUT_LCT_GET		360 | 
 | 381 | #define I2O_TIMEOUT_SCSI_SCB_ABORT	240 | 
 | 382 |  | 
 | 383 | /* retries */ | 
 | 384 | #define I2O_HRT_GET_TRIES		3 | 
 | 385 | #define I2O_LCT_GET_TRIES		3 | 
 | 386 |  | 
| Markus Lidel | b2aaee3 | 2005-06-23 22:02:19 -0700 | [diff] [blame] | 387 | /* defines for max_sectors and max_phys_segments */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 388 | #define I2O_MAX_SECTORS			1024 | 
| Markus Lidel | dcceafe | 2006-01-06 00:19:32 -0800 | [diff] [blame] | 389 | #define I2O_MAX_SECTORS_LIMITED		128 | 
| Markus Lidel | f10378f | 2005-06-23 22:02:16 -0700 | [diff] [blame] | 390 | #define I2O_MAX_PHYS_SEGMENTS		MAX_PHYS_SEGMENTS | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 391 |  | 
/*
 *	Message structures
 *
 *	Generic I2O message frame: a four-word (32-bit) header followed by a
 *	variable-length payload.  The header may be viewed either through the
 *	bitfield struct (u.s) or as raw words (u.head).
 */
struct i2o_message {
	union {
		struct {
			u8 version_offset;	/* I2OVERSION | SGL_OFFSET_* */
			u8 flags;		/* MSG_* message flags */
			u16 size;	/* frame size in 32-bit words
					   (see *_WORD_MSG_SIZE) */
			u32 target_tid:12;	/* destination TID */
			u32 init_tid:12;	/* initiator TID */
			u32 function:8;		/* I2O_CMD_* function code */
			u32 icntxt;	/* initiator context */
			u32 tcntxt;	/* transaction context */
		} s;
		u32 head[4];	/* raw view of the message header */
	} u;
	/* List follows */
	u32 body[0];	/* variable-length payload (SGL / parameters) */
};
 | 412 |  | 
/* MFA and I2O message used by mempool.  The MFA (message frame address —
 * presumably the controller-relative frame offset; confirm against the core)
 * is kept alongside the message so it can be handed back on post/free. */
struct i2o_msg_mfa {
	u32 mfa;		/* MFA returned by the controller */
	struct i2o_message msg;	/* I2O message */
};
 | 418 |  | 
/*
 *	Each I2O device entity has one of these. There is one per device.
 */
struct i2o_device {
	i2o_lct_entry lct_data;	/* Device LCT information */

	struct i2o_controller *iop;	/* Controlling IOP */
	struct list_head list;	/* node in IOP devices list */

	struct device device;	/* embedded driver-model device */

	struct mutex lock;	/* device lock */
};
 | 432 |  | 
/*
 *	Event structure provided to the event handling function.
 *	Allocated with trailing space for the event data (see data[] below)
 *	and handed to the OSM's event work handler.
 */
struct i2o_event {
	struct work_struct work;	/* deferred-work hook for the handler */
	struct i2o_device *i2o_dev;	/* I2O device pointer from which the
					   event reply was initiated */
	u16 size;		/* Size of data in 32-bit words */
	u32 tcntxt;		/* Transaction context used at
				   registration */
	u32 event_indicator;	/* Event indicator from reply */
	u32 data[0];		/* Event data from reply */
};
 | 446 |  | 
/*
 *	I2O classes which could be handled by the OSM.
 *	An OSM registers an array of these to declare which device classes
 *	it drives (see i2o_driver.classes).
 */
struct i2o_class_id {
	u16 class_id:12;	/* 12-bit I2O class code */
};
 | 453 |  | 
/*
 *	I2O driver structure for OSMs.
 *	One instance per OSM; the core dispatches replies, events and
 *	add/remove notifications through the callbacks below.
 */
struct i2o_driver {
	char *name;		/* OSM name */
	int context;		/* Low 8 bits of the transaction info */
	struct i2o_class_id *classes;	/* I2O classes that this OSM handles */

	/* Message reply handler (controller, message context, reply frame) */
	int (*reply) (struct i2o_controller *, u32, struct i2o_message *);

	/* Event handler, run from event_queue below */
	work_func_t event;

	struct workqueue_struct *event_queue;	/* Event queue */

	struct device_driver driver;	/* embedded driver-model driver */

	/* notification of changes */
	void (*notify_controller_add) (struct i2o_controller *);
	void (*notify_controller_remove) (struct i2o_controller *);
	void (*notify_device_add) (struct i2o_device *);
	void (*notify_device_remove) (struct i2o_device *);

	struct semaphore lock;	/* driver lock — scope not visible here;
				   verify against the I2O core */
};
 | 480 |  | 
/*
 *	Contains DMA mapped address information:
 *	CPU-visible virtual address, bus address and mapping length.
 */
struct i2o_dma {
	void *virt;		/* kernel virtual address */
	dma_addr_t phys;	/* DMA (bus) address */
	size_t len;		/* length of the mapping in bytes */
};
 | 489 |  | 
/*
 *	Contains slab cache and mempool information for a message pool
 *	(see i2o_controller.in_msg).
 */
struct i2o_pool {
	char *name;		/* pool/slab name */
	struct kmem_cache *slab;	/* backing slab cache */
	mempool_t *mempool;	/* mempool drawing from the slab */
};
 | 498 |  | 
/*
 *	Contains IO mapped address information:
 *	ioremapped virtual address plus the underlying physical range.
 */
struct i2o_io {
	void __iomem *virt;	/* ioremapped virtual address */
	unsigned long phys;	/* physical base address */
	unsigned long len;	/* length of the region in bytes */
};
 | 507 |  | 
/*
 *	Context queue entry, used for 32-bit context on 64-bit systems.
 *	Maps a 32-bit message context id to the original kernel pointer
 *	(see the i2o_cntxt_list_* functions below).
 */
struct i2o_context_list_element {
	struct list_head list;	/* node in i2o_controller.context_list */
	u32 context;		/* 32-bit context id handed to the IOP */
	void *ptr;		/* pointer the context stands in for */
	unsigned long timestamp;	/* when the entry was added */
};
 | 517 |  | 
/*
 * Each I2O controller has one of these objects
 */
struct i2o_controller {
	char name[16];		/* controller name */
	int unit;		/* controller unit number */
	int type;		/* controller type */

	struct pci_dev *pdev;	/* PCI device */

	unsigned int promise:1;	/* Promise controller */
	unsigned int adaptec:1;	/* DPT / Adaptec controller */
	unsigned int raptor:1;	/* split bar */
	unsigned int no_quiesce:1;	/* dont quiesce before reset */
	unsigned int short_req:1;	/* use small block sizes */
	unsigned int limit_sectors:1;	/* limit number of sectors / request */
	unsigned int pae_support:1;	/* controller has 64-bit SGL support */

	struct list_head devices;	/* list of I2O devices */
	struct list_head list;	/* Controller list */

	void __iomem *in_port;	/* Inbound port address */
	void __iomem *out_port;	/* Outbound port address */
	void __iomem *irq_status;	/* Interrupt status register address */
	void __iomem *irq_mask;	/* Interrupt mask register address */

	struct i2o_dma status;	/* IOP status block */

	struct i2o_dma hrt;	/* HW Resource Table */
	i2o_lct *lct;		/* Logical Config Table */
	struct i2o_dma dlct;	/* Temp LCT */
	struct mutex lct_lock;	/* Lock for LCT updates */
	struct i2o_dma status_block;	/* IOP status block */

	struct i2o_io base;	/* controller messaging unit */
	struct i2o_io in_queue;	/* inbound message queue Host->IOP */
	struct i2o_dma out_queue;	/* outbound message queue IOP->Host */

	struct i2o_pool in_msg;	/* mempool for inbound messages */

	unsigned int battery:1;	/* Has a battery backup */
	unsigned int io_alloc:1;	/* An I/O resource was allocated */
	unsigned int mem_alloc:1;	/* A memory resource was allocated */

	struct resource io_resource;	/* I/O resource allocated to the IOP */
	struct resource mem_resource;	/* Mem resource allocated to the IOP */

	struct device device;	/* embedded driver-model device */
	struct i2o_device *exec;	/* Executive */
#if BITS_PER_LONG == 64
	spinlock_t context_list_lock;	/* lock for context_list */
	atomic_t context_list_counter;	/* needed for unique contexts */
	struct list_head context_list;	/* list of context id's
					   and pointers */
#endif
	spinlock_t lock;	/* lock for controller
				   configuration */

	void *driver_data[I2O_MAX_DRIVERS];	/* storage for drivers */
};
 | 578 |  | 
/*
 * I2O System table entry
 *
 * The system table contains information about all the IOPs in the
 * system.  It is sent to all IOPs so that they can create peer2peer
 * connections between them.
 */
struct i2o_sys_tbl_entry {
	u16 org_id;		/* organisation id of the IOP vendor */
	u16 reserved1;
	u32 iop_id:12;		/* IOP number */
	u32 reserved2:20;
	u16 seg_num:12;		/* segment number */
	u16 i2o_version:4;	/* I2O specification version */
	u8 iop_state;		/* current IOP state */
	u8 msg_type;		/* messenger type */
	u16 frame_size;		/* message frame size */
	u16 reserved3;
	u32 last_changed;	/* last change indicator */
	u32 iop_capabilities;	/* IOP capability flags */
	u32 inbound_low;	/* inbound queue address, low 32 bits */
	u32 inbound_high;	/* inbound queue address, high 32 bits */
};
 | 602 |  | 
/* System table header followed by one entry per IOP (see
 * struct i2o_sys_tbl_entry above). */
struct i2o_sys_tbl {
	u8 num_entries;		/* number of entries in iops[] */
	u8 version;		/* system table version */
	u16 reserved1;
	u32 change_ind;		/* change indicator */
	u32 reserved2;
	u32 reserved3;
	struct i2o_sys_tbl_entry iops[0];	/* per-IOP entries */
};
 | 612 |  | 
 | 613 | extern struct list_head i2o_controllers; | 
 | 614 |  | 
 | 615 | /* Message functions */ | 
 | 616 | static inline struct i2o_message *i2o_msg_get(struct i2o_controller *); | 
 | 617 | extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int); | 
 | 618 | static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *); | 
 | 619 | static inline int i2o_msg_post_wait(struct i2o_controller *, | 
 | 620 | 				    struct i2o_message *, unsigned long); | 
 | 621 | extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *, | 
 | 622 | 				 unsigned long, struct i2o_dma *); | 
 | 623 | static inline void i2o_flush_reply(struct i2o_controller *, u32); | 
 | 624 |  | 
 | 625 | /* IOP functions */ | 
 | 626 | extern int i2o_status_get(struct i2o_controller *); | 
 | 627 |  | 
 | 628 | extern int i2o_event_register(struct i2o_device *, struct i2o_driver *, int, | 
 | 629 | 			      u32); | 
 | 630 | extern struct i2o_device *i2o_iop_find_device(struct i2o_controller *, u16); | 
 | 631 | extern struct i2o_controller *i2o_find_iop(int); | 
 | 632 |  | 
/* Functions needed for handling 64-bit pointers in 32-bit context */
#if BITS_PER_LONG == 64
/*
 * On 64-bit kernels a kernel pointer does not fit into a 32-bit message
 * context, so pointers are translated through a per-controller list
 * (struct i2o_context_list_element above); these are implemented out of
 * line by the I2O core.
 */
extern u32 i2o_cntxt_list_add(struct i2o_controller *, void *);
extern void *i2o_cntxt_list_get(struct i2o_controller *, u32);
extern u32 i2o_cntxt_list_remove(struct i2o_controller *, void *);
extern u32 i2o_cntxt_list_get_ptr(struct i2o_controller *, void *);

/* low 32 bits of a kernel pointer */
static inline u32 i2o_ptr_low(void *ptr)
{
	return (u32) (u64) ptr;
};

/* high 32 bits of a kernel pointer */
static inline u32 i2o_ptr_high(void *ptr)
{
	return (u32) ((u64) ptr >> 32);
};

/* low 32 bits of a DMA address */
static inline u32 i2o_dma_low(dma_addr_t dma_addr)
{
	return (u32) (u64) dma_addr;
};

/* high 32 bits of a DMA address */
static inline u32 i2o_dma_high(dma_addr_t dma_addr)
{
	return (u32) ((u64) dma_addr >> 32);
};
#else
/*
 * On 32-bit kernels a pointer fits directly into a 32-bit context, so
 * the context-list operations degenerate to simple casts.
 */
static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
{
	return (u32) ptr;
};

static inline void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
{
	return (void *)context;
};

static inline u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr)
{
	return (u32) ptr;
};

static inline u32 i2o_cntxt_list_get_ptr(struct i2o_controller *c, void *ptr)
{
	return (u32) ptr;
};

/* pointer fits in 32 bits: low word is the pointer itself */
static inline u32 i2o_ptr_low(void *ptr)
{
	return (u32) ptr;
};

/* ... and the high word is always zero */
static inline u32 i2o_ptr_high(void *ptr)
{
	return 0;
};

/* DMA address fits in 32 bits: low word is the address itself */
static inline u32 i2o_dma_low(dma_addr_t dma_addr)
{
	return (u32) dma_addr;
};

/* ... and the high word is always zero */
static inline u32 i2o_dma_high(dma_addr_t dma_addr)
{
	return 0;
};
#endif
 | 700 |  | 
 | 701 | /** | 
 | 702 |  *	i2o_sg_tablesize - Calculate the maximum number of elements in a SGL | 
 | 703 |  *	@c: I2O controller for which the calculation should be done | 
 | 704 |  *	@body_size: maximum body size used for message in 32-bit words. | 
 | 705 |  * | 
 | 706 |  *	Return the maximum number of SG elements in a SG list. | 
 | 707 |  */ | 
 | 708 | static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) | 
 | 709 | { | 
 | 710 | 	i2o_status_block *sb = c->status_block.virt; | 
 | 711 | 	u16 sg_count = | 
 | 712 | 	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) - | 
 | 713 | 	    body_size; | 
 | 714 |  | 
 | 715 | 	if (c->pae_support) { | 
 | 716 | 		/* | 
 | 717 | 		 * for 64-bit a SG attribute element must be added and each | 
 | 718 | 		 * SG element needs 12 bytes instead of 8. | 
 | 719 | 		 */ | 
 | 720 | 		sg_count -= 2; | 
 | 721 | 		sg_count /= 3; | 
 | 722 | 	} else | 
 | 723 | 		sg_count /= 2; | 
 | 724 |  | 
 | 725 | 	if (c->short_req && (sg_count > 8)) | 
 | 726 | 		sg_count = 8; | 
 | 727 |  | 
 | 728 | 	return sg_count; | 
 | 729 | }; | 
 | 730 |  | 
 | 731 | /** | 
 | 732 |  *	i2o_dma_map_single - Map pointer to controller and fill in I2O message. | 
 | 733 |  *	@c: I2O controller | 
 | 734 |  *	@ptr: pointer to the data which should be mapped | 
 | 735 |  *	@size: size of data in bytes | 
 | 736 |  *	@direction: DMA_TO_DEVICE / DMA_FROM_DEVICE | 
 | 737 |  *	@sg_ptr: pointer to the SG list inside the I2O message | 
 | 738 |  * | 
 | 739 |  *	This function does all necessary DMA handling and also writes the I2O | 
 | 740 |  *	SGL elements into the I2O message. For details on DMA handling see also | 
 | 741 |  *	dma_map_single(). The pointer sg_ptr will only be set to the end of the | 
 | 742 |  *	SG list if the allocation was successful. | 
 | 743 |  * | 
 | 744 |  *	Returns DMA address which must be checked for failures using | 
 | 745 |  *	dma_mapping_error(). | 
 | 746 |  */ | 
 | 747 | static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, | 
 | 748 | 					    size_t size, | 
 | 749 | 					    enum dma_data_direction direction, | 
 | 750 | 					    u32 ** sg_ptr) | 
 | 751 | { | 
 | 752 | 	u32 sg_flags; | 
 | 753 | 	u32 *mptr = *sg_ptr; | 
 | 754 | 	dma_addr_t dma_addr; | 
 | 755 |  | 
 | 756 | 	switch (direction) { | 
 | 757 | 	case DMA_TO_DEVICE: | 
 | 758 | 		sg_flags = 0xd4000000; | 
 | 759 | 		break; | 
 | 760 | 	case DMA_FROM_DEVICE: | 
 | 761 | 		sg_flags = 0xd0000000; | 
 | 762 | 		break; | 
 | 763 | 	default: | 
 | 764 | 		return 0; | 
 | 765 | 	} | 
 | 766 |  | 
 | 767 | 	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); | 
 | 768 | 	if (!dma_mapping_error(dma_addr)) { | 
 | 769 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 
 | 770 | 		if ((sizeof(dma_addr_t) > 4) && c->pae_support) { | 
 | 771 | 			*mptr++ = cpu_to_le32(0x7C020002); | 
 | 772 | 			*mptr++ = cpu_to_le32(PAGE_SIZE); | 
 | 773 | 		} | 
 | 774 | #endif | 
 | 775 |  | 
 | 776 | 		*mptr++ = cpu_to_le32(sg_flags | size); | 
 | 777 | 		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr)); | 
 | 778 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 
 | 779 | 		if ((sizeof(dma_addr_t) > 4) && c->pae_support) | 
 | 780 | 			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr)); | 
 | 781 | #endif | 
 | 782 | 		*sg_ptr = mptr; | 
 | 783 | 	} | 
 | 784 | 	return dma_addr; | 
 | 785 | }; | 
 | 786 |  | 
 | 787 | /** | 
 | 788 |  *	i2o_dma_map_sg - Map a SG List to controller and fill in I2O message. | 
 | 789 |  *	@c: I2O controller | 
 | 790 |  *	@sg: SG list to be mapped | 
 | 791 |  *	@sg_count: number of elements in the SG list | 
 | 792 |  *	@direction: DMA_TO_DEVICE / DMA_FROM_DEVICE | 
 | 793 |  *	@sg_ptr: pointer to the SG list inside the I2O message | 
 | 794 |  * | 
 | 795 |  *	This function does all necessary DMA handling and also writes the I2O | 
 | 796 |  *	SGL elements into the I2O message. For details on DMA handling see also | 
 | 797 |  *	dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG | 
 | 798 |  *	list if the allocation was successful. | 
 | 799 |  * | 
 | 800 |  *	Returns 0 on failure or 1 on success. | 
 | 801 |  */ | 
 | 802 | static inline int i2o_dma_map_sg(struct i2o_controller *c, | 
 | 803 | 				 struct scatterlist *sg, int sg_count, | 
 | 804 | 				 enum dma_data_direction direction, | 
 | 805 | 				 u32 ** sg_ptr) | 
 | 806 | { | 
 | 807 | 	u32 sg_flags; | 
 | 808 | 	u32 *mptr = *sg_ptr; | 
 | 809 |  | 
 | 810 | 	switch (direction) { | 
 | 811 | 	case DMA_TO_DEVICE: | 
 | 812 | 		sg_flags = 0x14000000; | 
 | 813 | 		break; | 
 | 814 | 	case DMA_FROM_DEVICE: | 
 | 815 | 		sg_flags = 0x10000000; | 
 | 816 | 		break; | 
 | 817 | 	default: | 
 | 818 | 		return 0; | 
 | 819 | 	} | 
 | 820 |  | 
 | 821 | 	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction); | 
 | 822 | 	if (!sg_count) | 
 | 823 | 		return 0; | 
 | 824 |  | 
 | 825 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 
 | 826 | 	if ((sizeof(dma_addr_t) > 4) && c->pae_support) { | 
 | 827 | 		*mptr++ = cpu_to_le32(0x7C020002); | 
 | 828 | 		*mptr++ = cpu_to_le32(PAGE_SIZE); | 
 | 829 | 	} | 
 | 830 | #endif | 
 | 831 |  | 
 | 832 | 	while (sg_count-- > 0) { | 
 | 833 | 		if (!sg_count) | 
 | 834 | 			sg_flags |= 0xC0000000; | 
 | 835 | 		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg)); | 
 | 836 | 		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg))); | 
 | 837 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 
 | 838 | 		if ((sizeof(dma_addr_t) > 4) && c->pae_support) | 
 | 839 | 			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg))); | 
 | 840 | #endif | 
| Jens Axboe | ba2da2f | 2007-07-24 14:42:11 +0200 | [diff] [blame] | 841 | 		sg = sg_next(sg); | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 842 | 	} | 
 | 843 | 	*sg_ptr = mptr; | 
 | 844 |  | 
 | 845 | 	return 1; | 
 | 846 | }; | 
 | 847 |  | 
 | 848 | /** | 
 | 849 |  *	i2o_dma_alloc - Allocate DMA memory | 
 | 850 |  *	@dev: struct device pointer to the PCI device of the I2O controller | 
 | 851 |  *	@addr: i2o_dma struct which should get the DMA buffer | 
 | 852 |  *	@len: length of the new DMA memory | 
 | 853 |  *	@gfp_mask: GFP mask | 
 | 854 |  * | 
 | 855 |  *	Allocate a coherent DMA memory and write the pointers into addr. | 
 | 856 |  * | 
 | 857 |  *	Returns 0 on success or -ENOMEM on failure. | 
 | 858 |  */ | 
 | 859 | static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, | 
 | 860 | 				size_t len, gfp_t gfp_mask) | 
 | 861 | { | 
 | 862 | 	struct pci_dev *pdev = to_pci_dev(dev); | 
 | 863 | 	int dma_64 = 0; | 
 | 864 |  | 
 | 865 | 	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) { | 
 | 866 | 		dma_64 = 1; | 
 | 867 | 		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) | 
 | 868 | 			return -ENOMEM; | 
 | 869 | 	} | 
 | 870 |  | 
 | 871 | 	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask); | 
 | 872 |  | 
 | 873 | 	if ((sizeof(dma_addr_t) > 4) && dma_64) | 
 | 874 | 		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) | 
 | 875 | 			printk(KERN_WARNING "i2o: unable to set 64-bit DMA"); | 
 | 876 |  | 
 | 877 | 	if (!addr->virt) | 
 | 878 | 		return -ENOMEM; | 
 | 879 |  | 
 | 880 | 	memset(addr->virt, 0, len); | 
 | 881 | 	addr->len = len; | 
 | 882 |  | 
 | 883 | 	return 0; | 
 | 884 | }; | 
 | 885 |  | 
 | 886 | /** | 
 | 887 |  *	i2o_dma_free - Free DMA memory | 
 | 888 |  *	@dev: struct device pointer to the PCI device of the I2O controller | 
 | 889 |  *	@addr: i2o_dma struct which contains the DMA buffer | 
 | 890 |  * | 
 | 891 |  *	Free a coherent DMA memory and set virtual address of addr to NULL. | 
 | 892 |  */ | 
 | 893 | static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr) | 
 | 894 | { | 
 | 895 | 	if (addr->virt) { | 
 | 896 | 		if (addr->phys) | 
 | 897 | 			dma_free_coherent(dev, addr->len, addr->virt, | 
 | 898 | 					  addr->phys); | 
 | 899 | 		else | 
 | 900 | 			kfree(addr->virt); | 
 | 901 | 		addr->virt = NULL; | 
 | 902 | 	} | 
 | 903 | }; | 
 | 904 |  | 
 | 905 | /** | 
 | 906 |  *	i2o_dma_realloc - Realloc DMA memory | 
 | 907 |  *	@dev: struct device pointer to the PCI device of the I2O controller | 
 | 908 |  *	@addr: pointer to a i2o_dma struct DMA buffer | 
 | 909 |  *	@len: new length of memory | 
 | 910 |  *	@gfp_mask: GFP mask | 
 | 911 |  * | 
 | 912 |  *	If there was something allocated in the addr, free it first. If len > 0 | 
 | 913 |  *	than try to allocate it and write the addresses back to the addr | 
 | 914 |  *	structure. If len == 0 set the virtual address to NULL. | 
 | 915 |  * | 
 | 916 |  *	Returns the 0 on success or negative error code on failure. | 
 | 917 |  */ | 
 | 918 | static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, | 
 | 919 | 				  size_t len, gfp_t gfp_mask) | 
 | 920 | { | 
 | 921 | 	i2o_dma_free(dev, addr); | 
 | 922 |  | 
 | 923 | 	if (len) | 
 | 924 | 		return i2o_dma_alloc(dev, addr, len, gfp_mask); | 
 | 925 |  | 
 | 926 | 	return 0; | 
 | 927 | }; | 
 | 928 |  | 
 | 929 | /* | 
 | 930 |  *	i2o_pool_alloc - Allocate an slab cache and mempool | 
 | 931 |  *	@mempool: pointer to struct i2o_pool to write data into. | 
 | 932 |  *	@name: name which is used to identify cache | 
 | 933 |  *	@size: size of each object | 
 | 934 |  *	@min_nr: minimum number of objects | 
 | 935 |  * | 
 | 936 |  *	First allocates a slab cache with name and size. Then allocates a | 
 | 937 |  *	mempool which uses the slab cache for allocation and freeing. | 
 | 938 |  * | 
 | 939 |  *	Returns 0 on success or negative error code on failure. | 
 | 940 |  */ | 
 | 941 | static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name, | 
 | 942 | 				 size_t size, int min_nr) | 
 | 943 | { | 
 | 944 | 	pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL); | 
 | 945 | 	if (!pool->name) | 
 | 946 | 		goto exit; | 
 | 947 | 	strcpy(pool->name, name); | 
 | 948 |  | 
 | 949 | 	pool->slab = | 
| Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 950 | 	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL); | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 951 | 	if (!pool->slab) | 
 | 952 | 		goto free_name; | 
 | 953 |  | 
| Matthew Dobson | 93d2341 | 2006-03-26 01:37:50 -0800 | [diff] [blame] | 954 | 	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 955 | 	if (!pool->mempool) | 
 | 956 | 		goto free_slab; | 
 | 957 |  | 
 | 958 | 	return 0; | 
 | 959 |  | 
 | 960 |       free_slab: | 
 | 961 | 	kmem_cache_destroy(pool->slab); | 
 | 962 |  | 
 | 963 |       free_name: | 
 | 964 | 	kfree(pool->name); | 
 | 965 |  | 
 | 966 |       exit: | 
 | 967 | 	return -ENOMEM; | 
 | 968 | }; | 
 | 969 |  | 
 | 970 | /* | 
 | 971 |  *	i2o_pool_free - Free slab cache and mempool again | 
 | 972 |  *	@mempool: pointer to struct i2o_pool which should be freed | 
 | 973 |  * | 
 | 974 |  *	Note that you have to return all objects to the mempool again before | 
 | 975 |  *	calling i2o_pool_free(). | 
 | 976 |  */ | 
 | 977 | static inline void i2o_pool_free(struct i2o_pool *pool) | 
 | 978 | { | 
 | 979 | 	mempool_destroy(pool->mempool); | 
 | 980 | 	kmem_cache_destroy(pool->slab); | 
 | 981 | 	kfree(pool->name); | 
 | 982 | }; | 
 | 983 |  | 
 | 984 | /* I2O driver (OSM) functions */ | 
 | 985 | extern int i2o_driver_register(struct i2o_driver *); | 
 | 986 | extern void i2o_driver_unregister(struct i2o_driver *); | 
 | 987 |  | 
 | 988 | /** | 
 | 989 |  *	i2o_driver_notify_controller_add - Send notification of added controller | 
| Randy Dunlap | d9489fb | 2006-12-06 20:38:43 -0800 | [diff] [blame] | 990 |  *	@drv: I2O driver | 
 | 991 |  *	@c: I2O controller | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 992 |  * | 
 | 993 |  *	Send notification of added controller to a single registered driver. | 
 | 994 |  */ | 
 | 995 | static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv, | 
 | 996 | 						    struct i2o_controller *c) | 
 | 997 | { | 
 | 998 | 	if (drv->notify_controller_add) | 
 | 999 | 		drv->notify_controller_add(c); | 
 | 1000 | }; | 
 | 1001 |  | 
 | 1002 | /** | 
| Randy Dunlap | d9489fb | 2006-12-06 20:38:43 -0800 | [diff] [blame] | 1003 |  *	i2o_driver_notify_controller_remove - Send notification of removed controller | 
 | 1004 |  *	@drv: I2O driver | 
 | 1005 |  *	@c: I2O controller | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 1006 |  * | 
 | 1007 |  *	Send notification of removed controller to a single registered driver. | 
 | 1008 |  */ | 
 | 1009 | static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv, | 
 | 1010 | 						       struct i2o_controller *c) | 
 | 1011 | { | 
 | 1012 | 	if (drv->notify_controller_remove) | 
 | 1013 | 		drv->notify_controller_remove(c); | 
 | 1014 | }; | 
 | 1015 |  | 
 | 1016 | /** | 
| Randy Dunlap | d9489fb | 2006-12-06 20:38:43 -0800 | [diff] [blame] | 1017 |  *	i2o_driver_notify_device_add - Send notification of added device | 
 | 1018 |  *	@drv: I2O driver | 
 | 1019 |  *	@i2o_dev: the added i2o_device | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 1020 |  * | 
 | 1021 |  *	Send notification of added device to a single registered driver. | 
 | 1022 |  */ | 
 | 1023 | static inline void i2o_driver_notify_device_add(struct i2o_driver *drv, | 
 | 1024 | 						struct i2o_device *i2o_dev) | 
 | 1025 | { | 
 | 1026 | 	if (drv->notify_device_add) | 
 | 1027 | 		drv->notify_device_add(i2o_dev); | 
 | 1028 | }; | 
 | 1029 |  | 
 | 1030 | /** | 
 | 1031 |  *	i2o_driver_notify_device_remove - Send notification of removed device | 
| Randy Dunlap | d9489fb | 2006-12-06 20:38:43 -0800 | [diff] [blame] | 1032 |  *	@drv: I2O driver | 
 | 1033 |  *	@i2o_dev: the added i2o_device | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 1034 |  * | 
 | 1035 |  *	Send notification of removed device to a single registered driver. | 
 | 1036 |  */ | 
 | 1037 | static inline void i2o_driver_notify_device_remove(struct i2o_driver *drv, | 
 | 1038 | 						   struct i2o_device *i2o_dev) | 
 | 1039 | { | 
 | 1040 | 	if (drv->notify_device_remove) | 
 | 1041 | 		drv->notify_device_remove(i2o_dev); | 
 | 1042 | }; | 
 | 1043 |  | 
 | 1044 | extern void i2o_driver_notify_controller_add_all(struct i2o_controller *); | 
 | 1045 | extern void i2o_driver_notify_controller_remove_all(struct i2o_controller *); | 
 | 1046 | extern void i2o_driver_notify_device_add_all(struct i2o_device *); | 
 | 1047 | extern void i2o_driver_notify_device_remove_all(struct i2o_device *); | 
 | 1048 |  | 
 | 1049 | /* I2O device functions */ | 
 | 1050 | extern int i2o_device_claim(struct i2o_device *); | 
 | 1051 | extern int i2o_device_claim_release(struct i2o_device *); | 
 | 1052 |  | 
 | 1053 | /* Exec OSM functions */ | 
 | 1054 | extern int i2o_exec_lct_get(struct i2o_controller *); | 
 | 1055 |  | 
 | 1056 | /* device / driver / kobject conversion functions */ | 
 | 1057 | #define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) | 
 | 1058 | #define to_i2o_device(dev) container_of(dev, struct i2o_device, device) | 
 | 1059 | #define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) | 
 | 1060 | #define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) | 
 | 1061 |  | 
 | 1062 | /** | 
 | 1063 |  *	i2o_out_to_virt - Turn an I2O message to a virtual address | 
 | 1064 |  *	@c: controller | 
 | 1065 |  *	@m: message engine value | 
 | 1066 |  * | 
 | 1067 |  *	Turn a receive message from an I2O controller bus address into | 
 | 1068 |  *	a Linux virtual address. The shared page frame is a linear block | 
 | 1069 |  *	so we simply have to shift the offset. This function does not | 
 | 1070 |  *	work for sender side messages as they are ioremap objects | 
 | 1071 |  *	provided by the I2O controller. | 
 | 1072 |  */ | 
 | 1073 | static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c, | 
 | 1074 | 						      u32 m) | 
 | 1075 | { | 
 | 1076 | 	BUG_ON(m < c->out_queue.phys | 
 | 1077 | 	       || m >= c->out_queue.phys + c->out_queue.len); | 
 | 1078 |  | 
 | 1079 | 	return c->out_queue.virt + (m - c->out_queue.phys); | 
 | 1080 | }; | 
 | 1081 |  | 
 | 1082 | /** | 
 | 1083 |  *	i2o_msg_in_to_virt - Turn an I2O message to a virtual address | 
 | 1084 |  *	@c: controller | 
 | 1085 |  *	@m: message engine value | 
 | 1086 |  * | 
 | 1087 |  *	Turn a send message from an I2O controller bus address into | 
 | 1088 |  *	a Linux virtual address. The shared page frame is a linear block | 
 | 1089 |  *	so we simply have to shift the offset. This function does not | 
 | 1090 |  *	work for receive side messages as they are kmalloc objects | 
 | 1091 |  *	in a different pool. | 
 | 1092 |  */ | 
 | 1093 | static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct | 
 | 1094 | 							     i2o_controller *c, | 
 | 1095 | 							     u32 m) | 
 | 1096 | { | 
 | 1097 | 	return c->in_queue.virt + m; | 
 | 1098 | }; | 
 | 1099 |  | 
 | 1100 | /** | 
 | 1101 |  *	i2o_msg_get - obtain an I2O message from the IOP | 
 | 1102 |  *	@c: I2O controller | 
 | 1103 |  * | 
 | 1104 |  *	This function tries to get a message frame. If no message frame is | 
 | 1105 |  *	available do not wait until one is availabe (see also i2o_msg_get_wait). | 
 | 1106 |  *	The returned pointer to the message frame is not in I/O memory, it is | 
 | 1107 |  *	allocated from a mempool. But because a MFA is allocated from the | 
 | 1108 |  *	controller too it is guaranteed that i2o_msg_post() will never fail. | 
 | 1109 |  * | 
 | 1110 |  *	On a success a pointer to the message frame is returned. If the message | 
 | 1111 |  *	queue is empty -EBUSY is returned and if no memory is available -ENOMEM | 
 | 1112 |  *	is returned. | 
 | 1113 |  */ | 
 | 1114 | static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c) | 
 | 1115 | { | 
 | 1116 | 	struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC); | 
 | 1117 | 	if (!mmsg) | 
 | 1118 | 		return ERR_PTR(-ENOMEM); | 
 | 1119 |  | 
 | 1120 | 	mmsg->mfa = readl(c->in_port); | 
| Markus Lidel | 8b3e09e | 2006-02-03 03:04:29 -0800 | [diff] [blame] | 1121 | 	if (unlikely(mmsg->mfa >= c->in_queue.len)) { | 
| Markus Lidel | 57a62fe | 2006-06-10 09:54:14 -0700 | [diff] [blame] | 1122 | 		u32 mfa = mmsg->mfa; | 
 | 1123 |  | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 1124 | 		mempool_free(mmsg, c->in_msg.mempool); | 
| Markus Lidel | 57a62fe | 2006-06-10 09:54:14 -0700 | [diff] [blame] | 1125 |  | 
 | 1126 | 		if (mfa == I2O_QUEUE_EMPTY) | 
| Markus Lidel | 8b3e09e | 2006-02-03 03:04:29 -0800 | [diff] [blame] | 1127 | 			return ERR_PTR(-EBUSY); | 
 | 1128 | 		return ERR_PTR(-EFAULT); | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 1129 | 	} | 
 | 1130 |  | 
 | 1131 | 	return &mmsg->msg; | 
 | 1132 | }; | 
 | 1133 |  | 
 | 1134 | /** | 
 | 1135 |  *	i2o_msg_post - Post I2O message to I2O controller | 
 | 1136 |  *	@c: I2O controller to which the message should be send | 
 | 1137 |  *	@msg: message returned by i2o_msg_get() | 
 | 1138 |  * | 
 | 1139 |  *	Post the message to the I2O controller and return immediately. | 
 | 1140 |  */ | 
 | 1141 | static inline void i2o_msg_post(struct i2o_controller *c, | 
 | 1142 | 				struct i2o_message *msg) | 
 | 1143 | { | 
 | 1144 | 	struct i2o_msg_mfa *mmsg; | 
 | 1145 |  | 
 | 1146 | 	mmsg = container_of(msg, struct i2o_msg_mfa, msg); | 
 | 1147 | 	memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg, | 
 | 1148 | 		    (le32_to_cpu(msg->u.head[0]) >> 16) << 2); | 
 | 1149 | 	writel(mmsg->mfa, c->in_port); | 
 | 1150 | 	mempool_free(mmsg, c->in_msg.mempool); | 
 | 1151 | }; | 
 | 1152 |  | 
 | 1153 | /** | 
 | 1154 |  * 	i2o_msg_post_wait - Post and wait a message and wait until return | 
 | 1155 |  *	@c: controller | 
| Randy Dunlap | d9489fb | 2006-12-06 20:38:43 -0800 | [diff] [blame] | 1156 |  *	@msg: message to post | 
| Markus Lidel | a1a5ea7 | 2006-01-06 00:19:29 -0800 | [diff] [blame] | 1157 |  *	@timeout: time in seconds to wait | 
 | 1158 |  * | 
 | 1159 |  * 	This API allows an OSM to post a message and then be told whether or | 
 | 1160 |  *	not the system received a successful reply. If the message times out | 
 | 1161 |  *	then the value '-ETIMEDOUT' is returned. | 
 | 1162 |  * | 
 | 1163 |  *	Returns 0 on success or negative error code on failure. | 
 | 1164 |  */ | 
 | 1165 | static inline int i2o_msg_post_wait(struct i2o_controller *c, | 
 | 1166 | 				    struct i2o_message *msg, | 
 | 1167 | 				    unsigned long timeout) | 
 | 1168 | { | 
 | 1169 | 	return i2o_msg_post_wait_mem(c, msg, timeout, NULL); | 
 | 1170 | }; | 
 | 1171 |  | 
 | 1172 | /** | 
 | 1173 |  *	i2o_msg_nop_mfa - Returns a fetched MFA back to the controller | 
 | 1174 |  *	@c: I2O controller from which the MFA was fetched | 
 | 1175 |  *	@mfa: MFA which should be returned | 
 | 1176 |  * | 
 | 1177 |  *	This function must be used for preserved messages, because i2o_msg_nop() | 
 | 1178 |  *	also returns the allocated memory back to the msg_pool mempool. | 
 | 1179 |  */ | 
 | 1180 | static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa) | 
 | 1181 | { | 
 | 1182 | 	struct i2o_message __iomem *msg; | 
 | 1183 | 	u32 nop[3] = { | 
 | 1184 | 		THREE_WORD_MSG_SIZE | SGL_OFFSET_0, | 
 | 1185 | 		I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, | 
 | 1186 | 		0x00000000 | 
 | 1187 | 	}; | 
 | 1188 |  | 
 | 1189 | 	msg = i2o_msg_in_to_virt(c, mfa); | 
 | 1190 | 	memcpy_toio(msg, nop, sizeof(nop)); | 
 | 1191 | 	writel(mfa, c->in_port); | 
 | 1192 | }; | 
 | 1193 |  | 
 | 1194 | /** | 
 | 1195 |  *	i2o_msg_nop - Returns a message which is not used | 
 | 1196 |  *	@c: I2O controller from which the message was created | 
 | 1197 |  *	@msg: message which should be returned | 
 | 1198 |  * | 
 | 1199 |  *	If you fetch a message via i2o_msg_get, and can't use it, you must | 
 | 1200 |  *	return the message with this function. Otherwise the MFA is lost as well | 
 | 1201 |  *	as the allocated memory from the mempool. | 
 | 1202 |  */ | 
 | 1203 | static inline void i2o_msg_nop(struct i2o_controller *c, | 
 | 1204 | 			       struct i2o_message *msg) | 
 | 1205 | { | 
 | 1206 | 	struct i2o_msg_mfa *mmsg; | 
 | 1207 | 	mmsg = container_of(msg, struct i2o_msg_mfa, msg); | 
 | 1208 |  | 
 | 1209 | 	i2o_msg_nop_mfa(c, mmsg->mfa); | 
 | 1210 | 	mempool_free(mmsg, c->in_msg.mempool); | 
 | 1211 | }; | 
 | 1212 |  | 
 | 1213 | /** | 
 | 1214 |  *	i2o_flush_reply - Flush reply from I2O controller | 
 | 1215 |  *	@c: I2O controller | 
 | 1216 |  *	@m: the message identifier | 
 | 1217 |  * | 
 | 1218 |  *	The I2O controller must be informed that the reply message is not needed | 
 | 1219 |  *	anymore. If you forget to flush the reply, the message frame can't be | 
 | 1220 |  *	used by the controller anymore and is therefore lost. | 
 | 1221 |  */ | 
 | 1222 | static inline void i2o_flush_reply(struct i2o_controller *c, u32 m) | 
 | 1223 | { | 
 | 1224 | 	writel(m, c->out_port); | 
 | 1225 | }; | 
 | 1226 |  | 
 | 1227 | /* | 
 | 1228 |  *	Endian handling wrapped into the macro - keeps the core code | 
 | 1229 |  *	cleaner. | 
 | 1230 |  */ | 
 | 1231 |  | 
 | 1232 | #define i2o_raw_writel(val, mem)	__raw_writel(cpu_to_le32(val), mem) | 
 | 1233 |  | 
 | 1234 | extern int i2o_parm_field_get(struct i2o_device *, int, int, void *, int); | 
 | 1235 | extern int i2o_parm_table_get(struct i2o_device *, int, int, int, void *, int, | 
 | 1236 | 			      void *, int); | 
 | 1237 |  | 
 | 1238 | /* debugging and troubleshooting/diagnostic helpers. */ | 
 | 1239 | #define osm_printk(level, format, arg...)  \ | 
 | 1240 | 	printk(level "%s: " format, OSM_NAME , ## arg) | 
 | 1241 |  | 
 | 1242 | #ifdef DEBUG | 
 | 1243 | #define osm_debug(format, arg...) \ | 
 | 1244 | 	osm_printk(KERN_DEBUG, format , ## arg) | 
 | 1245 | #else | 
 | 1246 | #define osm_debug(format, arg...) \ | 
 | 1247 |         do { } while (0) | 
 | 1248 | #endif | 
 | 1249 |  | 
 | 1250 | #define osm_err(format, arg...)		\ | 
 | 1251 | 	osm_printk(KERN_ERR, format , ## arg) | 
 | 1252 | #define osm_info(format, arg...)		\ | 
 | 1253 | 	osm_printk(KERN_INFO, format , ## arg) | 
 | 1254 | #define osm_warn(format, arg...)		\ | 
 | 1255 | 	osm_printk(KERN_WARNING, format , ## arg) | 
 | 1256 |  | 
 | 1257 | /* debugging functions */ | 
 | 1258 | extern void i2o_report_status(const char *, const char *, struct i2o_message *); | 
 | 1259 | extern void i2o_dump_message(struct i2o_message *); | 
 | 1260 | extern void i2o_dump_hrt(struct i2o_controller *c); | 
 | 1261 | extern void i2o_debug_state(struct i2o_controller *c); | 
 | 1262 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | #endif				/* __KERNEL__ */ | 
 | 1264 | #endif				/* _I2O_H */ |