#ifndef _LINUX_FIREWIRE_H
#define _LINUX_FIREWIRE_H

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)

static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
{
	u32    *dst = _dst;
	__be32 *src = _src;
	int i;

	for (i = 0; i < size / 4; i++)
		dst[i] = be32_to_cpu(src[i]);
}

/* Byte swapping is symmetric, so converting to be32 reuses the same loop. */
static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
{
	fw_memcpy_from_be32(_dst, _src, size);
}

#define CSR_REGISTER_BASE		0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
#define CSR_STATE_CLEAR			0x0
#define CSR_STATE_SET			0x4
#define CSR_NODE_IDS			0x8
#define CSR_RESET_START			0xc
#define CSR_SPLIT_TIMEOUT_HI		0x18
#define CSR_SPLIT_TIMEOUT_LO		0x1c
#define CSR_CYCLE_TIME			0x200
#define CSR_BUS_TIME			0x204
#define CSR_BUSY_TIMEOUT		0x210
#define CSR_BUS_MANAGER_ID		0x21c
#define CSR_BANDWIDTH_AVAILABLE		0x220
#define CSR_CHANNELS_AVAILABLE		0x224
#define CSR_CHANNELS_AVAILABLE_HI	0x224
#define CSR_CHANNELS_AVAILABLE_LO	0x228
#define CSR_BROADCAST_CHANNEL		0x234
#define CSR_CONFIG_ROM			0x400
#define CSR_CONFIG_ROM_END		0x800
#define CSR_FCP_COMMAND			0xB00
#define CSR_FCP_RESPONSE		0xD00
#define CSR_FCP_END			0xF00
#define CSR_TOPOLOGY_MAP		0x1000
#define CSR_TOPOLOGY_MAP_END		0x1400
#define CSR_SPEED_MAP			0x2000
#define CSR_SPEED_MAP_END		0x3000
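
/*
 * Worked example (illustrative only):  these offsets are combined with
 * CSR_REGISTER_BASE to form the 48-bit address of a request.  Reading a
 * node's CYCLE_TIME register therefore targets
 *
 *	CSR_REGISTER_BASE + CSR_CYCLE_TIME == 0xfffff0000200ULL
 *
 * which is the value to pass as the offset to fw_run_transaction() or
 * fw_send_request() below.
 */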

#define CSR_OFFSET		0x40
#define CSR_LEAF		0x80
#define CSR_DIRECTORY		0xc0

#define CSR_DESCRIPTOR		0x01
#define CSR_VENDOR		0x03
#define CSR_HARDWARE_VERSION	0x04
#define CSR_NODE_CAPABILITIES	0x0c
#define CSR_UNIT		0x11
#define CSR_SPECIFIER_ID	0x12
#define CSR_VERSION		0x13
#define CSR_DEPENDENT_INFO	0x14
#define CSR_MODEL		0x17
#define CSR_INSTANCE		0x18
#define CSR_DIRECTORY_ID	0x20

struct fw_csr_iterator {
	u32 *p;
	u32 *end;
};

void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);

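/*
 * Illustrative sketch (not part of this header's API):  walking a config ROM
 * directory, e.g. fw_unit.directory, to look up an immediate entry such as
 * the model ID.
 *
 *	static int example_find_model(u32 *directory)
 *	{
 *		struct fw_csr_iterator ci;
 *		int key, value;
 *
 *		fw_csr_iterator_init(&ci, directory);
 *		while (fw_csr_iterator_next(&ci, &key, &value))
 *			if (key == CSR_MODEL)
 *				return value;
 *
 *		return -ENOENT;
 *	}
 */
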
extern struct bus_type fw_bus_type;

struct fw_card_driver;
struct fw_node;

struct fw_card {
	const struct fw_card_driver *driver;
	struct device *device;
	struct kref kref;
	struct completion done;

	int node_id;
	int generation;
	int current_tlabel;
	u64 tlabel_mask;
	struct list_head transaction_list;
	struct timer_list flush_timer;
	unsigned long reset_jiffies;

	unsigned long long guid;
	unsigned max_receive;
	int link_speed;
	int config_rom_generation;

	spinlock_t lock; /* Take this lock when handling the lists in
			  * this struct. */
	struct fw_node *local_node;
	struct fw_node *root_node;
	struct fw_node *irm_node;
	u8 color; /* must be u8 to match the definition in struct fw_node */
	int gap_count;
	bool beta_repeaters_present;

	int index;

	struct list_head link;

	/* Work struct for BM duties. */
	struct delayed_work work;
	int bm_retries;
	int bm_generation;
	__be32 bm_transaction_data[2];

	bool broadcast_channel_allocated;
	u32 broadcast_channel;
	u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
};

static inline struct fw_card *fw_card_get(struct fw_card *card)
{
	kref_get(&card->kref);

	return card;
}

void fw_card_release(struct kref *kref);

static inline void fw_card_put(struct fw_card *card)
{
	kref_put(&card->kref, fw_card_release);
}
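
/*
 * Illustrative sketch of the reference counting pattern:  code that hands a
 * card pointer to an asynchronous context takes a reference first and drops
 * it once the deferred user is done with the card.
 *
 *	fw_card_get(card);
 *	schedule_my_work(card);		(hypothetical deferred user)
 *	...
 *	fw_card_put(card);		(from that deferred context)
 */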

struct fw_attribute_group {
	struct attribute_group *groups[2];
	struct attribute_group group;
	struct attribute *attrs[12];
};

enum fw_device_state {
	FW_DEVICE_INITIALIZING,
	FW_DEVICE_RUNNING,
	FW_DEVICE_GONE,
	FW_DEVICE_SHUTDOWN,
};

/*
 * Note, fw_device.generation always has to be read before fw_device.node_id.
 * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
 * to an outdated node_id if the generation was updated in the meantime due
 * to a bus reset.
 *
 * Likewise, fw-core will take care to update .node_id before .generation so
 * that whenever fw_device.generation is current WRT the actual bus generation,
 * fw_device.node_id is guaranteed to be current too.
 *
 * The same applies to fw_device.card->node_id vs. fw_device.generation.
 *
 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
 * was called on the last fw_unit.  Alternatively, they may be accessed while
 * holding fw_device_rwsem.
 */
struct fw_device {
	atomic_t state;
	struct fw_node *node;
	int node_id;
	int generation;
	unsigned max_speed;
	struct fw_card *card;
	struct device device;

	struct mutex client_list_mutex;
	struct list_head client_list;

	u32 *config_rom;
	size_t config_rom_length;
	int config_rom_retries;
	unsigned is_local:1;
	unsigned max_rec:4;
	unsigned cmc:1;
	unsigned irmc:1;
	unsigned bc_implemented:2;

	struct delayed_work work;
	struct fw_attribute_group attribute_group;
};

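/*
 * A minimal sketch of the read order described above, as drivers typically
 * use it before sending a request (variable names are illustrative):
 *
 *	generation = device->generation;
 *	smp_rmb();	(node IDs must not be older than the generation)
 *	node_id = device->node_id;
 *
 * Passing that generation to fw_send_request() or fw_run_transaction() lets
 * the core fail the request (RCODE_GENERATION) rather than send it to a
 * stale node ID after a bus reset.
 */
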
static inline struct fw_device *fw_device(struct device *dev)
{
	return container_of(dev, struct fw_device, device);
}

static inline int fw_device_is_shutdown(struct fw_device *device)
{
	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}

static inline struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}

static inline void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}

int fw_device_enable_phys_dma(struct fw_device *device);

/*
 * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
 */
struct fw_unit {
	struct device device;
	u32 *directory;
	struct fw_attribute_group attribute_group;
};

static inline struct fw_unit *fw_unit(struct device *dev)
{
	return container_of(dev, struct fw_unit, device);
}

static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
	get_device(&unit->device);

	return unit;
}

static inline void fw_unit_put(struct fw_unit *unit)
{
	put_device(&unit->device);
}

static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
{
	return fw_device(unit->device.parent);
}

struct ieee1394_device_id;

struct fw_driver {
	struct device_driver driver;
	/* Called when the parent device sits through a bus reset. */
	void (*update)(struct fw_unit *unit);
	const struct ieee1394_device_id *id_table;
};

struct fw_packet;
struct fw_request;

typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
				     struct fw_card *card, int status);
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
					  void *data, size_t length,
					  void *callback_data);
/*
 * Important note:  The callback must guarantee that either fw_send_response()
 * or kfree() is called on the @request.
 */
typedef void (*fw_address_callback_t)(struct fw_card *card,
				      struct fw_request *request,
				      int tcode, int destination, int source,
				      int generation, int speed,
				      unsigned long long offset,
				      void *data, size_t length,
				      void *callback_data);
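
/*
 * Illustrative sketch of an address handler that honors the rule above; the
 * tcode and rcode constants come from <linux/firewire-constants.h>, and the
 * handler name is made up for the example.
 *
 *	static void example_address_callback(struct fw_card *card,
 *			struct fw_request *request, int tcode,
 *			int destination, int source, int generation,
 *			int speed, unsigned long long offset,
 *			void *data, size_t length, void *callback_data)
 *	{
 *		if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
 *			(consume the quadlet at data here)
 *			fw_send_response(card, request, RCODE_COMPLETE);
 *		} else {
 *			fw_send_response(card, request, RCODE_TYPE_ERROR);
 *		}
 *	}
 *
 * Such a handler is registered with fw_core_add_address_handler(), e.g.
 * within fw_high_memory_region; the allocated offset is returned in
 * fw_address_handler.offset.
 */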

struct fw_packet {
	int speed;
	int generation;
	u32 header[4];
	size_t header_length;
	void *payload;
	size_t payload_length;
	dma_addr_t payload_bus;
	u32 timestamp;

	/*
	 * This callback is called when the packet transmission has
	 * completed; for successful transmission, the status code is
	 * the ack received from the destination, otherwise it's a
	 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
	 * The callback can be called from tasklet context and thus
	 * must never block.
	 */
	fw_packet_callback_t callback;
	int ack;
	struct list_head link;
	void *driver_data;
};

struct fw_transaction {
	int node_id; /* The generation is implied; it is always the current. */
	int tlabel;
	int timestamp;
	struct list_head link;

	struct fw_packet packet;

	/*
	 * The data passed to the callback is valid only during the
	 * callback.
	 */
	fw_transaction_callback_t callback;
	void *callback_data;
};

struct fw_address_handler {
	u64 offset;
	size_t length;
	fw_address_callback_t address_callback;
	void *callback_data;
	struct list_head link;
};

struct fw_address_region {
	u64 start;
	u64 end;
};

extern const struct fw_address_region fw_high_memory_region;

int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region);
void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode);
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
		     int tcode, int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data);
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length);
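
/*
 * Illustrative sketch of a blocking quadlet read with fw_run_transaction(),
 * which sleeps until the transaction completes and returns an RCODE_* value:
 *
 *	__be32 quadlet;
 *	int rcode;
 *
 *	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
 *				   device->node_id, device->generation,
 *				   device->max_speed,
 *				   CSR_REGISTER_BASE + CSR_CONFIG_ROM,
 *				   &quadlet, sizeof(quadlet));
 *	if (rcode == RCODE_COMPLETE)
 *		(quadlet now holds the first config ROM quadlet, big-endian)
 *
 * node_id and generation are read as described at struct fw_device above.
 */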

static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
	return tag << 14 | channel << 8 | sy;
}
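
/*
 * For example, tag 1, channel 5, sy 0 yields
 * (1 << 14) | (5 << 8) | 0 == 0x4500, used as the destination_id when
 * sending an asynchronous stream packet.
 */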

struct fw_descriptor {
	struct list_head link;
	size_t length;
	u32 immediate;
	u32 key;
	const u32 *data;
};

int fw_core_add_descriptor(struct fw_descriptor *desc);
void fw_core_remove_descriptor(struct fw_descriptor *desc);
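
/*
 * Illustrative sketch of adding a unit directory to the local node's config
 * ROM; the data array contents are protocol specific and omitted here.
 *
 *	static const u32 example_unit_directory_data[] = { ... };
 *
 *	static struct fw_descriptor example_unit_directory = {
 *		.length = ARRAY_SIZE(example_unit_directory_data),
 *		.key    = (CSR_DIRECTORY | CSR_UNIT) << 24,
 *		.data   = example_unit_directory_data,
 *	};
 *
 *	fw_core_add_descriptor(&example_unit_directory);
 */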

/*
 * The iso packet format allows for an immediate header/payload part
 * stored in 'header' immediately after the packet info plus an
 * indirect payload part that is pointed to by the 'payload' field.
 * Applications can use one or the other or both to implement simple
 * low-bandwidth streaming (e.g. audio) or more advanced
 * scatter-gather streaming (e.g. assembling video frames automatically).
 */
struct fw_iso_packet {
	u16 payload_length;	/* Length of indirect payload. */
	u32 interrupt:1;	/* Generate interrupt on this packet */
	u32 skip:1;		/* Set to not send packet at all. */
	u32 tag:2;
	u32 sy:4;
	u32 header_length:8;	/* Length of immediate header. */
	u32 header[0];
};
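
/*
 * Illustrative sketch of queueing a transmit packet with two immediate
 * header quadlets; frame_bytes, buf_offset, ctx and iso_buffer are
 * placeholders for the caller's state:
 *
 *	struct {
 *		struct fw_iso_packet packet;
 *		u32 header[2];
 *	} p;
 *
 *	p.packet.payload_length = frame_bytes;
 *	p.packet.interrupt = 1;		(run the context callback afterwards)
 *	p.packet.skip = 0;
 *	p.packet.tag = 1;
 *	p.packet.sy = 0;
 *	p.packet.header_length = 8;	(two quadlets in p.header)
 *	(fill p.header[0] and p.header[1])
 *
 *	fw_iso_context_queue(ctx, &p.packet, &iso_buffer, buf_offset);
 */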

#define FW_ISO_CONTEXT_TRANSMIT	0
#define FW_ISO_CONTEXT_RECEIVE	1

#define FW_ISO_CONTEXT_MATCH_TAG0	 1
#define FW_ISO_CONTEXT_MATCH_TAG1	 2
#define FW_ISO_CONTEXT_MATCH_TAG2	 4
#define FW_ISO_CONTEXT_MATCH_TAG3	 8
#define FW_ISO_CONTEXT_MATCH_ALL_TAGS	15

/*
 * An iso buffer is just a set of pages mapped for DMA in the
 * specified direction.  Since the pages are to be used for DMA, they
 * are not mapped into the kernel virtual address space.  We store the
 * DMA address in the page private. The helper function
 * fw_iso_buffer_map() will map the pages into a given vma.
 */
struct fw_iso_buffer {
	enum dma_data_direction direction;
	struct page **pages;
	int page_count;
};

int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);

struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
				  u32 cycle, size_t header_length,
				  void *header, void *data);
struct fw_iso_context {
	struct fw_card *card;
	int type;
	int channel;
	int speed;
	size_t header_size;
	fw_iso_callback_t callback;
	void *callback_data;
};

struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
		int type, int channel, int speed, size_t header_size,
		fw_iso_callback_t callback, void *callback_data);
int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);
int fw_iso_context_start(struct fw_iso_context *ctx,
			 int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
void fw_iso_context_destroy(struct fw_iso_context *ctx);
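
/*
 * Illustrative sketch of the receive-context life cycle (error handling
 * omitted; channel, speed, rx_callback and rx_data are placeholders):
 *
 *	struct fw_iso_buffer buffer;
 *	struct fw_iso_context *ctx;
 *
 *	fw_iso_buffer_init(&buffer, card, 16, DMA_FROM_DEVICE);
 *	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE, channel,
 *				    speed, 4, rx_callback, rx_data);
 *	(queue packets with fw_iso_context_queue(), see above)
 *	fw_iso_context_start(ctx, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS);
 *	...
 *	fw_iso_context_stop(ctx);
 *	fw_iso_context_destroy(ctx);
 *	fw_iso_buffer_destroy(&buffer, card);
 *
 * A cycle argument of -1 to fw_iso_context_start() starts the context as
 * soon as possible instead of waiting for a specific cycle.
 */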

#endif /* _LINUX_FIREWIRE_H */