/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */
|  | 6 |  | 
|  | 7 | #ifndef _DM_IO_H | 
|  | 8 | #define _DM_IO_H | 
|  | 9 |  | 
|  | 10 | #include "dm.h" | 
|  | 11 |  | 
/*
 * One contiguous range of sectors on a block device that an io
 * request is directed at.
 */
struct io_region {
	struct block_device *bdev;	/* target block device */
	sector_t sector;		/* first sector of the region */
	sector_t count;			/* length in sectors; zero => region is ignored */
};
|  | 17 |  | 
/*
 * Singly-linked list node chaining together the pages that supply
 * (or receive) the data for an io.
 */
struct page_list {
	struct page_list *next;	/* next node in the chain */
	struct page *page;	/* the page this node refers to */
};
|  | 22 |  | 
|  | 23 |  | 
/*
 * Completion callback used by the asynchronous io functions below.
 *
 * 'error' is a bitset, with each bit indicating whether an error
 * occurred doing io to the corresponding region.  'context' is the
 * opaque pointer the caller handed to the dm_io_async*() call.
 */
typedef void (*io_notify_fn)(unsigned long error, void *context);
|  | 29 |  | 
|  | 30 |  | 
/*
 * Before anyone uses the IO interface they should call
 * dm_io_get(), specifying roughly how many pages they are
 * expecting to perform io on concurrently.
 *
 * This function may block.
 *
 * dm_io_put() balances an earlier dm_io_get() with the same
 * num_pages, releasing the reservation.
 */
int dm_io_get(unsigned int num_pages);
void dm_io_put(unsigned int num_pages);
|  | 40 |  | 
/*
 * Synchronous IO: these block until the io to all regions has
 * completed, then report per-region status through *error_bits
 * (same bitset encoding as io_notify_fn above).
 *
 * Please ensure that the rw flag in the functions below is
 * either READ or WRITE, ie. we don't take READA.  Any
 * regions with a zero count field will be ignored.
 */

/* Data supplied by a page_list chain, starting 'offset' bytes into the first page. */
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits);

/* Data described by an array of bio_vecs. */
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits);

/* Data in virtually contiguous kernel memory (presumably vmalloc'd, hence
 * the _vm suffix — confirm against the dm-io implementation). */
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits);
|  | 57 |  | 
/*
 * Asynchronous IO: these return without waiting; 'fn' is invoked on
 * completion with the per-region error bitset and the caller's
 * 'context' (see io_notify_fn above).  The data-source variants
 * mirror the synchronous functions.
 *
 * The 'where' array may be safely allocated on the stack since
 * the function takes a copy.
 */

/* Data supplied by a page_list chain, starting 'offset' bytes into the first page. */
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context);

/* Data described by an array of bio_vecs. */
int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context);

/* Data in virtually contiguous kernel memory. */
int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context);
|  | 73 |  | 
|  | 74 | #endif |