/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/raid/raid0.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY

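/*
 * raid0 queues no requests of its own; unplugging the array simply
 * forwards the unplug to every member device's queue.
 */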
static void raid0_unplug(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i=0; i<mddev->raid_disks; i++) {
		request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);

		if (r_queue->unplug_fn)
			r_queue->unplug_fn(r_queue);
	}
}

static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		struct block_device *bdev = devlist[i]->bdev;
		request_queue_t *r_queue = bdev_get_queue(bdev);

		if (!r_queue->issue_flush_fn)
			ret = -EOPNOTSUPP;
		else
			ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
	}
	return ret;
}

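/*
 * Illustration (hypothetical sizes): with three devices of 100, 100 and
 * 200 blocks there are two distinct sizes, hence two zones.  Zone 0
 * spans all three devices for the first 100 blocks of each (300 blocks
 * of array space); zone 1 covers the remaining 100 blocks of the large
 * device alone.
 */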
static int create_strip_zones (mddev_t *mddev)
{
	int i, c, j;
	sector_t current_offset, curr_zone_offset;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct list_head *tmp1, *tmp2;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * Count the number of 'same size groups': each distinct device
	 * size starts a new strip zone.
	 */
	conf->nr_strip_zones = 0;

	ITERATE_RDEV(mddev,rdev1,tmp1) {
		printk("raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		ITERATE_RDEV(mddev,rdev2,tmp2) {
			printk("raid0:   comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->size);
			printk(" with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk("raid0:   END\n");
				break;
			}
			if (rdev2->size == rdev1->size)
			{
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk("raid0:   EQUAL\n");
				c = 1;
				break;
			}
			printk("raid0:   NOT EQUAL\n");
		}
		if (!c) {
			printk("raid0:   ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk("raid0: %d zones\n", conf->nr_strip_zones);
		}
	}
	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kmalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kmalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;

	memset(conf->strip_zone, 0, sizeof(struct strip_zone)*
				    conf->nr_strip_zones);
	memset(conf->devlist, 0,
	       sizeof(mdk_rdev_t*) * conf->nr_strip_zones * mddev->raid_disks);

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	ITERATE_RDEV(mddev, rdev1, tmp1) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk("raid0: bad disk number %d - aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk("raid0: multiple devices for %d - aborting!\n",
				j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk("raid0: too few disks (%d of %d) - aborting!\n",
			cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->size = smallest->size * cnt;
	zone->zone_offset = 0;

	current_offset = smallest->size;
	curr_zone_offset = zone->size;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk("raid0: zone %d\n", i);
		zone->dev_offset = current_offset;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
			if (rdev->size > current_offset)
			{
				printk(" contained as device %d\n", c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk("  (%llu) is smallest!.\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(" nope.\n");
		}

		zone->nb_dev = c;
		zone->size = (smallest->size - current_offset) * c;
		printk("raid0: zone->nb_dev: %d, size: %llu\n",
			zone->nb_dev, (unsigned long long)zone->size);

		zone->zone_offset = curr_zone_offset;
		curr_zone_offset += zone->size;

		current_offset = smallest->size;
		printk("raid0: current zone offset: %llu\n",
			(unsigned long long)current_offset);
	}

	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size.  We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
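	/* Illustration (hypothetical numbers): with zones of 300, 100 and
	 * 500 blocks and a page holding 512 zone pointers, min_spacing is
	 * 900/512 rounded down.  Zone 1 alone (100 blocks) already meets
	 * that minimum and is the smallest qualifying run, so hash_spacing
	 * ends up as 100.
	 */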
	conf->hash_spacing = curr_zone_offset;
	min_spacing = curr_zone_offset;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i=0; i < conf->nr_strip_zones-1; i++) {
		sector_t sz = 0;
		for (j=i; j<conf->nr_strip_zones-1 &&
			     sz < min_spacing ; j++)
			sz += conf->strip_zone[j].size;
		if (sz >= min_spacing && sz < conf->hash_spacing)
			conf->hash_spacing = sz;
	}

	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->issue_flush_fn = raid0_issue_flush;

	printk("raid0: done.\n");
	return 0;
 abort:
	return 1;
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bio: the bio that's been built up so far
 *	@biovec: the bio_vec segment that could be appended to it
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bio->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
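	/* e.g. (illustrative): with 64k chunks, chunk_sectors is 128; a
	 * bio already ending at offset 120 within its chunk can accept at
	 * most (128 - 120) << 9 == 4096 more bytes before it would cross
	 * a chunk boundary.
	 */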
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

static int raid0_run (mddev_t *mddev)
{
	unsigned cur=0, i=0, nb_zone;
	s64 size;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	printk("%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);

	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones (mddev))
		goto out_free_conf;

	/* calculate array device size */
	mddev->array_size = 0;
	ITERATE_RDEV(mddev,rdev,tmp)
		mddev->array_size += rdev->size;

	printk("raid0 : md_size is %llu blocks.\n",
		(unsigned long long)mddev->array_size);
	printk("raid0 : conf->hash_spacing is %llu blocks.\n",
		(unsigned long long)conf->hash_spacing);
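	/* nb_zone is ceil(array_size / hash_spacing).  When sector_t is
	 * wider than unsigned long, both values are pre-shifted down
	 * (recorded in conf->preshift) until the divisor fits, since
	 * sector_div() only takes an unsigned long divisor.
	 */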
	{
#if __GNUC__ < 3
		volatile
#endif
		sector_t s = mddev->array_size;
		sector_t space = conf->hash_spacing;
		int round;
		conf->preshift = 0;
		if (sizeof(sector_t) > sizeof(unsigned long)) {
			/* shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(unsigned long)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->preshift++;
			}
		}
		round = sector_div(s, (unsigned long)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk("raid0 : nb_zone is %d.\n", nb_zone);

	printk("raid0 : Allocating %Zd bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
	size = conf->strip_zone[cur].size;

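	/* Fill the hash table so that hash_table[i] points at the zone
	 * containing array offset i * hash_spacing; a lookup then only
	 * needs a short forward walk from that zone.
	 */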
	for (i=0; i< nb_zone; i++) {
		conf->hash_table[i] = conf->strip_zone + cur;
		while (size <= conf->hash_spacing) {
			cur++;
			size += conf->strip_zone[cur].size;
		}
		size -= conf->hash_spacing;
	}
	if (conf->preshift) {
		conf->hash_spacing >>= conf->preshift;
		/* round hash_spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->hash_spacing++;
	}

	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
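	/* e.g. (illustrative): 4 disks with 64k chunks and 4k pages give
	 * a stripe of 64 pages, so ra_pages is raised to at least 128.
	 */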
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return 1;
}

static int raid0_stop (mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

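/*
 * Map an incoming bio onto the right member device.  Returns 1 when the
 * bio has been remapped and the caller should resubmit it, 0 when it was
 * fully handled here (split into two halves, or failed).
 */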
static int raid0_make_request (request_queue_t *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	unsigned long chunk;
	sector_t block, rsect;

	if (bio_data_dir(bio)==WRITE) {
		disk_stat_inc(mddev->gendisk, writes);
		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(mddev->gendisk, reads);
		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
	}

	chunk_size = mddev->chunk_size >> 10;
	chunk_sects = mddev->chunk_size >> 9;
	chunksize_bits = ffz(~chunk_size);
	block = bio->bi_sector >> 1;

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

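	/* Worked example (hypothetical layout): two disks, 64k chunks, so
	 * chunk_size = 64, chunk_sects = 128, chunksize_bits = 6.  For
	 * bi_sector = 300 in zone 0: block = 150, sect_in_chunk = 300 & 127
	 * = 44; the chunk index is 150 >> 6 = 2, which lands on device
	 * 2 % 2 = 0 as that device's chunk row 2 / 2 = 1, giving
	 * rsect = ((1 << 6) + 0) * 2 + 44 = 172.
	 */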
	{
#if __GNUC__ < 3
		volatile
#endif
		sector_t x = block >> conf->preshift;
		sector_div(x, (unsigned long)conf->hash_spacing);
		zone = conf->hash_table[x];
	}

	while (block >= (zone->zone_offset + zone->size))
		zone++;

	sect_in_chunk = bio->bi_sector & ((chunk_size<<1) - 1);

	{
		sector_t x = (block - zone->zone_offset) >> chunksize_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;
		BUG_ON(x != (sector_t)chunk);

		x = block >> chunksize_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
		+ sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_size,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio, bio->bi_size);
	return 0;
}

static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, "      z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		seq_printf(seq, "] zo=%llu do=%llu s=%llu\n",
			(unsigned long long)conf->strip_zone[j].zone_offset,
			(unsigned long long)conf->strip_zone[j].dev_offset,
			(unsigned long long)conf->strip_zone[j].size);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static mdk_personality_t raid0_personality =
{
	.name		= "raid0",
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};

static int __init raid0_init (void)
{
	return register_md_personality (RAID0, &raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (RAID0);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */