/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/raid/raid0.h>

static void raid0_unplug(struct request_queue *q)
{
        mddev_t *mddev = q->queuedata;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
        int i;

        for (i = 0; i < mddev->raid_disks; i++) {
                struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

                blk_unplug(r_queue);
        }
}

static int raid0_congested(void *data, int bits)
{
        mddev_t *mddev = data;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
        int i, ret = 0;

        for (i = 0; i < mddev->raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
}

static int create_strip_zones(mddev_t *mddev)
{
        int i, c, j;
        sector_t current_start, curr_zone_start;
        sector_t min_spacing;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
        struct list_head *tmp1, *tmp2;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];

        /*
         * The number of 'same size groups'
         */
        conf->nr_strip_zones = 0;

        rdev_for_each(rdev1, tmp1, mddev) {
                printk("raid0: looking at %s\n",
                        bdevname(rdev1->bdev, b));
                c = 0;
                rdev_for_each(rdev2, tmp2, mddev) {
                        printk("raid0:   comparing %s(%llu)",
                               bdevname(rdev1->bdev, b),
                               (unsigned long long)rdev1->size);
                        printk(" with %s(%llu)\n",
                               bdevname(rdev2->bdev, b),
                               (unsigned long long)rdev2->size);
                        if (rdev2 == rdev1) {
                                printk("raid0:   END\n");
                                break;
                        }
                        if (rdev2->size == rdev1->size) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                printk("raid0:   EQUAL\n");
                                c = 1;
                                break;
                        }
                        printk("raid0:   NOT EQUAL\n");
                }
                if (!c) {
                        printk("raid0:   ==> UNIQUE\n");
                        conf->nr_strip_zones++;
                        printk("raid0: %d zones\n", conf->nr_strip_zones);
                }
        }
        printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);

        conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
                                conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
                return 1;
        conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
                                conf->nr_strip_zones*mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
                return 1;
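
        /*
         * conf->devlist is a flat nr_strip_zones x raid_disks array of
         * rdev pointers: zone i's row starts raid_disks entries after
         * zone i-1's (see the zone->dev assignments below).  For
         * instance, with 2 zones over 3 disks the rows begin at
         * devlist[0] and devlist[3].
         */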

        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        zone->dev = conf->devlist;
        rdev_for_each(rdev1, tmp1, mddev) {
                int j = rdev1->raid_disk;

                if (j < 0 || j >= mddev->raid_disks) {
                        printk("raid0: bad disk number %d - aborting!\n", j);
                        goto abort;
                }
                if (zone->dev[j]) {
                        printk("raid0: multiple devices for %d - aborting!\n",
                                j);
                        goto abort;
                }
                zone->dev[j] = rdev1;

                blk_queue_stack_limits(mddev->queue,
                                       rdev1->bdev->bd_disk->queue);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sectors to one page, as
                 * a one-page request is never in violation.
                 */
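                /* E.g. with 4K pages this caps requests at
                 * PAGE_SIZE>>9 = 8 sectors (one 4K page); illustrative
                 * numbers only, PAGE_SIZE varies by architecture. */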

                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
                    mddev->queue->max_sectors > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                if (!smallest || (rdev1->size < smallest->size))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
                printk("raid0: too few disks (%d of %d) - aborting!\n",
                        cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->size = smallest->size * cnt;
        zone->zone_start = 0;

        current_start = smallest->size * 2;
        curr_zone_start = zone->size * 2;
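
        /*
         * Illustrative geometry (rdev->size is in 1K blocks, the
         * *_start variables in 512-byte sectors): with two 100G disks
         * and one 50G disk, zone 0 stripes the first 50G of all three
         * devices (150G in total) and the loop below builds zone 1 from
         * the remaining 50G of the two larger disks (100G in total).
         */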

        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++) {
                zone = conf->strip_zone + i;
                zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

                printk("raid0: zone %d\n", i);
                zone->dev_start = current_start;
                smallest = NULL;
                c = 0;

                for (j = 0; j < cnt; j++) {
                        char b[BDEVNAME_SIZE];
                        rdev = conf->strip_zone[0].dev[j];
                        printk("raid0: checking %s ...", bdevname(rdev->bdev, b));
                        if (rdev->size > current_start / 2) {
                                printk(" contained as device %d\n", c);
                                zone->dev[c] = rdev;
                                c++;
                                if (!smallest || (rdev->size < smallest->size)) {
                                        smallest = rdev;
                                        printk("  (%llu) is smallest!\n",
                                                (unsigned long long)rdev->size);
                                }
                        } else
                                printk(" nope.\n");
                }

                zone->nb_dev = c;
                zone->size = (smallest->size - current_start / 2) * c;
                printk("raid0: zone->nb_dev: %d, size: %llu\n",
                        zone->nb_dev, (unsigned long long)zone->size);

                zone->zone_start = curr_zone_start;
                curr_zone_start += zone->size * 2;

                current_start = smallest->size * 2;
                printk(KERN_INFO "raid0: current zone start: %llu\n",
                        (unsigned long long)current_start);
        }

        /* Now find appropriate hash spacing.
         * We want a number which causes most hash entries to cover
         * at most two strips, but the hash table must be at most
         * 1 PAGE.  We choose the smallest strip, or contiguous collection
         * of strips, that is big enough.  We never consider the last
         * strip though, as its size has no bearing on the efficacy of
         * the hash table.
         */
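        /*
         * For example, assuming 4K pages and 8-byte pointers, the table
         * holds at most 4096/8 = 512 entries, so min_spacing below is
         * 1/512th of the array size: on a 1T array each hash slot must
         * cover at least about 2G.
         */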
        conf->hash_spacing = curr_zone_start / 2;
        min_spacing = curr_zone_start / 2;
        sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
        for (i = 0; i < conf->nr_strip_zones-1; i++) {
                sector_t sz = 0;
                for (j = i; j < conf->nr_strip_zones-1 &&
                             sz < min_spacing; j++)
                        sz += conf->strip_zone[j].size;
                if (sz >= min_spacing && sz < conf->hash_spacing)
                        conf->hash_spacing = sz;
        }

        mddev->queue->unplug_fn = raid0_unplug;

        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;

        printk("raid0: done.\n");
        return 0;
 abort:
        return 1;
}

/**
 *      raid0_mergeable_bvec -- tell the bio layer whether two requests can be merged
 *      @q: request queue
 *      @bvm: properties of the new bio
 *      @biovec: the request that could be merged to it
 *
 *      Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
                                struct bvec_merge_data *bvm,
                                struct bio_vec *biovec)
{
        mddev_t *mddev = q->queuedata;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_size >> 9;
        unsigned int bio_sectors = bvm->bi_size >> 9;

        max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
        if (max < 0)
                max = 0; /* bio_add cannot handle a negative return */
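        /*
         * E.g. with 64K chunks (chunk_sectors = 128), a bio starting at
         * chunk offset 100 with bio_sectors = 20 leaves 128 - 120 = 8
         * sectors before the boundary, so max = 8 << 9 = 4096 bytes.
         */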
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        else
                return max;
}

static int raid0_run(mddev_t *mddev)
{
        unsigned cur = 0, i = 0, nb_zone;
        s64 size;
        raid0_conf_t *conf;
        mdk_rdev_t *rdev;
        struct list_head *tmp;

        if (mddev->chunk_size == 0) {
                printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
                return -EINVAL;
        }
        printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
               mdname(mddev),
               mddev->chunk_size >> 9,
               (mddev->chunk_size>>1)-1);
        blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
        blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
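        /*
         * E.g. with a 64K chunk_size this sets max_sectors to
         * 65536 >> 9 = 128 sectors and the segment boundary mask to
         * (65536 >> 1) - 1 = 0x7fff, so no segment crosses a 32K
         * boundary.
         */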
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;

        conf = kmalloc(sizeof(raid0_conf_t), GFP_KERNEL);
        if (!conf)
                goto out;
        mddev->private = (void *)conf;

        conf->strip_zone = NULL;
        conf->devlist = NULL;
        if (create_strip_zones(mddev))
                goto out_free_conf;

        /* calculate array device size */
        mddev->array_sectors = 0;
        rdev_for_each(rdev, tmp, mddev)
                mddev->array_sectors += rdev->size * 2;

        printk("raid0 : md_size is %llu blocks.\n",
                (unsigned long long)mddev->array_sectors / 2);
        printk("raid0 : conf->hash_spacing is %llu blocks.\n",
                (unsigned long long)conf->hash_spacing);
        {
                sector_t s = mddev->array_sectors / 2;
                sector_t space = conf->hash_spacing;
                int round;
                conf->preshift = 0;
                if (sizeof(sector_t) > sizeof(u32)) {
                        /* shift down space and s so that sector_div will work */
                        while (space > (sector_t) (~(u32)0)) {
                                s >>= 1;
                                space >>= 1;
                                s += 1; /* force round-up */
                                conf->preshift++;
                        }
                }
                round = sector_div(s, (u32)space) ? 1 : 0;
                nb_zone = s + round;
        }
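        /*
         * nb_zone ends up as the array size divided by hash_spacing,
         * rounded up: e.g. 600G of array at 2G spacing yields 300
         * slots.  preshift only becomes non-zero when hash_spacing
         * does not fit in 32 bits, which keeps the divisor acceptable
         * to sector_div().
         */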
        printk("raid0 : nb_zone is %d.\n", nb_zone);

        printk("raid0 : Allocating %Zd bytes for hash.\n",
                                nb_zone*sizeof(struct strip_zone*));
        conf->hash_table = kmalloc(sizeof(struct strip_zone *)*nb_zone, GFP_KERNEL);
        if (!conf->hash_table)
                goto out_free_conf;
        size = conf->strip_zone[cur].size;

        conf->hash_table[0] = conf->strip_zone + cur;
        for (i = 1; i < nb_zone; i++) {
                while (size <= conf->hash_spacing) {
                        cur++;
                        size += conf->strip_zone[cur].size;
                }
                size -= conf->hash_spacing;
                conf->hash_table[i] = conf->strip_zone + cur;
        }
        if (conf->preshift) {
                conf->hash_spacing >>= conf->preshift;
                /* round hash_spacing up so when we divide by it, we
                 * err on the side of too-low, which is safest
                 */
                conf->hash_spacing++;
        }

        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe, i.e. the number of
         * devices multiplied by the chunk size, times 2.
         * If an individual device has an ra_pages greater than the
         * chunk size, then we will not drive that device as hard as it
         * wants.  We consider this a configuration error: a larger
         * chunksize should be used in that case.
         */
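        /*
         * E.g. 4 disks with 64K chunks and 4K pages: stripe =
         * 4 * 65536 / 4096 = 64 pages, so ra_pages is raised to at
         * least 128 pages (512K), i.e. two full stripes.
         */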
        {
                int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }

        blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
        return 0;

out_free_conf:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
out:
        return -ENOMEM;
}

static int raid0_stop(mddev_t *mddev)
{
        raid0_conf_t *conf = mddev_to_conf(mddev);

        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        kfree(conf->hash_table);
        conf->hash_table = NULL;
        kfree(conf->strip_zone);
        conf->strip_zone = NULL;
        kfree(conf);
        mddev->private = NULL;

        return 0;
}

static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
        mddev_t *mddev = q->queuedata;
        unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
        sector_t chunk;
        sector_t sector, rsect;
        const int rw = bio_data_dir(bio);
        int cpu;

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }

        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
                      bio_sectors(bio));
        part_stat_unlock();

        chunk_sects = mddev->chunk_size >> 9;
        chunksect_bits = ffz(~chunk_sects);
        sector = bio->bi_sector;

        if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this from happening */
                if (bio->bi_vcnt != 1 ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one-page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
                if (raid0_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (raid0_make_request(q, &bp->bio2))
                        generic_make_request(&bp->bio2);

                bio_pair_release(bp);
                return 0;
        }
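        /*
         * Split example: with chunk_sects = 128, a 16-sector bio at
         * sector 120 would end at sector 136, crossing the chunk edge
         * at 128, so bio_split() cuts it after 128 - 120 = 8 sectors.
         */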

        {
                sector_t x = sector >> (conf->preshift + 1);
                sector_div(x, (u32)conf->hash_spacing);
                zone = conf->hash_table[x];
        }

        while (sector / 2 >= (zone->zone_start / 2 + zone->size))
                zone++;

        sect_in_chunk = bio->bi_sector & (chunk_sects - 1);

        {
                sector_t x = (sector - zone->zone_start) >> chunksect_bits;

                sector_div(x, zone->nb_dev);
                chunk = x;

                x = sector >> chunksect_bits;
                tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
        }
        rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
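        /*
         * Mapping example (zone 0, nb_dev = 2, chunk_sects = 128,
         * chunksect_bits = 7): array sector 300 is chunk 2, offset 44;
         * chunk 2 is device 0's second chunk (chunk = 1), so rsect =
         * (1 << 7) + 0 + 44 = 172 on that device.
         */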

        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = rsect + tmp_dev->data_offset;

        /*
         * Let the main block layer submit the IO and resolve recursion:
         */
        return 1;

bad_map:
        printk("raid0_make_request bug: can't convert block across chunks"
                " or bigger than %dk %llu %d\n", chunk_sects / 2,
                (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

        bio_io_error(bio);
        return 0;
}

static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
        int j, k, h;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = mddev_to_conf(mddev);

        h = 0;
        for (j = 0; j < conf->nr_strip_zones; j++) {
                seq_printf(seq, "      z%d", j);
                if (conf->hash_table[h] == conf->strip_zone+j)
                        seq_printf(seq, "(h%d)", h++);
                seq_printf(seq, "=[");
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        seq_printf(seq, "%s/", bdevname(
                                conf->strip_zone[j].dev[k]->bdev, b));

                seq_printf(seq, "] zs=%d ds=%d s=%d\n",
                                conf->strip_zone[j].zone_start,
                                conf->strip_zone[j].dev_start,
                                conf->strip_zone[j].size);
        }
#endif
        seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
        return;
}

static struct mdk_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
};

static int __init raid0_init(void)
{
        return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
        unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");