--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
- int size = offsetof(r1bio_t, bios[pi->raid_disks]);
+ int size = offsetof(struct r1bio, bios[pi->raid_disks]);
/* allocate a r1bio with room for raid_disks entries in the bios array */
return kzalloc(size, gfp_flags);
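
The offsetof(struct r1bio, bios[pi->raid_disks]) idiom sizes one allocation covering the struct header plus its trailing bios[] array in a single kzalloc. A minimal, self-contained sketch of the same technique, with a hypothetical struct standing in for struct r1bio (indexing offsetof with a runtime value relies on the GCC/Clang __builtin_offsetof extension, as the kernel does):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct holder {
	int nslots;		/* bookkeeping, like pool_info->raid_disks */
	void *slots[];		/* trailing array, sized at allocation time */
};

int main(void)
{
	int n = 4;
	/* header plus n pointer slots, one contiguous zeroed block */
	size_t size = offsetof(struct holder, slots[n]);
	struct holder *h = calloc(1, size);
	if (!h)
		return 1;
	h->nslots = n;
	printf("allocated %zu bytes for %d slots\n", size, n);
	free(h);
	return 0;
}
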
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
struct page *page;
- r1bio_t *r1_bio;
+ struct r1bio *r1_bio;
struct bio *bio;
int i, j;
static void r1buf_pool_free(void *__r1_bio, void *data)
{
struct pool_info *pi = data;
int i,j;
- r1bio_t *r1bio = __r1_bio;
+ struct r1bio *r1bio = __r1_bio;
for (i = 0; i < RESYNC_PAGES; i++)
for (j = pi->raid_disks; j-- ;) {
r1bio_pool_free(r1bio, data);
}
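
Both callbacks exist to be handed to a mempool, which keeps a minimum number of r1bios preallocated so writes can still make progress under memory pressure. Roughly how raid1.c wires them up at array setup time (a sketch from the same kernel era, not part of this hunk):

	/* pool_data (conf->poolinfo) is passed back to the callbacks
	 * as their 'data' argument */
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS,
					  r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto abort;
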
-static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
+static void put_all_bios(conf_t *conf, struct r1bio *r1_bio)
{
int i;
}
}
-static void free_r1bio(r1bio_t *r1_bio)
+static void free_r1bio(struct r1bio *r1_bio)
{
conf_t *conf = r1_bio->mddev->private;
mempool_free(r1_bio, conf->r1bio_pool);
}
-static void put_buf(r1bio_t *r1_bio)
+static void put_buf(struct r1bio *r1_bio)
{
conf_t *conf = r1_bio->mddev->private;
int i;
lower_barrier(conf);
}
-static void reschedule_retry(r1bio_t *r1_bio)
+static void reschedule_retry(struct r1bio *r1_bio)
{
unsigned long flags;
struct mddev *mddev = r1_bio->mddev;
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
* cache layer.
*/
-static void call_bio_endio(r1bio_t *r1_bio)
+static void call_bio_endio(struct r1bio *r1_bio)
{
struct bio *bio = r1_bio->master_bio;
int done;
}
}
-static void raid_end_bio_io(r1bio_t *r1_bio)
+static void raid_end_bio_io(struct r1bio *r1_bio)
{
struct bio *bio = r1_bio->master_bio;
/*
* Update disk head position estimator based on IRQ completion info.
*/
-static inline void update_head_pos(int disk, r1bio_t *r1_bio)
+static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
conf_t *conf = r1_bio->mddev->private;
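
The elided body is a single assignment: it records where the disk head will sit once this request completes, so read_balance() can later prefer the mirror with the shortest seek. Reconstructed for reference (not part of the hunk):

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
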
/*
* Find the disk number which triggered given bio
*/
-static int find_bio_disk(r1bio_t *r1_bio, struct bio *bio)
+static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
int mirror;
int raid_disks = r1_bio->mddev->raid_disks;
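
The lookup itself is a linear scan for the bios[] slot holding this bio pointer; a sketch of the elided body (reconstructed):

	for (mirror = 0; mirror < raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;
	BUG_ON(mirror == raid_disks);	/* the bio must belong to this r1bio */
	update_head_pos(mirror, r1_bio);
	return mirror;
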
static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t *r1_bio = bio->bi_private;
+ struct r1bio *r1_bio = bio->bi_private;
int mirror;
conf_t *conf = r1_bio->mddev->private;
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
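
The completion handlers recover their r1bio through bio->bi_private, which the submission path sets alongside bi_end_io when it builds each per-device bio. Roughly how the read path wires this up (a sketch of the era's make_request, not part of this hunk):

	read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
	read_bio->bi_bdev = mirror->rdev->bdev;
	read_bio->bi_end_io = raid1_end_read_request;	/* runs at IRQ time */
	read_bio->bi_private = r1_bio;			/* recovered via bi_private */
	generic_make_request(read_bio);
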
-static void close_write(r1bio_t *r1_bio)
+static void close_write(struct r1bio *r1_bio)
{
/* it really is the end of this request */
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
md_write_end(r1_bio->mddev);
}
-static void r1_bio_write_done(r1bio_t *r1_bio)
+static void r1_bio_write_done(struct r1bio *r1_bio)
{
if (!atomic_dec_and_test(&r1_bio->remaining))
return;
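
A mirrored write only finishes when every per-device sub-write has completed; remaining is seeded with the number of sub-bios and each completion decrements it. A self-contained userspace sketch of the pattern (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

struct write_ctx {
	atomic_int remaining;	/* one count per mirror written to */
};

/* called once per completed sub-write, possibly concurrently */
static void one_done(struct write_ctx *ctx)
{
	/* only the caller that drops the count to zero ends the request */
	if (atomic_fetch_sub(&ctx->remaining, 1) == 1)
		printf("all mirrors done, ending master bio\n");
}

int main(void)
{
	struct write_ctx ctx = { .remaining = 3 };	/* 3-way mirror */
	one_done(&ctx);
	one_done(&ctx);
	one_done(&ctx);		/* this call reports completion */
	return 0;
}
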
static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t *r1_bio = bio->bi_private;
+ struct r1bio *r1_bio = bio->bi_private;
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = r1_bio->mddev->private;
struct bio *to_put = NULL;
*
* The rdev for the device selected will have nr_pending incremented.
*/
-static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
+static int read_balance(conf_t *conf, struct r1bio *r1_bio, int *max_sectors)
{
const sector_t this_sector = r1_bio->sector;
int sectors;
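
Among in-sync mirrors, read_balance() prefers the device whose recorded head position is closest to the target sector, which keeps sequential readers on one disk. A simplified self-contained sketch of that selection rule (field names hypothetical; the real function also handles bad blocks, write-mostly devices, and sequential-read affinity):

#include <stdlib.h>

struct mirror_state {
	long long head_position;	/* kept current by update_head_pos() */
	int in_sync;
};

/* pick the in-sync mirror with the shortest seek distance; -1 if none */
static int pick_mirror(const struct mirror_state *m, int n, long long sector)
{
	int best = -1;
	long long best_dist = 0;

	for (int i = 0; i < n; i++) {
		long long dist;
		if (!m[i].in_sync)
			continue;
		dist = llabs(sector - m[i].head_position);
		if (best < 0 || dist < best_dist) {
			best = i;
			best_dist = dist;
		}
	}
	return best;
}
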
/* duplicate the data pages for behind I/O
*/
-static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
+static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
int i;
struct bio_vec *bvec;
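
Write-behind lets the master bio complete before slow (write-mostly) mirrors have finished, so the driver must snapshot the caller's pages first. A sketch of the copy loop, close to the era's alloc_behind_pages() (reconstructed; the real function also falls back to synchronous I/O when any allocation fails):

	bio_for_each_segment(bvec, bio, i) {
		struct page *page = alloc_page(GFP_NOIO);
		if (unlikely(!page))
			goto do_sync_io;	/* give up on write-behind */
		memcpy(kmap(page) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset,
		       bvec->bv_len);
		kunmap(page);
		kunmap(bvec->bv_page);
		pages[i] = page;	/* 'pages' array name illustrative */
	}
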
{
conf_t *conf = mddev->private;
mirror_info_t *mirror;
- r1bio_t *r1_bio;
+ struct r1bio *r1_bio;
struct bio *read_bio;
int i, disks;
struct bitmap *bitmap;
static void end_sync_read(struct bio *bio, int error)
{
- r1bio_t *r1_bio = bio->bi_private;
+ struct r1bio *r1_bio = bio->bi_private;
update_head_pos(r1_bio->read_disk, r1_bio);
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t *r1_bio = bio->bi_private;
+ struct r1bio *r1_bio = bio->bi_private;
struct mddev *mddev = r1_bio->mddev;
conf_t *conf = mddev->private;
int mirror=0;
return 0;
}
-static int fix_sync_read_error(r1bio_t *r1_bio)
+static int fix_sync_read_error(struct r1bio *r1_bio)
{
/* Try some synchronous reads of other devices to get
* good data, much like with normal read errors. Only
return 1;
}
-static int process_checks(r1bio_t *r1_bio)
+static int process_checks(struct r1bio *r1_bio)
{
/* We have read all readable devices. If we haven't
* got the block, then there is no hope left.
return 0;
}
-static void sync_request_write(struct mddev *mddev, r1bio_t *r1_bio)
+static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
conf_t *conf = mddev->private;
int i;
return test_bit(BIO_UPTODATE, &bio->bi_flags);
}
-static int narrow_write_error(r1bio_t *r1_bio, int i)
+static int narrow_write_error(struct r1bio *r1_bio, int i)
{
struct mddev *mddev = r1_bio->mddev;
conf_t *conf = mddev->private;
return ok;
}
-static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
+static void handle_sync_write_finished(conf_t *conf, struct r1bio *r1_bio)
{
int m;
int s = r1_bio->sectors;
md_done_sync(conf->mddev, s, 1);
}
-static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
+static void handle_write_finished(conf_t *conf, struct r1bio *r1_bio)
{
int m;
for (m = 0; m < conf->raid_disks ; m++)
raid_end_bio_io(r1_bio);
}
-static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
+static void handle_read_error(conf_t *conf, struct r1bio *r1_bio)
{
int disk;
int max_sectors;
static void raid1d(struct mddev *mddev)
{
- r1bio_t *r1_bio;
+ struct r1bio *r1_bio;
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
spin_unlock_irqrestore(&conf->device_lock, flags);
break;
}
- r1_bio = list_entry(head->prev, r1bio_t, retry_list);
+ r1_bio = list_entry(head->prev, struct r1bio, retry_list);
list_del(head->prev);
conf->nr_queued--;
spin_unlock_irqrestore(&conf->device_lock, flags);
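
list_entry() is the container_of() pattern: it converts the embedded retry_list pointer handed back by the list into the struct r1bio that contains it, which is why the typedef swap has to touch these call sites as well. A self-contained sketch of the mechanism:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* container_of: subtract the member's offset from the member's address */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct r1bio_like {			/* stand-in for struct r1bio */
	long sector;
	struct list_head retry_list;
};

int main(void)
{
	struct r1bio_like r = { .sector = 12345 };
	struct list_head *node = &r.retry_list;	/* what the list yields */
	struct r1bio_like *back =
		list_entry(node, struct r1bio_like, retry_list);
	printf("sector = %ld\n", back->sector);	/* prints 12345 */
	return 0;
}
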
static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
conf_t *conf = mddev->private;
- r1bio_t *r1_bio;
+ struct r1bio *r1_bio;
struct bio *bio;
sector_t max_sector, nr_sectors;
int disk = -1;
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
- int size = offsetof(struct r10bio_s, devs[conf->copies]);
+ int size = offsetof(struct r10bio, devs[conf->copies]);
/* allocate a r10bio with room for copies entries in the devs array */
return kzalloc(size, gfp_flags);
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
struct page *page;
- r10bio_t *r10_bio;
+ struct r10bio *r10_bio;
struct bio *bio;
int i, j;
int nalloc;
static void r10buf_pool_free(void *__r10_bio, void *data)
{
int i;
conf_t *conf = data;
- r10bio_t *r10bio = __r10_bio;
+ struct r10bio *r10bio = __r10_bio;
int j;
for (j=0; j < conf->copies; j++) {
r10bio_pool_free(r10bio, conf);
}
-static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
+static void put_all_bios(conf_t *conf, struct r10bio *r10_bio)
{
int i;
}
}
-static void free_r10bio(r10bio_t *r10_bio)
+static void free_r10bio(struct r10bio *r10_bio)
{
conf_t *conf = r10_bio->mddev->private;
mempool_free(r10_bio, conf->r10bio_pool);
}
-static void put_buf(r10bio_t *r10_bio)
+static void put_buf(struct r10bio *r10_bio)
{
conf_t *conf = r10_bio->mddev->private;
lower_barrier(conf);
}
-static void reschedule_retry(r10bio_t *r10_bio)
+static void reschedule_retry(struct r10bio *r10_bio)
{
unsigned long flags;
struct mddev *mddev = r10_bio->mddev;
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
* cache layer.
*/
-static void raid_end_bio_io(r10bio_t *r10_bio)
+static void raid_end_bio_io(struct r10bio *r10_bio)
{
struct bio *bio = r10_bio->master_bio;
int done;
/*
* Update disk head position estimator based on IRQ completion info.
*/
-static inline void update_head_pos(int slot, r10bio_t *r10_bio)
+static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
conf_t *conf = r10_bio->mddev->private;
/*
* Find the disk number which triggered given bio
*/
-static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
+static int find_bio_disk(conf_t *conf, struct r10bio *r10_bio,
struct bio *bio, int *slotp)
{
int slot;
static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t *r10_bio = bio->bi_private;
+ struct r10bio *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
}
}
-static void close_write(r10bio_t *r10_bio)
+static void close_write(struct r10bio *r10_bio)
{
/* clear the bitmap if all writes complete successfully */
bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
md_write_end(r10_bio->mddev);
}
-static void one_write_done(r10bio_t *r10_bio)
+static void one_write_done(struct r10bio *r10_bio)
{
if (atomic_dec_and_test(&r10_bio->remaining)) {
if (test_bit(R10BIO_WriteError, &r10_bio->state))
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t *r10_bio = bio->bi_private;
+ struct r10bio *r10_bio = bio->bi_private;
int dev;
int dec_rdev = 1;
conf_t *conf = r10_bio->mddev->private;
* sector offset to a virtual address
*/
-static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
+static void raid10_find_phys(conf_t *conf, struct r10bio *r10bio)
{
int n,f;
sector_t sector;
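
raid10_find_phys() expands one array-relative sector into `copies` (device, sector) pairs according to the near/far layout. A simplified, self-contained sketch of the near-copies placement (chunk math reduced to the essentials; parameter names hypothetical):

#include <stdio.h>

/* near-copies layout: each chunk lives on near_copies adjacent devices,
 * and consecutive logical chunks advance around the array */
static void find_phys(long long sector, int chunk_sectors,
		      int raid_disks, int near_copies)
{
	long long chunk = sector / chunk_sectors;	/* logical chunk */
	long long offset = sector % chunk_sectors;	/* within the chunk */
	long long stripe = chunk * near_copies / raid_disks;
	int dev = (int)((chunk * near_copies) % raid_disks);

	for (int n = 0; n < near_copies; n++) {
		printf("copy %d -> dev %d, sector %lld\n",
		       n, dev, stripe * chunk_sectors + offset);
		dev = (dev + 1) % raid_disks;
	}
}

int main(void)
{
	/* e.g. 4 disks, 64-sector chunks, 2 near copies */
	find_phys(130, 64, 4, 2);
	return 0;
}
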
* FIXME: possibly should rethink readbalancing and do it differently
* depending on near_copies / far_copies geometry.
*/
-static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
+static int read_balance(conf_t *conf, struct r10bio *r10_bio, int *max_sectors)
{
const sector_t this_sector = r10_bio->sector;
int disk, slot;
{
conf_t *conf = mddev->private;
mirror_info_t *mirror;
- r10bio_t *r10_bio;
+ struct r10bio *r10_bio;
struct bio *read_bio;
int i;
int chunk_sects = conf->chunk_mask + 1;
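
chunk_mask is chunk_sects - 1, and make_request first checks whether the bio straddles a chunk boundary, since one r10bio maps sectors within a single chunk only; straddling requests get split and resubmitted. A self-contained sketch of the boundary test:

#include <stdbool.h>
#include <stdio.h>

/* does [sector, sector + nsectors) cross a chunk boundary?
 * chunk_sects must be a power of two */
static bool crosses_chunk(long long sector, int nsectors, int chunk_sects)
{
	int chunk_mask = chunk_sects - 1;
	return (sector & chunk_mask) + nsectors > chunk_sects;
}

int main(void)
{
	printf("%d\n", crosses_chunk(60, 8, 64));	/* 1: 60..67 straddles */
	printf("%d\n", crosses_chunk(64, 8, 64));	/* 0: fits one chunk */
	return 0;
}
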
static void end_sync_read(struct bio *bio, int error)
{
- r10bio_t *r10_bio = bio->bi_private;
+ struct r10bio *r10_bio = bio->bi_private;
conf_t *conf = r10_bio->mddev->private;
int d;
}
}
-static void end_sync_request(r10bio_t *r10_bio)
+static void end_sync_request(struct r10bio *r10_bio)
{
struct mddev *mddev = r10_bio->mddev;
md_done_sync(mddev, s, 1);
break;
} else {
- r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
+ struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
test_bit(R10BIO_WriteError, &r10_bio->state))
reschedule_retry(r10_bio);
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t *r10_bio = bio->bi_private;
+ struct r10bio *r10_bio = bio->bi_private;
struct mddev *mddev = r10_bio->mddev;
conf_t *conf = mddev->private;
int d;
* We check if all blocks are in-sync and only write to blocks that
* aren't in sync
*/
-static void sync_request_write(struct mddev *mddev, r10bio_t *r10_bio)
+static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
conf_t *conf = mddev->private;
int i, first;
* The second for writing.
*
*/
-static void fix_recovery_read_error(r10bio_t *r10_bio)
+static void fix_recovery_read_error(struct r10bio *r10_bio)
{
/* We got a read error during recovery.
* We repeat the read in smaller page-sized sections.
}
}
-static void recovery_request_write(struct mddev *mddev, r10bio_t *r10_bio)
+static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
conf_t *conf = mddev->private;
int d;
* 3. Performs writes following reads for array synchronising.
*/
-static void fix_read_error(conf_t *conf, struct mddev *mddev, r10bio_t *r10_bio)
+static void fix_read_error(conf_t *conf, struct mddev *mddev, struct r10bio *r10_bio)
{
int sect = 0; /* Offset from r10_bio->sector */
int sectors = r10_bio->sectors;
return test_bit(BIO_UPTODATE, &bio->bi_flags);
}
-static int narrow_write_error(r10bio_t *r10_bio, int i)
+static int narrow_write_error(struct r10bio *r10_bio, int i)
{
struct bio *bio = r10_bio->master_bio;
struct mddev *mddev = r10_bio->mddev;
return ok;
}
-static void handle_read_error(struct mddev *mddev, r10bio_t *r10_bio)
+static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
{
int slot = r10_bio->read_slot;
int mirror = r10_bio->devs[slot].devnum;
generic_make_request(bio);
}
-static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
+static void handle_write_completed(conf_t *conf, struct r10bio *r10_bio)
{
/* Some sort of write request has finished and it
* succeeded in writing where we thought there was a
static void raid10d(struct mddev *mddev)
{
- r10bio_t *r10_bio;
+ struct r10bio *r10_bio;
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
spin_unlock_irqrestore(&conf->device_lock, flags);
break;
}
- r10_bio = list_entry(head->prev, r10bio_t, retry_list);
+ r10_bio = list_entry(head->prev, struct r10bio, retry_list);
list_del(head->prev);
conf->nr_queued--;
spin_unlock_irqrestore(&conf->device_lock, flags);
int *skipped, int go_faster)
{
conf_t *conf = mddev->private;
- r10bio_t *r10_bio;
+ struct r10bio *r10_bio;
struct bio *biolist = NULL, *bio;
sector_t max_sector, nr_sectors;
int i;
for (i=0 ; i<conf->raid_disks; i++) {
int still_degraded;
- r10bio_t *rb2;
+ struct r10bio *rb2;
sector_t sect;
int must_sync;
int any_working;
}
if (biolist == NULL) {
while (r10_bio) {
- r10bio_t *rb2 = r10_bio;
- r10_bio = (r10bio_t*) rb2->master_bio;
+ struct r10bio *rb2 = r10_bio;
+ r10_bio = (struct r10bio*) rb2->master_bio;
rb2->master_bio = NULL;
put_buf(rb2);
}