--- a/drivers/md/md.c
+++ b/drivers/md/md.c
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
- sectors = mddev->pers->sync_request(mddev, j, &skipped,
- currspeed < speed_min(mddev));
+ sectors = mddev->pers->sync_request(mddev, j, &skipped);
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
break;
[...]
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
- mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
+ mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (mddev_is_clustered(mddev))
md_cluster_ops->resync_finish(mddev);
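
The hint that md_do_sync() used to pass as the fourth argument, "currspeed < speed_min(mddev)", duplicated throttling that the generic layer already performs at the bottom of its resync loop, which is why the personalities can drop it without losing anything. Roughly (paraphrased from drivers/md/md.c of this era, not verbatim):

	if (currspeed > speed_min(mddev)) {
		if (currspeed > speed_max(mddev)) {
			msleep(500);
			goto repeat;	/* re-check the rate limit */
		}
		if (!is_mddev_idle(mddev, 0))
			/* Competing IO: wait for in-flight resync to drain. */
			wait_event(mddev->recovery_wait,
				   !atomic_read(&mddev->recovery_active));
	}

So even with the per-call hint gone, resync still yields to normal IO whenever it runs above the configured minimum speed.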
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
- sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
+ sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
int (*resize) (struct mddev *mddev, sector_t sectors);
sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
int (*check_reshape) (struct mddev *mddev);
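
With the fourth parameter gone, the contract a personality's ->sync_request() hook must honour is otherwise unchanged: process (or skip) a stretch of the array starting at sector_nr, return how many sectors were covered, and set *skipped when progress was made without issuing IO; returning 0 from an in-loop call makes md_do_sync() set MD_RECOVERY_INTR and abort, as the first hunk shows. A minimal stub illustrating the shape (demo_sync_request and its fixed 128-sector step are hypothetical, not kernel code):

	static sector_t demo_sync_request(struct mddev *mddev,
					  sector_t sector_nr, int *skipped)
	{
		sector_t max_sector = mddev->dev_sectors;

		if (sector_nr >= max_sector)
			/* The closing "we are finished" call: the return
			 * value is ignored here, so just report 0. */
			return 0;

		/* Pretend this stretch is known in-sync: count it as
		 * progress without issuing any IO. */
		*skipped = 1;
		return min_t(sector_t, 128, max_sector - sector_nr);
	}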
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
* that can be installed to exclude normal IO requests.
*/
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
[...]
*skipped = 1;
return sync_blocks;
}
- /*
- * If there is non-resync activity waiting for a turn,
- * and resync is going fast enough,
- * then let it though before starting on this new sync request.
- */
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
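
The block deleted above polled conf->nr_waiting, the count of normal requests currently blocked on the resync barrier, and slept for up to a second per call unless the caller had flagged the resync as urgent. A simplified sketch of where that counter comes from (the real wait_barrier() in raid1.c of this era is considerably more involved):

	static void wait_barrier_sketch(struct r1conf *conf)
	{
		spin_lock_irq(&conf->resync_lock);
		if (conf->barrier) {
			/* Normal IO arriving while resync holds the
			 * barrier counts itself as waiting; this is the
			 * counter the deleted heuristic polled. */
			conf->nr_waiting++;
			wait_event_lock_irq(conf->wait_barrier,
					    !conf->barrier,
					    conf->resync_lock);
			conf->nr_waiting--;
		}
		conf->nr_pending++;
		spin_unlock_irq(&conf->resync_lock);
	}

With the heuristic gone, fairness between resync and normal IO rests entirely on this barrier accounting plus the speed throttle in md_do_sync().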
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
*/
static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
- int *skipped, int go_faster)
+ int *skipped)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
[...]
if (conf->geo.near_copies < conf->geo.raid_disks &&
max_sector > (sector_nr | chunk_mask))
max_sector = (sector_nr | chunk_mask) + 1;
- /*
- * If there is non-resync activity waiting for us then
- * put in a delay to throttle resync.
- */
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
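
raid10's copy of the same heuristic goes the same way. Incidentally, the rounding in the retained context above, (sector_nr | chunk_mask) + 1, clamps max_sector to the next chunk boundary; a standalone illustration (userspace sketch, not kernel code):

	#include <assert.h>

	int main(void)
	{
		/* 64KiB chunk = 128 sectors, so chunk_mask = 127 */
		unsigned long chunk_mask = 127;
		unsigned long sector_nr = 200;

		/* 200 | 127 = 255; adding 1 rounds up to sector 256 */
		assert(((sector_nr | chunk_mask) + 1) == 256);
		return 0;
	}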
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
return reshape_sectors;
}
-/* FIXME go_faster isn't used */
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
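
raid5 never looked at the argument at all, hence the FIXME, so its hunk is pure signature cleanup. With all three personalities converted, the caller side can be read as one piece; paraphrasing the md.c hunks above into a single simplified loop (starting point, accounting and window logic omitted):

	sector_t j = 0, sectors;
	int skipped = 0;

	while (j < max_sectors) {
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;
		sectors = mddev->pers->sync_request(mddev, j, &skipped);
		if (sectors == 0) {
			/* Personality could not proceed: abort. */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			break;
		}
		j += sectors;	/* skipped or not, the stretch is covered */
	}

	/* Drain in-flight resync IO, then make the closing call. */
	wait_event(mddev->recovery_wait,
		   !atomic_read(&mddev->recovery_active));
	mddev->pers->sync_request(mddev, max_sectors, &skipped);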