/*
   md_k.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H
#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;

/* generic plugging support - like that provided with request_queue,
 * but does not require a request_queue
 */
struct plug_handle {
	void			(*unplug_fn)(struct plug_handle *);
	struct timer_list	unplug_timer;
	struct work_struct	unplug_work;
	unsigned long		unplug_flag;
};
#define	PLUGGED_FLAG 1
void plugger_init(struct plug_handle *plug,
		  void (*unplug_fn)(struct plug_handle *));
void plugger_set_plug(struct plug_handle *plug);
int plugger_remove_plug(struct plug_handle *plug);
static inline void plugger_flush(struct plug_handle *plug)
{
	del_timer_sync(&plug->unplug_timer);
	cancel_work_sync(&plug->unplug_work);
}
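
/* Hedged usage sketch (not from this file): a personality that wants
 * request-queue-style plugging without a request_queue embeds a
 * plug_handle in its private data and wires up its own unplug callback.
 * 'my_unplug' and 'struct my_conf' are hypothetical names:
 *
 *	static void my_unplug(struct plug_handle *plug)
 *	{
 *		struct my_conf *conf =
 *			container_of(plug, struct my_conf, plug);
 *		... kick any IO that was held back for conf ...
 *	}
 *
 *	plugger_init(&conf->plug, my_unplug);	// once, at setup
 *	plugger_set_plug(&conf->plug);		// when delaying IO
 *	plugger_flush(&conf->plug);		// before tearing down
 */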

/*
 * MD's 'extended' device
 */
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	mddev_t *mddev;			/* RAID array if running */
	int last_events;		/* IO event timestamp */

	struct block_device *bdev;	/* block device handle */

	sector_t data_offset;		/* start of data in array */
	sector_t sb_start;		/* offset of the super block (in 512-byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	/* A device can be in one of three states based on two flags:
	 * Not working:   faulty==1 in_sync==0
	 * Fully working: faulty==0 in_sync==1
	 * Working, but not
	 * in sync with the array:
	 *                faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long flags;
#define	Faulty		1		/* device is known to have a fault */
#define	In_sync		2		/* device is in_sync with rest of array */
#define	WriteMostly	4		/* Avoid reading if at all possible */
#define	AllReserved	6		/* If whole device is reserved for
					 * one array */
#define	AutoDetected	7		/* added by auto-detect */
#define	Blocked		8		/* An error occurred on an externally
					 * managed array, don't allow writes
					 * until it is cleared */
	wait_queue_head_t blocked_wait;
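
	/* Illustrative decode of the two-flag state (a sketch, not code
	 * lifted from md.c; the values above are bit numbers used with
	 * test_bit/set_bit):
	 *
	 *	if (test_bit(Faulty, &rdev->flags))
	 *		state = NOT_WORKING;
	 *	else if (test_bit(In_sync, &rdev->flags))
	 *		state = FULLY_WORKING;
	 *	else
	 *		state = WORKING_BUT_NOT_IN_SYNC;
	 */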

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t recovery_offset;	/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and storing
					 * in superblock.
					 */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct sysfs_dirent *sysfs_state;	/* handle for 'state'
						 * sysfs entry */
};

struct mddev_s
{
	struct mdk_personality *pers;
	struct list_head disks;
	unsigned long flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN	1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */

	int sysfs_active;		/* set when sysfs deletes
					 * are happening, so run/
					 * takeover/stop are not safe
					 */
	int ready;			/* See when safe to pass
					 * IO requests down */
	struct gendisk *gendisk;

	int hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int external;			/* metadata is
					 * managed externally */
	char metadata_type[17];		/* externally set */
	sector_t dev_sectors;		/* used size of
					 * component devices */
	sector_t array_sectors;		/* exported array size */
	int external_size;		/* size managed
					 * externally */

	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int can_decrease_events;
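
	/* Worked example (illustrative): if the event count went
	 * 100 -> 101 only to mark the array dirty, and no spare's
	 * superblock recorded 101, then the following clean transition
	 * may write 100 again rather than 102, sparing every member a
	 * superblock update.
	 */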

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t reshape_position;
	int delta_disks, new_level, new_layout;
	int new_chunk_sectors;

	struct mdk_thread_s *thread;		/* management thread */
	struct mdk_thread_s *sync_thread;	/* doing resync or reconstruct */
	sector_t curr_resync;			/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed. So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t curr_resync_completed;
	unsigned long resync_mark;	/* a recent timestamp */
	sector_t resync_mark_cnt;	/* blocks written at resync_mark */
	sector_t curr_mark_cnt;		/* blocks scheduled now */

	sector_t resync_max_sectors;	/* may be set by personality */

	sector_t resync_mismatches;	/* count of sectors where
					 * parity/replica mismatch found
					 */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t suspend_lo;
	sector_t suspend_hi;
	/* if zero, use the system-wide default */
	int sync_speed_min;
	int sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int parallel_resync;

	int ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  A reshape is happening
	 * FROZEN:   user-space has frozen sync, don't start another
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define	MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9

	unsigned long recovery;
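
	/* Hedged sketch of how these bits are driven (patterned on what
	 * md.c does when user-space writes "check" to sync_action):
	 *
	 *	set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	 *	set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	 *	set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	 *	md_wakeup_thread(mddev->thread);
	 */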
	int recovery_disabled;		/* if we detect that recovery
					 * will always fail, set this
					 * so we don't loop trying */

	int in_sync;			/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
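	/* e.g. (sketch; mddev_lock()/mddev_unlock() are the md.c helpers
	 * that take and release reconfig_mutex):
	 *
	 *	mddev_lock(mddev);		// reconfig_mutex first ...
	 *	revalidate_disk(mddev->gendisk);// ... which may take bd_mutex
	 *	mddev_unlock(mddev);
	 */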
	struct mutex open_mutex;
	struct mutex reconfig_mutex;
	atomic_t active;		/* general refcount */
	atomic_t openers;		/* number of active opens */

	int degraded;			/* whether md should consider
					 * adding a spare
					 */

	atomic_t recovery_active;	/* blocks scheduled, but not written */
	wait_queue_head_t recovery_wait;
	sector_t recovery_cp;
	sector_t resync_min;		/* user requested sync
					 * starts here */
	sector_t resync_max;		/* resync should pause
					 * when it gets here */

	struct sysfs_dirent *sysfs_state;	/* handle for 'array_state'
						 * file in sysfs.
						 */
	struct sysfs_dirent *sysfs_action;	/* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	spinlock_t write_lock;
	wait_queue_head_t sb_wait;	/* for waiting on superblock updates */
	atomic_t pending_writes;	/* number of active superblock writes */

	unsigned int safemode;		/* if set, update "clean" superblock
					 * when no writes pending.
					 */
	unsigned int safemode_delay;
	struct timer_list safemode_timer;
	atomic_t writes_pending;
	struct request_queue *queue;	/* for plugging ... */

	struct bitmap *bitmap;		/* the bitmap for the device */
	struct {
		struct file *file;	/* the bitmap file */
		loff_t offset;		/* offset from superblock of
					 * start of bitmap. May be
					 * negative, but not '0'
					 * For external metadata, offset
					 * from start of device.
					 */
		loff_t default_offset;	/* this is the offset to use when
					 * hot-adding a bitmap. It should
					 * eventually be settable by sysfs.
					 */
		/* When md is serving under dm, it might use a
		 * dirty_log to store the bits.
		 */
		struct dm_dirty_log *log;
		unsigned long chunksize;
		unsigned long daemon_sleep;	/* how many jiffies between updates? */
		unsigned long max_write_behind;	/* write-behind mode */
	} bitmap_info;

	atomic_t max_corr_read_errors;	/* max read retries */
	struct list_head all_mddevs;

	struct attribute_group *to_remove;
	struct plug_handle *plug;	/* if used by personality */

	struct bio_set *bio_set;

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_FLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
};
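
/* Hedged usage sketch for the flush machinery: a personality's
 * make_request typically hands REQ_FLUSH bios straight back to md,
 * which drives flush_bio/flush_pending/flush_work:
 *
 *	if (bio->bi_rw & REQ_FLUSH) {
 *		md_flush_request(mddev, bio);
 *		return 0;
 *	}
 */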

static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
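
/* Typical pairing (a sketch, not from this file): a personality bumps
 * nr_pending before issuing IO to a member device and drops it on
 * completion, so a faulty device can be hot-removed once it is idle:
 *
 *	atomic_inc(&rdev->nr_pending);	// before submitting the bio
 *	...
 *	rdev_dec_pending(rdev, mddev);	// in the bio's endio path
 */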

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
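
/* e.g. (sketch): resync paths account the IO they issue so md's idle
 * detection can tell sync IO apart from application IO:
 *
 *	md_sync_acct(rdev->bdev, bio_sectors(bio));
 */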

struct mdk_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	int (*make_request)(mddev_t *mddev, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (mddev_t *mddev);
	int (*start_reshape) (mddev_t *mddev);
	void (*finish_reshape) (mddev_t *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (mddev_t *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another. The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (mddev_t *mddev);
};
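
/* Hedged sketch of how a personality module fills this in and registers
 * itself ("mypers" and its level are hypothetical; only the ops a
 * personality actually implements need to be set):
 *
 *	static struct mdk_personality mypers_personality = {
 *		.name		= "mypers",
 *		.level		= 42,
 *		.owner		= THIS_MODULE,
 *		.make_request	= mypers_make_request,
 *		.run		= mypers_run,
 *		.stop		= mypers_stop,
 *		.status		= mypers_status,
 *	};
 *
 *	static int __init mypers_init(void)
 *	{
 *		return register_md_personality(&mypers_personality);
 *	}
 */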

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mddev_t *, char *);
	ssize_t (*store)(mddev_t *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;
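
/* e.g. (sketch, in the style md.c uses for its sysfs attributes;
 * 'example' is a hypothetical attribute name):
 *
 *	static ssize_t example_show(mddev_t *mddev, char *page)
 *	{
 *		return sprintf(page, "%d\n", mddev->in_sync);
 *	}
 *	static struct md_sysfs_entry md_example =
 *		__ATTR(example, S_IRUGO, example_show, NULL);
 */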

static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, NULL, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char *mdname(mddev_t *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
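
/* e.g. (sketch): used for log messages:
 *
 *	printk(KERN_INFO "%s: array is degraded\n", mdname(mddev));
 */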

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, tmp, mddev)					\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
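
/* e.g. (sketch): walking an array's members, tolerating removal of the
 * current entry:
 *
 *	mdk_rdev_t *rdev, *tmp;
 *
 *	rdev_for_each(rdev, tmp, mddev)
 *		if (test_bit(Faulty, &rdev->flags))
 *			... maybe unbind rdev from the array ...
 */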

typedef struct mdk_thread_s {
	void (*run) (mddev_t *mddev);
	mddev_t *mddev;
	wait_queue_head_t wqueue;
	unsigned long flags;
	struct task_struct *tsk;
	unsigned long timeout;
} mdk_thread_t;

#define THREAD_WAKEUP  0
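
/* Hedged sketch: personalities create these with md_register_thread()
 * (declared below) and poke them with md_wakeup_thread(), which sets
 * THREAD_WAKEUP in ->flags; the thread then invokes ->run.
 * 'myraid_d' is a hypothetical daemon function:
 *
 *	mddev->thread = md_register_thread(myraid_d, mddev, NULL);
 *	...
 *	md_wakeup_thread(mddev->thread);
 */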

#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
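
/* Usage sketch (names such as 'conf' and its fields are hypothetical):
 * the caller must already hold 'lock' with irqs disabled; the macro
 * drops it around schedule() and runs 'cmd' just before sleeping:
 *
 *	spin_lock_irq(&conf->resync_lock);
 *	wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 *			    conf->resync_lock, md_unplug(conf->mddev));
 *	spin_unlock_irq(&conf->resync_lock);
 */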

static inline void safe_put_page(struct page *p)
{
	if (p)
		put_page(p);
}

extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t *md_register_thread(void (*run)(mddev_t *mddev),
				mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t *thread);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);

extern int mddev_congested(mddev_t *mddev, int bits);
extern void md_flush_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);
extern void md_unplug(mddev_t *mddev);

extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
extern void md_rdev_init(mdk_rdev_t *rdev);

extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   mddev_t *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   mddev_t *mddev);
#endif /* _MD_MD_H */