/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>

struct bio;

DECLARE_PER_CPU(int, dirty_throttle_leaks);
/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtier tasks will enter a loop waiting (possibly for a
 * long time) for the dirty pages to drop, unless they have written enough
 * pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
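/*
 * Worked example (an illustration, not from the original header): with
 * DIRTY_SCOPE == 8, DIRTY_FULL_SCOPE == 4, so a global dirty threshold of,
 * say, 1000 pages gives the smooth-throttling region
 * (1000 - 1000/4, 1000) = (750, 1000).  Below 750 dirty pages tasks dirty
 * memory freely; inside the region they are throttled progressively harder.
 */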
struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};
/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and this work is done
	 * by an emergency worker.  However, the reason is visible to
	 * userland through tracepoints, and we keep exposing exactly the
	 * same information, so the name stays despite the mismatch.
	 */
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,
};
/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange.  The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;
	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};
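#if 0	/* illustrative sketch, not compiled: the stack-initialization
	 * pattern implied by the comment above -- designated initializers
	 * leave every unspecified field zeroed */
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
#endif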
static inline int wbc_to_write_flags(struct writeback_control *wbc)
{
	if (wbc->sync_mode == WB_SYNC_ALL)
		return REQ_SYNC;
	else if (wbc->for_kupdate || wbc->for_background)
		return REQ_BACKGROUND;

	return 0;
}
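#if 0	/* illustrative sketch, not compiled: a filesystem's submission path
	 * would typically fold these flags into the bio it is about to send
	 * down, e.g.: */
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	submit_bio(bio);
#endif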
/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in.  There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;
	/*
	 * Scale the writeback cache size proportionally to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()].  Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages.  Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time.  The length
	 * of this period itself is also measured in page writeback
	 * completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;
	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (e.g. on the startup of KVM in a
	 * swapless system).  This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike.  To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};
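#if 0	/* illustrative sketch, not compiled: how a writeout completion might
	 * feed the domain's floating proportion and how a wb's share might be
	 * read back, loosely modeled on mm/page-writeback.c; wb_local is a
	 * hypothetical per-wb fprop_local_percpu counter */
static void example_writeout_inc(struct wb_domain *dom,
				 struct fprop_local_percpu *wb_local)
{
	/* credit one page writeback completion to this wb in the domain */
	__fprop_inc_percpu(&dom->completions, wb_local);
}

static void example_wb_fraction(struct wb_domain *dom,
				struct fprop_local_percpu *wb_local,
				unsigned long *num, unsigned long *denom)
{
	/* this wb's share of the domain's recent writeout completions */
	fprop_fraction_percpu(&dom->completions, wb_local, num, denom);
}
#endif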
/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed.  It resets @dom's dirty limit parameters to prevent
 * the past values which don't match the current configuration from skewing
 * dirty throttling.  Without this, when the memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied, leading to consecutive unnecessary OOMs, and may get stuck in
 * low dirty throttling, resulting in bad write performance.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}
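#if 0	/* illustrative sketch, not compiled: a hypothetical caller notifying
	 * the domain after a configuration change shrank its dirtyable memory
	 * (resize_domain_memory() is made up for the example) */
	resize_domain_memory(dom);
	wb_domain_size_changed(dom);	/* forget the stale dirty limits */
#endif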
/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
				   enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
/* wait_on_inode() relies on inode definitions from fs.h, included above. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct page *page);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
	__releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_io(struct writeback_control *wbc, struct page *page,
		    size_t bytes);
void cgroup_writeback_umount(void);
/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called with or
 * without @inode->i_lock held.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}
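#if 0	/* illustrative sketch, not compiled: a page-dirtying path would
	 * typically attach the wb before accounting the newly dirtied page */
	inode_attach_wb(inode, page);
	account_page_dirtied(page, mapping);	/* see mm/page-writeback.c */
#endif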
/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}
/**
 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * This function is to be used by __filemap_fdatawrite_range(), which is an
 * alternative entry point into writeback code.  It first ensures @inode is
 * associated with a bdi_writeback and then attaches it to @wbc.
 */
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_attach_wb(inode, NULL);
	wbc_attach_and_unlock_inode(wbc, inode);
}
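#if 0	/* illustrative sketch, not compiled: the shape of an
	 * __filemap_fdatawrite_range()-style caller described above
	 * (example_fdatawrite() is made up for the example) */
static int example_fdatawrite(struct address_space *mapping,
			      loff_t start, loff_t end)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= start,
		.range_end	= end,
	};
	int ret;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}
#endif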
/**
 * wbc_init_bio - writeback specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc.  Perform
 * writeback specific initialization.  This is used to apply the cgroup
 * writeback context.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (wbc->wb)
		bio_associate_blkcg(bio, wbc->wb->blkcg_css);
}
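#if 0	/* illustrative sketch, not compiled: filesystems typically pair
	 * wbc_init_bio() with wbc_account_io() when building writeback bios
	 * (example_add_page() is made up for the example) */
static void example_add_page(struct writeback_control *wbc, struct bio *bio,
			     struct page *page)
{
	wbc_init_bio(wbc, bio);			/* tag bio with the cgroup */
	bio_add_page(bio, page, PAGE_SIZE, 0);
	wbc_account_io(wbc, page, PAGE_SIZE);	/* feed foreign-inode detection */
}
#endif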
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
					       struct inode *inode)
	__releases(&inode->i_lock)
{
	spin_unlock(&inode->i_lock);
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_io(struct writeback_control *wbc,
				  struct page *page, size_t bytes)
{
}

static inline void cgroup_writeback_umount(void)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;
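/*
 * These knobs surface under /proc/sys/vm/, e.g. vm.dirty_background_ratio,
 * vm.dirty_ratio, vm.dirty_writeback_centisecs, vm.dirty_expire_centisecs,
 * vm.dirtytime_expire_seconds, vm.highmem_is_dirtyable, vm.block_dump and
 * vm.laptop_mode.
 */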
extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos);

struct ctl_table;
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
				      void __user *, size_t *, loff_t *);
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
			   void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);
void account_page_redirty(struct page *page);

void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);

#endif	/* WRITEBACK_H */