/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

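/*
 * Usage sketch (illustrative, not part of the original header): a
 * writeback path bumps a counter with the irq-safe wrapper, reads an
 * approximate value cheaply, and pays for an exact sum only when
 * needed.  WB_RECLAIMABLE is one of the enum wb_stat_item values from
 * backing-dev-defs.h.
 *
 *	inc_wb_stat(wb, WB_RECLAIMABLE);
 *	...
 *	s64 approx = wb_stat(wb, WB_RECLAIMABLE);	(fast, may lag)
 *	s64 exact = wb_stat_sum(wb, WB_RECLAIMABLE);	(slow, summed)
 */
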
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

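/*
 * Usage sketch (illustrative, not part of the original header): a
 * driver whose pages should be invisible to dirty accounting and
 * writeback, e.g. one backed by volatile memory, combines the three
 * flags via the convenience macro.  "my_bdi" is a hypothetical bdi.
 *
 *	my_bdi.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *	(bdi_cap_writeback_dirty(&my_bdi) now returns false)
 */
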
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

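/*
 * Usage sketch (illustrative, not part of the original header; actual
 * call sites may differ): writeback code branches on this test to pick
 * between a per-cgroup wb and the root wb embedded in the bdi.
 *
 *	if (inode_cgwb_enabled(inode))
 *		wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	else
 *		wb = &bdi->wb;
 */
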
/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

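/*
 * Usage sketch (illustrative, not part of the original header): the
 * returned wb is only guaranteed to stay alive for the RCU read-side
 * section, so lookups are bracketed like this.
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb)
 *		(use wb, or wb_tryget() it to keep it past the unlock)
 *	rcu_read_unlock();
 */
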
/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

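/*
 * Usage sketch (illustrative, not part of the original header): on
 * success the caller owns a reference and drops it with wb_put() from
 * backing-dev-defs.h when done.
 *
 *	wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	if (wb) {
 *		(queue writeback work against wb)
 *		wb_put(wb);
 *	}
 */
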
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during transaction.  IRQ may or may not be
 * disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}

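/*
 * Usage sketch (illustrative, not part of the original header): a
 * typical unlocked access transaction.  No sleeping between begin and
 * end, and the locked flag must be passed through untouched.
 *
 *	bool locked;
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &locked);
 *	(access wb; the inode->wb association cannot change here)
 *	unlocked_inode_to_wb_end(inode, locked);
 */
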
struct wb_iter {
	int			start_memcg_id;
	struct radix_tree_iter	tree_iter;
	void			**slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
						   struct backing_dev_info *bdi)
{
	struct radix_tree_iter *titer = &iter->tree_iter;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (iter->start_memcg_id >= 0) {
		iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
		iter->start_memcg_id = -1;
	} else {
		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
	}

	if (!iter->slot)
		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
	if (iter->slot)
		return *iter->slot;
	return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
						   struct backing_dev_info *bdi,
						   int start_memcg_id)
{
	iter->start_memcg_id = start_memcg_id;

	if (start_memcg_id)
		return __wb_iter_next(iter, bdi);
	else
		return &bdi->wb;
}

/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_memcg_id: memcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * memcg ID order starting from @start_memcg_id.  @iter is struct wb_iter
 * to be used as temp storage during iteration.  rcu_read_lock() must be
 * held throughout iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id)		\
	for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id);	\
	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))

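/*
 * Usage sketch (illustrative, not part of the original header): walking
 * every wb of a bdi under RCU, e.g. to kick background writeback on
 * each member that has dirty inodes.
 *
 *	struct bdi_writeback *wb;
 *	struct wb_iter iter;
 *
 *	rcu_read_lock();
 *	bdi_for_each_wb(wb, bdi, &iter, 0)
 *		if (wb_has_dirty_io(wb))
 *			wb_start_background_writeback(wb);
 *	rcu_read_unlock();
 */
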
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
	int		next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((iter)->next_id = (start_blkcg_id);			\
	     ({	(wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

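/*
 * Usage sketch (illustrative, not part of the original header):
 * optional work such as readahead is commonly skipped while the
 * backing device is congested.
 *
 *	if (inode_read_congested(inode))
 *		return;		(back off, the device is busy)
 */
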
#endif	/* _LINUX_BACKING_DEV_H */