#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
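
/*
 * Illustrative (non-normative) example: an asynchronous writeback
 * completion path might record a failure like this, so that a later
 * fsync()/msync() can report it (the bio error field is an assumption
 * here, not part of this header):
 *
 *	if (bio->bi_error)
 *		mapping_set_error(page->mapping, bio->bi_error);
 *	end_page_writeback(page);
 */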

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
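
/*
 * Illustrative sketch of typical use: a caller that must not recurse
 * into the filesystem can strip __GFP_FS from the mapping's allocation
 * mask before allocating a pagecache page, e.g.
 *
 *	page = __page_cache_alloc(mapping_gfp_constraint(mapping, ~__GFP_FS));
 */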

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        atomic_inc(&page->_count);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
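
/*
 * Illustrative sketch of the lookup-side pattern described above; a
 * simplified version of what find_get_entry() does (the real code works
 * on radix-tree slots and also handles exceptional entries):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	// step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		// step 2
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			page_cache_release(page);		// step 3 failed
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */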

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        atomic_add(count, &page->_count);

#else
        if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
        return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
        VM_BUG_ON_PAGE(page_count(page) != 0, page);
        VM_BUG_ON(count == 0);

        atomic_set(&page->_count, count);
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x) |
                                  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}
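
/*
 * Illustrative example of the readahead variant: allocate best-effort,
 * cache-cold pages and queue them for batched insertion, roughly the
 * pattern of __do_page_cache_readahead() (page_pool is a local list
 * assumed here):
 *
 *	page = page_cache_alloc_readahead(mapping);
 *	if (!page)
 *		break;
 *	page->index = offset;
 *	list_add(&page->lru, &page_pool);
 */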

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
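
/*
 * Illustrative caller pattern: the reference taken by find_get_page()
 * must be dropped with page_cache_release() once the caller is done:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		...
 *		page_cache_release(page);
 *	}
 */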

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        int tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
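
/*
 * Illustrative caller pattern: read_mapping_page() returns an ERR_PTR()
 * on failure and an uptodate, referenced page on success:
 *
 *	page = read_mapping_page(mapping, n, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	kaddr = kmap(page);
 *	...
 *	kunmap(page);
 *	page_cache_release(page);
 */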

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        pgoff_t pgoff;

        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        if (likely(!PageTransTail(page)))
                return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
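
/*
 * Worked example (illustrative, with 4K pages so that PAGE_CACHE_SHIFT
 * equals PAGE_SHIFT): for a VMA with vm_pgoff = 5, an address that lies
 * 3 pages past vm_start gives pgoff = 3 + 5 = 8, i.e. the faulting
 * address corresponds to page index 8 of the mapped file.
 */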

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
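
/*
 * Illustrative use in a fault handler (simplified from the
 * filemap_fault() pattern): if the page lock cannot be taken and the
 * caller allows it, the fault is retried rather than sleeping with
 * mmap_sem held:
 *
 *	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
 *		page_cache_release(page);
 *		return ret | VM_FAULT_RETRY;
 *	}
 */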

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
                                             int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        if (unlikely(size == 0))
                return 0;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK)) {
                        ret = __get_user(c, end);
                        (void)c;
                }
        }
        return ret;
}
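
/*
 * Illustrative sketch of the classic prefault-then-copy pattern used by
 * buffered write paths: fault the source buffer in while no page lock
 * is held, then perform the copy with page faults disabled:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *	...
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 *	pagefault_enable();
 */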

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
        int ret = 0;
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return ret;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        while (uaddr <= end) {
                ret = __put_user(0, uaddr);
                if (ret != 0)
                        return ret;
                uaddr += PAGE_SIZE;
        }

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                ret = __put_user(0, end);

        return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
                                               int size)
{
        volatile char c;
        int ret = 0;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return ret;

        while (uaddr <= end) {
                ret = __get_user(c, uaddr);
                if (ret != 0)
                        return ret;
                uaddr += PAGE_SIZE;
        }

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                ret = __get_user(c, end);
                (void)c;
        }

        return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (error)
                __ClearPageLocked(page);

        return error;
}
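
/*
 * Illustrative sketch of the common insert-then-read pattern (roughly
 * what readahead and read_cache_pages() do); if another task has
 * already inserted a page at this index, the duplicate is released:
 *
 *	page = __page_cache_alloc(gfp);
 *	if (!page)
 *		return -ENOMEM;
 *	if (add_to_page_cache_lru(page, mapping, index, gfp)) {
 *		page_cache_release(page);
 *		return 0;
 *	}
 *	return mapping->a_ops->readpage(file, page);
 */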

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
                               PAGE_CACHE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */