/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC     0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags reuses VM_ flag values to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_QUICK,      /* don't try more than file page cache lookup */
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
        SGP_NOPAGE,     /* same as SGP_CACHE, return with page locked */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         *
         * __GFP_MOVABLE is masked out as swap vectors cannot move
         */
        return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
                                PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

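/*
 * Note on the atomic kmap slots below: directory pages are mapped with
 * KM_USER0 and swap-vector pages with KM_USER1, so that a directory
 * page and one of the swap-vector pages it points to can be held
 * mapped at the same time.
 */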
static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_ACCOUNT)?
                security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (flags & VM_ACCOUNT)
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_ACCOUNT)?
                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (!(flags & VM_ACCOUNT))
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks += pages;
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
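/*
 * Worked example of the arithmetic below (assuming 4kB pages and 4-byte
 * swp_entry_t, so ENTRIES_PER_PAGE == 1024, SHMEM_NR_DIRECT == 16):
 * index 1045 is above the direct entries, so 16 is subtracted, giving
 * 1029; offset = 1029 % 1024 = 5 and 1029 / 1024 = 1, so the entry is
 * slot 5 of the swap-vector page hung off the second pointer in the
 * doubly indirect half of i_indirect.
 */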
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}

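/*
 * shmem_swp_set - install or clear a swap entry, keeping counts in step:
 * info->swapped tracks swap entries per inode, and page_private() of
 * each swap-vector page tracks entries per page, so that truncation
 * can skip vector pages which hold no swap entries at all.
 */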
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
                struct page *page = kmap_atomic_to_page(entry);
                set_page_private(page, page_private(page) + incdec);
        }
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist, allocate the entry.
 *
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks <= 1) {
                                spin_unlock(&sbinfo->stat_lock);
                                return ERR_PTR(-ENOSPC);
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                }

                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
                if (page)
                        set_page_private(page, 0);
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
                                                spinlock_t *punch_lock)
{
        spinlock_t *punch_unlock = NULL;
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        if (unlikely(punch_lock)) {
                                punch_unlock = punch_lock;
                                punch_lock = NULL;
                                spin_lock(punch_unlock);
                                if (!ptr->val)
                                        continue;
                        }
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        if (punch_unlock)
                spin_unlock(punch_unlock);
        return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
                int limit, struct page ***dir, spinlock_t *punch_lock)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size,
                                                        punch_lock);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}

static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

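/*
 * shmem_truncate_range - free the pages and swap in [start, end]
 *
 * Used both for full truncation (end == (loff_t)-1) and for hole
 * punching.  Hole punching must take info->lock around swap directory
 * updates because pageins can race with it; whole directory pages are
 * first unhooked from the level above, after which their contents can
 * be freed without the lock (see the comment in the body below).
 */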
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;
        int punch_hole;
        spinlock_t *needs_lock;
        spinlock_t *punch_lock;
        unsigned long upper_limit;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
                upper_limit = SHMEM_MAX_INDEX;
                info->next_index = idx;
                needs_lock = NULL;
                punch_hole = 0;
        } else {
                if (end + 1 >= inode->i_size) { /* we may free a little more */
                        limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
                                                        PAGE_CACHE_SHIFT;
                        upper_limit = SHMEM_MAX_INDEX;
                } else {
                        limit = (end + 1) >> PAGE_CACHE_SHIFT;
                        upper_limit = limit;
                }
                needs_lock = &info->lock;
                punch_hole = 1;
        }

        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
        }

        /*
         * If there are no indirect blocks or we are punching a hole
         * below indirect blocks, nothing to be done.
         */
        if (!topdir || limit <= SHMEM_NR_DIRECT)
                goto done2;

        /*
         * The truncation case has already dropped info->lock, and we're safe
         * because i_size and next_index have already been lowered, preventing
         * access beyond.  But in the punch_hole case, we still need to take
         * the lock when updating the swap directory, because there might be
         * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
         * shmem_writepage.  However, whenever we find we can remove a whole
         * directory page (not at the misaligned start or end of the range),
         * we first NULLify its pointer in the level above, and then have no
         * need to take the lock when updating its contents: needs_lock and
         * punch_lock (either pointing to info->lock or NULL) manage this.
         */

        upper_limit -= SHMEM_NR_DIRECT;
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset && upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        if (punch_hole)
                                needs_lock = &info->lock;
                        if (upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                punch_lock = needs_lock;
                subdir = dir[diroff];
                if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
                        if (needs_lock) {
                                spin_lock(needs_lock);
                                dir[diroff] = NULL;
                                spin_unlock(needs_lock);
                                punch_lock = NULL;
                        } else
                                dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
                if (subdir && page_private(subdir) /* has swap entries */) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                        offset, size, &dir, punch_lock);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset || punch_lock) {
                                spin_lock(&info->lock);
                                set_page_private(subdir,
                                        page_private(subdir) - freed);
                                spin_unlock(&info->lock);
                        } else
                                BUG_ON(page_private(subdir) != freed);
                }
                offset = 0;
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 *
                 * Recalling truncate_inode_pages_range and unmap_mapping_range
                 * every time for punch_hole (which never got a chance to clear
                 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
                 * yet hardly ever necessary: try to optimize them out later.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
                if (punch_hole)
                        unmap_mapping_range(inode->i_mapping, start,
                                                        end - start, 1);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}

static void shmem_truncate(struct inode *inode)
{
        shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (!error && (attr->ia_valid & ATTR_MODE))
                error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
        if (page)
                page_cache_release(page);
        return error;
}

static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        list_del_init(&info->swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
        }
        BUG_ON(inode->i_blocks);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
        clear_inode(inode);
}

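/*
 * shmem_find_swp - linear scan of [dir, edir) for the given swap entry;
 * returns its offset within dir, or -1 if it is not there.
 */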
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0) {
                shmem_swp_balance_unmap();
                goto found;
        }
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && page_private(subdir)) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
                        shmem_swp_unmap(ptr);
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = &info->vfs_inode;
        if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr + offset, 0);
        }
        shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
        /*
         * Decrement swap count even when the entry is left behind:
         * try_to_unuse will skip over mms, then reincrement count.
         */
        swap_free(entry);
        return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;

        spin_lock(&shmem_swaplist_lock);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
                else if (shmem_unuse_inode(info, entry, page)) {
                        /* move head to start search for next from here */
                        list_move_tail(&shmem_swaplist, &info->swaplist);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&shmem_swaplist_lock);
        return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        BUG_ON(page_mapped(page));

        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        BUG_ON(!entry);
        BUG_ON(entry->val);

        if (move_to_swap_cache(page, swap) == 0) {
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                spin_unlock(&info->lock);
                if (list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        /* move instead of add in case we're racing */
                        list_move_tail(&info->swaplist, &shmem_swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
                unlock_page(page);
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        swap_free(swap);
redirty:
        set_page_dirty(page);
        return AOP_WRITEPAGE_ACTIVATE;  /* Return with the page locked */
}

#ifdef CONFIG_NUMA
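/*
 * Parse the value of a tmpfs mpol= mount option, e.g. "default",
 * "prefer:2", "bind:0-3" or "interleave" (the "mpol=" prefix has
 * already been stripped by the caller).  Returns 0 on success, 1 on
 * a malformed value.
 */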
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        char *nodelist = strchr(value, ':');
        int err = 1;

        if (nodelist) {
                /* NUL-terminate policy string */
                *nodelist++ = '\0';
                if (nodelist_parse(nodelist, *policy_nodes))
                        goto out;
                if (!nodes_subset(*policy_nodes, node_online_map))
                        goto out;
        }
        if (!strcmp(value, "default")) {
                *policy = MPOL_DEFAULT;
                /* Don't allow a nodelist */
                if (!nodelist)
                        err = 0;
        } else if (!strcmp(value, "prefer")) {
                *policy = MPOL_PREFERRED;
                /* Insist on a nodelist of one node only */
                if (nodelist) {
                        char *rest = nodelist;
                        while (isdigit(*rest))
                                rest++;
                        if (!*rest)
                                err = 0;
                }
        } else if (!strcmp(value, "bind")) {
                *policy = MPOL_BIND;
                /* Insist on a nodelist */
                if (nodelist)
                        err = 0;
        } else if (!strcmp(value, "interleave")) {
                *policy = MPOL_INTERLEAVE;
                /* Default to nodes online if no nodelist */
                if (!nodelist)
                        *policy_nodes = node_online_map;
                err = 0;
        }
out:
        /* Restore string for error message */
        if (nodelist)
                *--nodelist = ':';
        return err;
}

static struct page *shmem_swapin_async(struct shared_policy *p,
                                       swp_entry_t entry, unsigned long idx)
{
        struct page *page;
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_end = PAGE_SIZE;
        pvma.vm_pgoff = idx;
        pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
        page = read_swap_cache_async(entry, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}

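/*
 * shmem_swapin - read the target entry back from swap, with readahead:
 * valid_swaphandles() sizes a cluster of entries around it, each of
 * which is kicked off asynchronously (honouring the shared policy)
 * before the target entry itself is read.
 */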
struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
                          unsigned long idx)
{
        struct shared_policy *p = &info->policy;
        int i, num;
        struct page *page;
        unsigned long offset;

        num = valid_swaphandles(entry, &offset);
        for (i = 0; i < num; offset++, i++) {
                page = shmem_swapin_async(p,
                                swp_entry(swp_type(entry), offset), idx);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
                 unsigned long idx)
{
        struct vm_area_struct pvma;
        struct page *page;

        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
        pvma.vm_pgoff = idx;
        pvma.vm_end = PAGE_SIZE;
        page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
        swapin_readahead(entry, 0, NULL);
        return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
                        struct page **pagep, enum sgp_type sgp, int *type)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *filepage = *pagep;
        struct page *swappage;
        swp_entry_t *entry;
        swp_entry_t swap;
        int error;

        if (idx >= SHMEM_MAX_INDEX)
                return -EFBIG;
        /*
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
         * But shmem_readpage and shmem_prepare_write pass in a locked
         * filepage, which may be found not uptodate by other callers
         * too, and may need to be copied from the swappage read in.
         */
repeat:
        if (!filepage)
                filepage = find_lock_page(mapping, idx);
        if (filepage && PageUptodate(filepage))
                goto done;
        error = 0;
        if (sgp == SGP_QUICK)
                goto failed;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        entry = shmem_swp_alloc(info, idx, sgp);
        if (IS_ERR(entry)) {
                spin_unlock(&info->lock);
                error = PTR_ERR(entry);
                goto failed;
        }
        swap = *entry;

        if (swap.val) {
                /* Look it up and read it in.. */
                swappage = lookup_swap_cache(swap);
                if (!swappage) {
                        shmem_swp_unmap(entry);
                        /* here we actually do the io */
                        if (type && *type == VM_FAULT_MINOR) {
                                __count_vm_event(PGMAJFAULT);
                                *type = VM_FAULT_MAJOR;
                        }
                        spin_unlock(&info->lock);
                        swappage = shmem_swapin(info, swap, idx);
                        if (!swappage) {
                                spin_lock(&info->lock);
                                entry = shmem_swp_alloc(info, idx, sgp);
                                if (IS_ERR(entry))
                                        error = PTR_ERR(entry);
                                else {
                                        if (entry->val == swap.val)
                                                error = -ENOMEM;
                                        shmem_swp_unmap(entry);
                                }
                                spin_unlock(&info->lock);
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }

                /* We have to do this with page locked to prevent races */
                if (TestSetPageLocked(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (PageWriteback(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_writeback(swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (!PageUptodate(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        error = -EIO;
                        goto failed;
                }

                if (filepage) {
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        delete_from_swap_cache(swappage);
                        spin_unlock(&info->lock);
                        copy_highpage(filepage, swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        flush_dcache_page(filepage);
                        SetPageUptodate(filepage);
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else if (!(error = move_from_swap_cache(
                                swappage, idx, mapping))) {
                        info->flags |= SHMEM_PAGEIN;
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        filepage = swappage;
                        swap_free(swap);
                } else {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        if (error == -ENOMEM) {
                                /* let kswapd refresh zone for GFP_ATOMICs */
                                congestion_wait(WRITE, HZ/50);
                        }
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
                shmem_swp_unmap(entry);
                filepage = find_get_page(mapping, idx);
                if (filepage &&
                    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
                        spin_unlock(&info->lock);
                        wait_on_page_locked(filepage);
                        page_cache_release(filepage);
                        filepage = NULL;
                        goto repeat;
                }
                spin_unlock(&info->lock);
        } else {
                shmem_swp_unmap(entry);
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks == 0 ||
                            shmem_acct_block(info->flags)) {
                                spin_unlock(&sbinfo->stat_lock);
                                spin_unlock(&info->lock);
                                error = -ENOSPC;
                                goto failed;
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                } else if (shmem_acct_block(info->flags)) {
                        spin_unlock(&info->lock);
                        error = -ENOSPC;
                        goto failed;
                }

                if (!filepage) {
                        spin_unlock(&info->lock);
                        filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
                                                    info,
                                                    idx);
                        if (!filepage) {
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                error = -ENOMEM;
                                goto failed;
                        }

                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
                                error = PTR_ERR(entry);
                        else {
                                swap = *entry;
                                shmem_swp_unmap(entry);
                        }
                        if (error || swap.val || 0 != add_to_page_cache_lru(
                                        filepage, mapping, idx, GFP_ATOMIC)) {
                                spin_unlock(&info->lock);
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                filepage = NULL;
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        info->flags |= SHMEM_PAGEIN;
                }

                info->alloced++;
                spin_unlock(&info->lock);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
        }
done:
        if (*pagep != filepage) {
                *pagep = filepage;
                if (sgp != SGP_NOPAGE)
                        unlock_page(filepage);
        }
        return 0;

failed:
        if (*pagep != filepage) {
                unlock_page(filepage);
                page_cache_release(filepage);
        }
        return error;
}

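/*
 * Fault handler.  SGP_NOPAGE returns the page still locked: with
 * VM_CAN_INVALIDATE set on the vma (see shmem_mmap), the core fault
 * path is expected to unlock it only after mapping it, which closes
 * the race between faulting a page in and truncate/invalidate
 * removing it.
 */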
1308 static struct page *shmem_nopage(struct vm_area_struct *vma,
1309                                  unsigned long address, int *type)
1310 {
1311         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1312         struct page *page = NULL;
1313         unsigned long idx;
1314         int error;
1315
1316         BUG_ON(!(vma->vm_flags & VM_CAN_INVALIDATE));
1317
1318         idx = (address - vma->vm_start) >> PAGE_SHIFT;
1319         idx += vma->vm_pgoff;
1320         idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
1321         if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1322                 return NOPAGE_SIGBUS;
1323
1324         error = shmem_getpage(inode, idx, &page, SGP_NOPAGE, type);
1325         if (error)
1326                 return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
1327
1328         mark_page_accessed(page);
1329         return page;
1330 }
1331
1332 static int shmem_populate(struct vm_area_struct *vma,
1333         unsigned long addr, unsigned long len,
1334         pgprot_t prot, unsigned long pgoff, int nonblock)
1335 {
1336         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1337         struct mm_struct *mm = vma->vm_mm;
1338         enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
1339         unsigned long size;
1340
1341         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1342         if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
1343                 return -EINVAL;
1344
1345         while ((long) len > 0) {
1346                 struct page *page = NULL;
1347                 int err;
1348                 /*
1349                  * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
1350                  */
1351                 err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
1352                 if (err)
1353                         return err;
1354                 /* Page may still be null, but only if nonblock was set. */
1355                 if (page) {
1356                         mark_page_accessed(page);
1357                         err = install_page(mm, vma, addr, page, prot);
1358                         if (err) {
1359                                 page_cache_release(page);
1360                                 return err;
1361                         }
1362                 } else if (vma->vm_flags & VM_NONLINEAR) {
1363                         /* No page was found just because we can't read it in
1364                          * now (being here implies nonblock != 0), but the page
1365                          * may exist, so set the PTE to fault it in later. */
1366                         err = install_file_pte(mm, vma, addr, pgoff, prot);
1367                         if (err)
1368                                 return err;
1369                 }
1370
1371                 len -= PAGE_SIZE;
1372                 addr += PAGE_SIZE;
1373                 pgoff++;
1374         }
1375         return 0;
1376 }
1377
1378 #ifdef CONFIG_NUMA
1379 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1380 {
1381         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1382         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1383 }
1384
1385 struct mempolicy *
1386 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1387 {
1388         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1389         unsigned long idx;
1390
1391         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1392         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1393 }
1394 #endif
1395
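/*
 * Pin or unpin the inode's pages on behalf of SysV SHM_LOCK/SHM_UNLOCK:
 * the locked size is charged to the user's locked-memory limit via
 * user_shm_lock(), and VM_LOCKED is noted in info->flags so that a
 * later unlock balances the accounting.
 */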
1396 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1397 {
1398         struct inode *inode = file->f_path.dentry->d_inode;
1399         struct shmem_inode_info *info = SHMEM_I(inode);
1400         int retval = -ENOMEM;
1401
1402         spin_lock(&info->lock);
1403         if (lock && !(info->flags & VM_LOCKED)) {
1404                 if (!user_shm_lock(inode->i_size, user))
1405                         goto out_nomem;
1406                 info->flags |= VM_LOCKED;
1407         }
1408         if (!lock && (info->flags & VM_LOCKED) && user) {
1409                 user_shm_unlock(inode->i_size, user);
1410                 info->flags &= ~VM_LOCKED;
1411         }
1412         retval = 0;
1413 out_nomem:
1414         spin_unlock(&info->lock);
1415         return retval;
1416 }
1417
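/*
 * mmap just installs shmem_vm_ops; VM_CAN_INVALIDATE advertises that
 * this mapping follows the locked-page nopage protocol, so truncation
 * and invalidation can safely shoot down ptes against it.
 */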
1418 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1419 {
1420         file_accessed(file);
1421         vma->vm_ops = &shmem_vm_ops;
1422         vma->vm_flags |= VM_CAN_INVALIDATE;
1423         return 0;
1424 }
1425
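/*
 * Allocate and initialise a new shmem inode, charging it against the
 * superblock's inode limit when one is set.  The memset() below clears
 * the shmem_inode_info fields preceding the embedded struct inode,
 * which new_inode() has already initialised.
 */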
1426 static struct inode *
1427 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1428 {
1429         struct inode *inode;
1430         struct shmem_inode_info *info;
1431         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1432
1433         if (sbinfo->max_inodes) {
1434                 spin_lock(&sbinfo->stat_lock);
1435                 if (!sbinfo->free_inodes) {
1436                         spin_unlock(&sbinfo->stat_lock);
1437                         return NULL;
1438                 }
1439                 sbinfo->free_inodes--;
1440                 spin_unlock(&sbinfo->stat_lock);
1441         }
1442
1443         inode = new_inode(sb);
1444         if (inode) {
1445                 inode->i_mode = mode;
1446                 inode->i_uid = current->fsuid;
1447                 inode->i_gid = current->fsgid;
1448                 inode->i_blocks = 0;
1449                 inode->i_mapping->a_ops = &shmem_aops;
1450                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1451                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1452                 inode->i_generation = get_seconds();
1453                 info = SHMEM_I(inode);
1454                 memset(info, 0, (char *)inode - (char *)info);
1455                 spin_lock_init(&info->lock);
1456                 INIT_LIST_HEAD(&info->swaplist);
1457
1458                 switch (mode & S_IFMT) {
1459                 default:
1460                         inode->i_op = &shmem_special_inode_operations;
1461                         init_special_inode(inode, mode, dev);
1462                         break;
1463                 case S_IFREG:
1464                         inode->i_op = &shmem_inode_operations;
1465                         inode->i_fop = &shmem_file_operations;
1466                         mpol_shared_policy_init(&info->policy, sbinfo->policy,
1467                                                         &sbinfo->policy_nodes);
1468                         break;
1469                 case S_IFDIR:
1470                         inc_nlink(inode);
1471                         /* Some things misbehave if size == 0 on a directory */
1472                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1473                         inode->i_op = &shmem_dir_inode_operations;
1474                         inode->i_fop = &simple_dir_operations;
1475                         break;
1476                 case S_IFLNK:
1477                         /*
1478                          * Must not load anything in the rbtree,
1479                          * mpol_free_shared_policy will not be called.
1480                          */
1481                         mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1482                                                 NULL);
1483                         break;
1484                 }
1485         } else if (sbinfo->max_inodes) {
1486                 spin_lock(&sbinfo->stat_lock);
1487                 sbinfo->free_inodes++;
1488                 spin_unlock(&sbinfo->stat_lock);
1489         }
1490         return inode;
1491 }
1492
1493 #ifdef CONFIG_TMPFS
1494 static const struct inode_operations shmem_symlink_inode_operations;
1495 static const struct inode_operations shmem_symlink_inline_operations;
1496
1497 /*
1498  * Normally tmpfs avoids the use of shmem_readpage and shmem_prepare_write;
1499  * but providing them allows a tmpfs file to be used with splice and sendfile,
1500  * and beneath the loop driver, in the generic fashion that many filesystems support.
1501  */
1502 static int shmem_readpage(struct file *file, struct page *page)
1503 {
1504         struct inode *inode = page->mapping->host;
1505         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1506         unlock_page(page);
1507         return error;
1508 }
1509
1510 static int
1511 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1512 {
1513         struct inode *inode = page->mapping->host;
1514         return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1515 }
1516
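/*
 * Write path: copy from userspace a page at a time, with
 * shmem_getpage(SGP_WRITE) finding or allocating each page.  Highmem
 * pages are first tried with an atomic kmap and a non-faulting copy,
 * falling back to plain kmap/__copy_from_user if that copy was short.
 */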
1517 static ssize_t
1518 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1519 {
1520         struct inode    *inode = file->f_path.dentry->d_inode;
1521         loff_t          pos;
1522         unsigned long   written;
1523         ssize_t         err;
1524
1525         if ((ssize_t) count < 0)
1526                 return -EINVAL;
1527
1528         if (!access_ok(VERIFY_READ, buf, count))
1529                 return -EFAULT;
1530
1531         mutex_lock(&inode->i_mutex);
1532
1533         pos = *ppos;
1534         written = 0;
1535
1536         err = generic_write_checks(file, &pos, &count, 0);
1537         if (err || !count)
1538                 goto out;
1539
1540         err = remove_suid(file->f_path.dentry);
1541         if (err)
1542                 goto out;
1543
1544         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1545
1546         do {
1547                 struct page *page = NULL;
1548                 unsigned long bytes, index, offset;
1549                 char *kaddr;
1550                 int left;
1551
1552                 offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
1553                 index = pos >> PAGE_CACHE_SHIFT;
1554                 bytes = PAGE_CACHE_SIZE - offset;
1555                 if (bytes > count)
1556                         bytes = count;
1557
1558                 /*
1559                  * We don't hold page lock across copy from user -
1560                  * what would it guard against? - so no deadlock here.
1561                  * But it still may be a good idea to prefault below.
1562                  */
1563
1564                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1565                 if (err)
1566                         break;
1567
1568                 left = bytes;
1569                 if (PageHighMem(page)) {
1570                         volatile unsigned char dummy;
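                        /*
                         * Touch the first and last byte so they are
                         * resident: the atomic copy below runs with
                         * pagefaults disabled and cannot fault the
                         * user buffer in.
                         */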
1571                         __get_user(dummy, buf);
1572                         __get_user(dummy, buf + bytes - 1);
1573
1574                         kaddr = kmap_atomic(page, KM_USER0);
1575                         left = __copy_from_user_inatomic(kaddr + offset,
1576                                                         buf, bytes);
1577                         kunmap_atomic(kaddr, KM_USER0);
1578                 }
1579                 if (left) {
1580                         kaddr = kmap(page);
1581                         left = __copy_from_user(kaddr + offset, buf, bytes);
1582                         kunmap(page);
1583                 }
1584
1585                 written += bytes;
1586                 count -= bytes;
1587                 pos += bytes;
1588                 buf += bytes;
1589                 if (pos > inode->i_size)
1590                         i_size_write(inode, pos);
1591
1592                 flush_dcache_page(page);
1593                 set_page_dirty(page);
1594                 mark_page_accessed(page);
1595                 page_cache_release(page);
1596
1597                 if (left) {
1598                         pos -= left;
1599                         written -= left;
1600                         err = -EFAULT;
1601                         break;
1602                 }
1603
1604                 /*
1605                  * Our dirty pages are not counted in nr_dirty,
1606                  * and we do not attempt to balance dirty pages.
1607                  */
1608
1609                 cond_resched();
1610         } while (count);
1611
1612         *ppos = pos;
1613         if (written)
1614                 err = written;
1615 out:
1616         mutex_unlock(&inode->i_mutex);
1617         return err;
1618 }
1619
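/*
 * Read loop: fetch each page with SGP_READ (which never allocates) and
 * feed it to the actor.  Holes are fed from ZERO_PAGE, and i_size is
 * re-read after each lookup because reads run without i_mutex and can
 * race with truncate.
 */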
1620 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1621 {
1622         struct inode *inode = filp->f_path.dentry->d_inode;
1623         struct address_space *mapping = inode->i_mapping;
1624         unsigned long index, offset;
1625
1626         index = *ppos >> PAGE_CACHE_SHIFT;
1627         offset = *ppos & ~PAGE_CACHE_MASK;
1628
1629         for (;;) {
1630                 struct page *page = NULL;
1631                 unsigned long end_index, nr, ret;
1632                 loff_t i_size = i_size_read(inode);
1633
1634                 end_index = i_size >> PAGE_CACHE_SHIFT;
1635                 if (index > end_index)
1636                         break;
1637                 if (index == end_index) {
1638                         nr = i_size & ~PAGE_CACHE_MASK;
1639                         if (nr <= offset)
1640                                 break;
1641                 }
1642
1643                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1644                 if (desc->error) {
1645                         if (desc->error == -EINVAL)
1646                                 desc->error = 0;
1647                         break;
1648                 }
1649
1650                 /*
1651          * We must re-check i_size after shmem_getpage, since reads
1652          * (unlike writes) are called without i_mutex protection against truncate
1653                  */
1654                 nr = PAGE_CACHE_SIZE;
1655                 i_size = i_size_read(inode);
1656                 end_index = i_size >> PAGE_CACHE_SHIFT;
1657                 if (index == end_index) {
1658                         nr = i_size & ~PAGE_CACHE_MASK;
1659                         if (nr <= offset) {
1660                                 if (page)
1661                                         page_cache_release(page);
1662                                 break;
1663                         }
1664                 }
1665                 nr -= offset;
1666
1667                 if (page) {
1668                         /*
1669                          * If users can be writing to this page using arbitrary
1670                          * virtual addresses, take care about potential aliasing
1671                          * before reading the page on the kernel side.
1672                          */
1673                         if (mapping_writably_mapped(mapping))
1674                                 flush_dcache_page(page);
1675                         /*
1676                          * Mark the page accessed if we read the beginning.
1677                          */
1678                         if (!offset)
1679                                 mark_page_accessed(page);
1680                 } else {
1681                         page = ZERO_PAGE(0);
1682                         page_cache_get(page);
1683                 }
1684
1685                 /*
1686                  * Ok, we have the page, and it's up-to-date, so
1687                  * now we can copy it to user space...
1688                  *
1689                  * The actor routine returns how many bytes were actually used.
1690                  * NOTE! This may not be the same as how much of a user buffer
1691                  * we filled up (we may be padding etc), so we can only update
1692                  * "pos" here (the actor routine has to update the user buffer
1693                  * pointers and the remaining count).
1694                  */
1695                 ret = actor(desc, page, offset, nr);
1696                 offset += ret;
1697                 index += offset >> PAGE_CACHE_SHIFT;
1698                 offset &= ~PAGE_CACHE_MASK;
1699
1700                 page_cache_release(page);
1701                 if (ret != nr || !desc->count)
1702                         break;
1703
1704                 cond_resched();
1705         }
1706
1707         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1708         file_accessed(filp);
1709 }
1710
1711 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1712 {
1713         read_descriptor_t desc;
1714
1715         if ((ssize_t) count < 0)
1716                 return -EINVAL;
1717         if (!access_ok(VERIFY_WRITE, buf, count))
1718                 return -EFAULT;
1719         if (!count)
1720                 return 0;
1721
1722         desc.written = 0;
1723         desc.count = count;
1724         desc.arg.buf = buf;
1725         desc.error = 0;
1726
1727         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1728         if (desc.written)
1729                 return desc.written;
1730         return desc.error;
1731 }
1732
1733 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1734 {
1735         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1736
1737         buf->f_type = TMPFS_MAGIC;
1738         buf->f_bsize = PAGE_CACHE_SIZE;
1739         buf->f_namelen = NAME_MAX;
1740         spin_lock(&sbinfo->stat_lock);
1741         if (sbinfo->max_blocks) {
1742                 buf->f_blocks = sbinfo->max_blocks;
1743                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1744         }
1745         if (sbinfo->max_inodes) {
1746                 buf->f_files = sbinfo->max_inodes;
1747                 buf->f_ffree = sbinfo->free_inodes;
1748         }
1749         /* else leave those fields 0 like simple_statfs */
1750         spin_unlock(&sbinfo->stat_lock);
1751         return 0;
1752 }
1753
1754 /*
1755  * File creation. Allocate an inode, and we're done.
1756  */
1757 static int
1758 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1759 {
1760         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1761         int error = -ENOSPC;
1762
1763         if (inode) {
1764                 error = security_inode_init_security(inode, dir, NULL, NULL,
1765                                                      NULL);
1766                 if (error) {
1767                         if (error != -EOPNOTSUPP) {
1768                                 iput(inode);
1769                                 return error;
1770                         }
1771                 }
1772                 error = shmem_acl_init(inode, dir);
1773                 if (error) {
1774                         iput(inode);
1775                         return error;
1776                 }
1777                 if (dir->i_mode & S_ISGID) {
1778                         inode->i_gid = dir->i_gid;
1779                         if (S_ISDIR(mode))
1780                                 inode->i_mode |= S_ISGID;
1781                 }
1782                 dir->i_size += BOGO_DIRENT_SIZE;
1783                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1784                 d_instantiate(dentry, inode);
1785                 dget(dentry); /* Extra count - pin the dentry in core */
1786         }
1787         return error;
1788 }
1789
1790 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1791 {
1792         int error;
1793
1794         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1795                 return error;
1796         inc_nlink(dir);
1797         return 0;
1798 }
1799
1800 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1801                 struct nameidata *nd)
1802 {
1803         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1804 }
1805
1806 /*
1807  * Link a file.
1808  */
1809 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1810 {
1811         struct inode *inode = old_dentry->d_inode;
1812         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1813
1814         /*
1815          * No ordinary (disk based) filesystem counts links as inodes;
1816          * but each new link needs a new dentry, pinning lowmem, and
1817          * tmpfs dentries cannot be pruned until they are unlinked.
1818          */
1819         if (sbinfo->max_inodes) {
1820                 spin_lock(&sbinfo->stat_lock);
1821                 if (!sbinfo->free_inodes) {
1822                         spin_unlock(&sbinfo->stat_lock);
1823                         return -ENOSPC;
1824                 }
1825                 sbinfo->free_inodes--;
1826                 spin_unlock(&sbinfo->stat_lock);
1827         }
1828
1829         dir->i_size += BOGO_DIRENT_SIZE;
1830         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1831         inc_nlink(inode);
1832         atomic_inc(&inode->i_count);    /* New dentry reference */
1833         dget(dentry);           /* Extra pinning count for the created dentry */
1834         d_instantiate(dentry, inode);
1835         return 0;
1836 }
1837
1838 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1839 {
1840         struct inode *inode = dentry->d_inode;
1841
1842         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1843                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1844                 if (sbinfo->max_inodes) {
1845                         spin_lock(&sbinfo->stat_lock);
1846                         sbinfo->free_inodes++;
1847                         spin_unlock(&sbinfo->stat_lock);
1848                 }
1849         }
1850
1851         dir->i_size -= BOGO_DIRENT_SIZE;
1852         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1853         drop_nlink(inode);
1854         dput(dentry);   /* Undo the count from "create" - this does all the work */
1855         return 0;
1856 }
1857
1858 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1859 {
1860         if (!simple_empty(dentry))
1861                 return -ENOTEMPTY;
1862
1863         drop_nlink(dentry->d_inode);
1864         drop_nlink(dir);
1865         return shmem_unlink(dir, dentry);
1866 }
1867
1868 /*
1869  * The VFS layer already does all the dentry stuff for rename;
1870  * we just have to decrement the usage count for the target if
1871  * it exists, so that the VFS layer correctly frees it when it
1872  * gets overwritten.
1873  */
1874 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1875 {
1876         struct inode *inode = old_dentry->d_inode;
1877         int they_are_dirs = S_ISDIR(inode->i_mode);
1878
1879         if (!simple_empty(new_dentry))
1880                 return -ENOTEMPTY;
1881
1882         if (new_dentry->d_inode) {
1883                 (void) shmem_unlink(new_dir, new_dentry);
1884                 if (they_are_dirs)
1885                         drop_nlink(old_dir);
1886         } else if (they_are_dirs) {
1887                 drop_nlink(old_dir);
1888                 inc_nlink(new_dir);
1889         }
1890
1891         old_dir->i_size -= BOGO_DIRENT_SIZE;
1892         new_dir->i_size += BOGO_DIRENT_SIZE;
1893         old_dir->i_ctime = old_dir->i_mtime =
1894         new_dir->i_ctime = new_dir->i_mtime =
1895         inode->i_ctime = CURRENT_TIME;
1896         return 0;
1897 }
1898
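/*
 * Symlink targets short enough to fit in the shmem_inode_info area are
 * stored inline there (such inodes must never touch the mempolicy
 * rbtree, see the S_IFLNK case in shmem_get_inode); longer targets are
 * written into page 0 of the file.
 */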
1899 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1900 {
1901         int error;
1902         int len;
1903         struct inode *inode;
1904         struct page *page = NULL;
1905         char *kaddr;
1906         struct shmem_inode_info *info;
1907
1908         len = strlen(symname) + 1;
1909         if (len > PAGE_CACHE_SIZE)
1910                 return -ENAMETOOLONG;
1911
1912         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1913         if (!inode)
1914                 return -ENOSPC;
1915
1916         error = security_inode_init_security(inode, dir, NULL, NULL,
1917                                              NULL);
1918         if (error) {
1919                 if (error != -EOPNOTSUPP) {
1920                         iput(inode);
1921                         return error;
1922                 }
1923                 error = 0;
1924         }
1925
1926         info = SHMEM_I(inode);
1927         inode->i_size = len-1;
1928         if (len <= (char *)inode - (char *)info) {
1929                 /* do it inline */
1930                 memcpy(info, symname, len);
1931                 inode->i_op = &shmem_symlink_inline_operations;
1932         } else {
1933                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1934                 if (error) {
1935                         iput(inode);
1936                         return error;
1937                 }
1938                 inode->i_op = &shmem_symlink_inode_operations;
1939                 kaddr = kmap_atomic(page, KM_USER0);
1940                 memcpy(kaddr, symname, len);
1941                 kunmap_atomic(kaddr, KM_USER0);
1942                 set_page_dirty(page);
1943                 page_cache_release(page);
1944         }
1945         if (dir->i_mode & S_ISGID)
1946                 inode->i_gid = dir->i_gid;
1947         dir->i_size += BOGO_DIRENT_SIZE;
1948         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1949         d_instantiate(dentry, inode);
1950         dget(dentry);
1951         return 0;
1952 }
1953
1954 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1955 {
1956         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1957         return NULL;
1958 }
1959
1960 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1961 {
1962         struct page *page = NULL;
1963         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1964         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1965         return page;
1966 }
1967
1968 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1969 {
1970         if (!IS_ERR(nd_get_link(nd))) {
1971                 struct page *page = cookie;
1972                 kunmap(page);
1973                 mark_page_accessed(page);
1974                 page_cache_release(page);
1975         }
1976 }
1977
1978 static const struct inode_operations shmem_symlink_inline_operations = {
1979         .readlink       = generic_readlink,
1980         .follow_link    = shmem_follow_link_inline,
1981 };
1982
1983 static const struct inode_operations shmem_symlink_inode_operations = {
1984         .truncate       = shmem_truncate,
1985         .readlink       = generic_readlink,
1986         .follow_link    = shmem_follow_link,
1987         .put_link       = shmem_put_link,
1988 };
1989
1990 #ifdef CONFIG_TMPFS_POSIX_ACL
1991 /*
1992  * Superblocks without xattr inode operations will get security.* xattr
1993  * support from the VFS "for free". As soon as we have any other xattrs
1994  * like ACLs, we also need to implement the security.* handlers at
1995  * filesystem level, though.
1996  */
1997
1998 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1999                                         size_t list_len, const char *name,
2000                                         size_t name_len)
2001 {
2002         return security_inode_listsecurity(inode, list, list_len);
2003 }
2004
2005 static int shmem_xattr_security_get(struct inode *inode, const char *name,
2006                                     void *buffer, size_t size)
2007 {
2008         if (strcmp(name, "") == 0)
2009                 return -EINVAL;
2010         return security_inode_getsecurity(inode, name, buffer, size,
2011                                           -EOPNOTSUPP);
2012 }
2013
2014 static int shmem_xattr_security_set(struct inode *inode, const char *name,
2015                                     const void *value, size_t size, int flags)
2016 {
2017         if (strcmp(name, "") == 0)
2018                 return -EINVAL;
2019         return security_inode_setsecurity(inode, name, value, size, flags);
2020 }
2021
2022 static struct xattr_handler shmem_xattr_security_handler = {
2023         .prefix = XATTR_SECURITY_PREFIX,
2024         .list   = shmem_xattr_security_list,
2025         .get    = shmem_xattr_security_get,
2026         .set    = shmem_xattr_security_set,
2027 };
2028
2029 static struct xattr_handler *shmem_xattr_handlers[] = {
2030         &shmem_xattr_acl_access_handler,
2031         &shmem_xattr_acl_default_handler,
2032         &shmem_xattr_security_handler,
2033         NULL
2034 };
2035 #endif
2036
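/*
 * NFS export operations: a tmpfs file handle packs i_generation and a
 * 64-bit inode number into three 32-bit words.  Lookup goes through
 * ilookup5() with an inum+generation hash, since inodes are only
 * hashed lazily in shmem_encode_fh(); directories carry no parent
 * information, so get_parent can only return -ESTALE.
 */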
2037 static struct dentry *shmem_get_parent(struct dentry *child)
2038 {
2039         return ERR_PTR(-ESTALE);
2040 }
2041
2042 static int shmem_match(struct inode *ino, void *vfh)
2043 {
2044         __u32 *fh = vfh;
2045         __u64 inum = fh[2];
2046         inum = (inum << 32) | fh[1];
2047         return ino->i_ino == inum && fh[0] == ino->i_generation;
2048 }
2049
2050 static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
2051 {
2052         struct dentry *de = NULL;
2053         struct inode *inode;
2054         __u32 *fh = vfh;
2055         __u64 inum = fh[2];
2056         inum = (inum << 32) | fh[1];
2057
2058         inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
2059         if (inode) {
2060                 de = d_find_alias(inode);
2061                 iput(inode);
2062         }
2063
2064         return de? de: ERR_PTR(-ESTALE);
2065 }
2066
2067 static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
2068                 int len, int type,
2069                 int (*acceptable)(void *context, struct dentry *de),
2070                 void *context)
2071 {
2072         if (len < 3)
2073                 return ERR_PTR(-ESTALE);
2074
2075         return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
2076                                                         context);
2077 }
2078
2079 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2080                                 int connectable)
2081 {
2082         struct inode *inode = dentry->d_inode;
2083
2084         if (*len < 3)
2085                 return 255;
2086
2087         if (hlist_unhashed(&inode->i_hash)) {
2088                 /* Unfortunately insert_inode_hash is not idempotent,
2089                  * so as we hash inodes here rather than at creation
2090                  * time, we need a lock to ensure we only try
2091                  * to do it once
2092                  */
2093                 static DEFINE_SPINLOCK(lock);
2094                 spin_lock(&lock);
2095                 if (hlist_unhashed(&inode->i_hash))
2096                         __insert_inode_hash(inode,
2097                                             inode->i_ino + inode->i_generation);
2098                 spin_unlock(&lock);
2099         }
2100
2101         fh[0] = inode->i_generation;
2102         fh[1] = inode->i_ino;
2103         fh[2] = ((__u64)inode->i_ino) >> 32;
2104
2105         *len = 3;
2106         return 1;
2107 }
2108
2109 static struct export_operations shmem_export_ops = {
2110         .get_parent     = shmem_get_parent,
2111         .get_dentry     = shmem_get_dentry,
2112         .encode_fh      = shmem_encode_fh,
2113         .decode_fh      = shmem_decode_fh,
2114 };
2115
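/*
 * Parse the comma-separated mount option string.  NULL mode/uid/gid
 * pointers mean those options are not changeable here (the remount
 * case).  An illustrative option string:
 *
 *      size=50%,nr_inodes=8192,mode=1777,uid=0,gid=0
 *
 * "size" accepts memparse() suffixes or a percentage of totalram_pages,
 * and is converted to a count of PAGE_CACHE_SIZE blocks.
 */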
2116 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
2117         gid_t *gid, unsigned long *blocks, unsigned long *inodes,
2118         int *policy, nodemask_t *policy_nodes)
2119 {
2120         char *this_char, *value, *rest;
2121
2122         while (options != NULL) {
2123                 this_char = options;
2124                 for (;;) {
2125                         /*
2126                          * NUL-terminate this option: unfortunately,
2127                          * mount options form a comma-separated list,
2128                          * but mpol's nodelist may also contain commas.
2129                          */
2130                         options = strchr(options, ',');
2131                         if (options == NULL)
2132                                 break;
2133                         options++;
2134                         if (!isdigit(*options)) {
2135                                 options[-1] = '\0';
2136                                 break;
2137                         }
2138                 }
2139                 if (!*this_char)
2140                         continue;
2141                 if ((value = strchr(this_char,'=')) != NULL) {
2142                         *value++ = 0;
2143                 } else {
2144                         printk(KERN_ERR
2145                             "tmpfs: No value for mount option '%s'\n",
2146                             this_char);
2147                         return 1;
2148                 }
2149
2150                 if (!strcmp(this_char,"size")) {
2151                         unsigned long long size;
2152                         size = memparse(value,&rest);
2153                         if (*rest == '%') {
2154                                 size <<= PAGE_SHIFT;
2155                                 size *= totalram_pages;
2156                                 do_div(size, 100);
2157                                 rest++;
2158                         }
2159                         if (*rest)
2160                                 goto bad_val;
2161                         *blocks = size >> PAGE_CACHE_SHIFT;
2162                 } else if (!strcmp(this_char,"nr_blocks")) {
2163                         *blocks = memparse(value,&rest);
2164                         if (*rest)
2165                                 goto bad_val;
2166                 } else if (!strcmp(this_char,"nr_inodes")) {
2167                         *inodes = memparse(value,&rest);
2168                         if (*rest)
2169                                 goto bad_val;
2170                 } else if (!strcmp(this_char,"mode")) {
2171                         if (!mode)
2172                                 continue;
2173                         *mode = simple_strtoul(value,&rest,8);
2174                         if (*rest)
2175                                 goto bad_val;
2176                 } else if (!strcmp(this_char,"uid")) {
2177                         if (!uid)
2178                                 continue;
2179                         *uid = simple_strtoul(value,&rest,0);
2180                         if (*rest)
2181                                 goto bad_val;
2182                 } else if (!strcmp(this_char,"gid")) {
2183                         if (!gid)
2184                                 continue;
2185                         *gid = simple_strtoul(value,&rest,0);
2186                         if (*rest)
2187                                 goto bad_val;
2188                 } else if (!strcmp(this_char,"mpol")) {
2189                         if (shmem_parse_mpol(value,policy,policy_nodes))
2190                                 goto bad_val;
2191                 } else {
2192                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2193                                this_char);
2194                         return 1;
2195                 }
2196         }
2197         return 0;
2198
2199 bad_val:
2200         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2201                value, this_char);
2202         return 1;
2203
2204 }
2205
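/*
 * Remount: only the limits and memory policy may change.  New limits
 * must not drop below what is already in use, and a filesystem mounted
 * without limits cannot acquire them later (see the comment below).
 */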
2206 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2207 {
2208         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2209         unsigned long max_blocks = sbinfo->max_blocks;
2210         unsigned long max_inodes = sbinfo->max_inodes;
2211         int policy = sbinfo->policy;
2212         nodemask_t policy_nodes = sbinfo->policy_nodes;
2213         unsigned long blocks;
2214         unsigned long inodes;
2215         int error = -EINVAL;
2216
2217         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2218                                 &max_inodes, &policy, &policy_nodes))
2219                 return error;
2220
2221         spin_lock(&sbinfo->stat_lock);
2222         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2223         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2224         if (max_blocks < blocks)
2225                 goto out;
2226         if (max_inodes < inodes)
2227                 goto out;
2228         /*
2229          * Those tests also disallow limited->unlimited while any are in
2230          * use, so i_blocks will always be zero when max_blocks is zero;
2231          * but we must separately disallow unlimited->limited, because
2232          * in that case we have no record of how much is already in use.
2233          */
2234         if (max_blocks && !sbinfo->max_blocks)
2235                 goto out;
2236         if (max_inodes && !sbinfo->max_inodes)
2237                 goto out;
2238
2239         error = 0;
2240         sbinfo->max_blocks  = max_blocks;
2241         sbinfo->free_blocks = max_blocks - blocks;
2242         sbinfo->max_inodes  = max_inodes;
2243         sbinfo->free_inodes = max_inodes - inodes;
2244         sbinfo->policy = policy;
2245         sbinfo->policy_nodes = policy_nodes;
2246 out:
2247         spin_unlock(&sbinfo->stat_lock);
2248         return error;
2249 }
2250 #endif
2251
2252 static void shmem_put_super(struct super_block *sb)
2253 {
2254         kfree(sb->s_fs_info);
2255         sb->s_fs_info = NULL;
2256 }
2257
2258 static int shmem_fill_super(struct super_block *sb,
2259                             void *data, int silent)
2260 {
2261         struct inode *inode;
2262         struct dentry *root;
2263         int mode   = S_IRWXUGO | S_ISVTX;
2264         uid_t uid = current->fsuid;
2265         gid_t gid = current->fsgid;
2266         int err = -ENOMEM;
2267         struct shmem_sb_info *sbinfo;
2268         unsigned long blocks = 0;
2269         unsigned long inodes = 0;
2270         int policy = MPOL_DEFAULT;
2271         nodemask_t policy_nodes = node_online_map;
2272
2273 #ifdef CONFIG_TMPFS
2274         /*
2275          * By default we only allow half of the physical RAM per
2276          * tmpfs instance, limiting inodes to one per page of lowmem;
2277          * but the internal instance is left unlimited.
2278          */
2279         if (!(sb->s_flags & MS_NOUSER)) {
2280                 blocks = totalram_pages / 2;
2281                 inodes = totalram_pages - totalhigh_pages;
2282                 if (inodes > blocks)
2283                         inodes = blocks;
2284                 if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2285                                         &inodes, &policy, &policy_nodes))
2286                         return -EINVAL;
2287         }
2288         sb->s_export_op = &shmem_export_ops;
2289 #else
2290         sb->s_flags |= MS_NOUSER;
2291 #endif
2292
2293         /* Round up to L1_CACHE_BYTES to resist false sharing */
2294         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2295                                 L1_CACHE_BYTES), GFP_KERNEL);
2296         if (!sbinfo)
2297                 return -ENOMEM;
2298
2299         spin_lock_init(&sbinfo->stat_lock);
2300         sbinfo->max_blocks = blocks;
2301         sbinfo->free_blocks = blocks;
2302         sbinfo->max_inodes = inodes;
2303         sbinfo->free_inodes = inodes;
2304         sbinfo->policy = policy;
2305         sbinfo->policy_nodes = policy_nodes;
2306
2307         sb->s_fs_info = sbinfo;
2308         sb->s_maxbytes = SHMEM_MAX_BYTES;
2309         sb->s_blocksize = PAGE_CACHE_SIZE;
2310         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2311         sb->s_magic = TMPFS_MAGIC;
2312         sb->s_op = &shmem_ops;
2313         sb->s_time_gran = 1;
2314 #ifdef CONFIG_TMPFS_POSIX_ACL
2315         sb->s_xattr = shmem_xattr_handlers;
2316         sb->s_flags |= MS_POSIXACL;
2317 #endif
2318
2319         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2320         if (!inode)
2321                 goto failed;
2322         inode->i_uid = uid;
2323         inode->i_gid = gid;
2324         root = d_alloc_root(inode);
2325         if (!root)
2326                 goto failed_iput;
2327         sb->s_root = root;
2328         return 0;
2329
2330 failed_iput:
2331         iput(inode);
2332 failed:
2333         shmem_put_super(sb);
2334         return err;
2335 }
2336
2337 static struct kmem_cache *shmem_inode_cachep;
2338
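/*
 * Inode slab: shmem_inode_info embeds the VFS inode, and init_once()
 * runs once per slab object to initialise the embedded inode (and the
 * cached ACL pointers), so alloc_inode itself is a bare cache_alloc.
 */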
2339 static struct inode *shmem_alloc_inode(struct super_block *sb)
2340 {
2341         struct shmem_inode_info *p;
2342         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2343         if (!p)
2344                 return NULL;
2345         return &p->vfs_inode;
2346 }
2347
2348 static void shmem_destroy_inode(struct inode *inode)
2349 {
2350         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2351                 /* only struct inode is valid if it's an inline symlink */
2352                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2353         }
2354         shmem_acl_destroy_inode(inode);
2355         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2356 }
2357
2358 static void init_once(void *foo, struct kmem_cache *cachep,
2359                       unsigned long flags)
2360 {
2361         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2362
2363         inode_init_once(&p->vfs_inode);
2364 #ifdef CONFIG_TMPFS_POSIX_ACL
2365         p->i_acl = NULL;
2366         p->i_default_acl = NULL;
2367 #endif
2368 }
2369
2370 static int init_inodecache(void)
2371 {
2372         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2373                                 sizeof(struct shmem_inode_info),
2374                                 0, 0, init_once, NULL);
2375         if (shmem_inode_cachep == NULL)
2376                 return -ENOMEM;
2377         return 0;
2378 }
2379
2380 static void destroy_inodecache(void)
2381 {
2382         kmem_cache_destroy(shmem_inode_cachep);
2383 }
2384
2385 static const struct address_space_operations shmem_aops = {
2386         .writepage      = shmem_writepage,
2387         .set_page_dirty = __set_page_dirty_no_writeback,
2388 #ifdef CONFIG_TMPFS
2389         .readpage       = shmem_readpage,
2390         .prepare_write  = shmem_prepare_write,
2391         .commit_write   = simple_commit_write,
2392 #endif
2393         .migratepage    = migrate_page,
2394 };
2395
2396 static const struct file_operations shmem_file_operations = {
2397         .mmap           = shmem_mmap,
2398 #ifdef CONFIG_TMPFS
2399         .llseek         = generic_file_llseek,
2400         .read           = shmem_file_read,
2401         .write          = shmem_file_write,
2402         .fsync          = simple_sync_file,
2403         .splice_read    = generic_file_splice_read,
2404         .splice_write   = generic_file_splice_write,
2405 #endif
2406 };
2407
2408 static const struct inode_operations shmem_inode_operations = {
2409         .truncate       = shmem_truncate,
2410         .setattr        = shmem_notify_change,
2411         .truncate_range = shmem_truncate_range,
2412 #ifdef CONFIG_TMPFS_POSIX_ACL
2413         .setxattr       = generic_setxattr,
2414         .getxattr       = generic_getxattr,
2415         .listxattr      = generic_listxattr,
2416         .removexattr    = generic_removexattr,
2417         .permission     = shmem_permission,
2418 #endif
2420 };
2421
2422 static const struct inode_operations shmem_dir_inode_operations = {
2423 #ifdef CONFIG_TMPFS
2424         .create         = shmem_create,
2425         .lookup         = simple_lookup,
2426         .link           = shmem_link,
2427         .unlink         = shmem_unlink,
2428         .symlink        = shmem_symlink,
2429         .mkdir          = shmem_mkdir,
2430         .rmdir          = shmem_rmdir,
2431         .mknod          = shmem_mknod,
2432         .rename         = shmem_rename,
2433 #endif
2434 #ifdef CONFIG_TMPFS_POSIX_ACL
2435         .setattr        = shmem_notify_change,
2436         .setxattr       = generic_setxattr,
2437         .getxattr       = generic_getxattr,
2438         .listxattr      = generic_listxattr,
2439         .removexattr    = generic_removexattr,
2440         .permission     = shmem_permission,
2441 #endif
2442 };
2443
2444 static const struct inode_operations shmem_special_inode_operations = {
2445 #ifdef CONFIG_TMPFS_POSIX_ACL
2446         .setattr        = shmem_notify_change,
2447         .setxattr       = generic_setxattr,
2448         .getxattr       = generic_getxattr,
2449         .listxattr      = generic_listxattr,
2450         .removexattr    = generic_removexattr,
2451         .permission     = shmem_permission,
2452 #endif
2453 };
2454
2455 static const struct super_operations shmem_ops = {
2456         .alloc_inode    = shmem_alloc_inode,
2457         .destroy_inode  = shmem_destroy_inode,
2458 #ifdef CONFIG_TMPFS
2459         .statfs         = shmem_statfs,
2460         .remount_fs     = shmem_remount_fs,
2461 #endif
2462         .delete_inode   = shmem_delete_inode,
2463         .drop_inode     = generic_delete_inode,
2464         .put_super      = shmem_put_super,
2465 };
2466
2467 static struct vm_operations_struct shmem_vm_ops = {
2468         .nopage         = shmem_nopage,
2469         .populate       = shmem_populate,
2470 #ifdef CONFIG_NUMA
2471         .set_policy     = shmem_set_policy,
2472         .get_policy     = shmem_get_policy,
2473 #endif
2474 };
2475
2476
2477 static int shmem_get_sb(struct file_system_type *fs_type,
2478         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2479 {
2480         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2481 }
2482
2483 static struct file_system_type tmpfs_fs_type = {
2484         .owner          = THIS_MODULE,
2485         .name           = "tmpfs",
2486         .get_sb         = shmem_get_sb,
2487         .kill_sb        = kill_litter_super,
2488 };
2489 static struct vfsmount *shm_mnt;
2490
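/*
 * Register the filesystem and create the internal kernel mount backing
 * shmem_file_setup() and shmem_zero_setup(); on failure shm_mnt holds
 * the error for those callers to report.
 */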
2491 static int __init init_tmpfs(void)
2492 {
2493         int error;
2494
2495         error = init_inodecache();
2496         if (error)
2497                 goto out3;
2498
2499         error = register_filesystem(&tmpfs_fs_type);
2500         if (error) {
2501                 printk(KERN_ERR "Could not register tmpfs\n");
2502                 goto out2;
2503         }
2504
2505         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2506                                 tmpfs_fs_type.name, NULL);
2507         if (IS_ERR(shm_mnt)) {
2508                 error = PTR_ERR(shm_mnt);
2509                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2510                 goto out1;
2511         }
2512         return 0;
2513
2514 out1:
2515         unregister_filesystem(&tmpfs_fs_type);
2516 out2:
2517         destroy_inodecache();
2518 out3:
2519         shm_mnt = ERR_PTR(error);
2520         return error;
2521 }
2522 module_init(init_tmpfs)
2523
2524 /*
2525  * shmem_file_setup - get an unlinked file living in tmpfs
2526  *
2527  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2528  * @size: size to be set for the file
2529  * @flags: vm_flags of the region; VM_ACCOUNT selects strict size accounting
2530  */
2531 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2532 {
2533         int error;
2534         struct file *file;
2535         struct inode *inode;
2536         struct dentry *dentry, *root;
2537         struct qstr this;
2538
2539         if (IS_ERR(shm_mnt))
2540                 return (void *)shm_mnt;
2541
2542         if (size < 0 || size > SHMEM_MAX_BYTES)
2543                 return ERR_PTR(-EINVAL);
2544
2545         if (shmem_acct_size(flags, size))
2546                 return ERR_PTR(-ENOMEM);
2547
2548         error = -ENOMEM;
2549         this.name = name;
2550         this.len = strlen(name);
2551         this.hash = 0; /* will go */
2552         root = shm_mnt->mnt_root;
2553         dentry = d_alloc(root, &this);
2554         if (!dentry)
2555                 goto put_memory;
2556
2557         error = -ENFILE;
2558         file = get_empty_filp();
2559         if (!file)
2560                 goto put_dentry;
2561
2562         error = -ENOSPC;
2563         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2564         if (!inode)
2565                 goto close_file;
2566
2567         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2568         d_instantiate(dentry, inode);
2569         inode->i_size = size;
2570         inode->i_nlink = 0;     /* It is unlinked */
2571         file->f_path.mnt = mntget(shm_mnt);
2572         file->f_path.dentry = dentry;
2573         file->f_mapping = inode->i_mapping;
2574         file->f_op = &shmem_file_operations;
2575         file->f_mode = FMODE_WRITE | FMODE_READ;
2576         return file;
2577
2578 close_file:
2579         put_filp(file);
2580 put_dentry:
2581         dput(dentry);
2582 put_memory:
2583         shmem_unacct_size(flags, size);
2584         return ERR_PTR(error);
2585 }
2586
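/*
 * Illustrative use (cf. shmem_zero_setup below): callers do roughly
 *
 *      file = shmem_file_setup("dev/zero", size, vma->vm_flags);
 *      if (IS_ERR(file))
 *              return PTR_ERR(file);
 *
 * and then attach the unlinked file to a vma or a file descriptor.
 */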
2587 /*
2588  * shmem_zero_setup - setup a shared anonymous mapping
2589  *
2590  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2591  */
2592 int shmem_zero_setup(struct vm_area_struct *vma)
2593 {
2594         struct file *file;
2595         loff_t size = vma->vm_end - vma->vm_start;
2596
2597         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2598         if (IS_ERR(file))
2599                 return PTR_ERR(file);
2600
2601         if (vma->vm_file)
2602                 fput(vma->vm_file);
2603         vma->vm_file = file;
2604         vma->vm_ops = &shmem_vm_ops;
2605         vma->vm_flags |= VM_CAN_INVALIDATE;
2606         return 0;
2607 }