/* mm/shmem.c, at "[PATCH] holepunch: fix shmem_truncate_range punching too far" */
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC     0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
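
/*
 * Worked example, as a sketch only: assuming 4K PAGE_CACHE_SIZE,
 * 32-bit unsigned long, and SHMEM_NR_DIRECT == 16 (from shmem_fs.h):
 *   ENTRIES_PER_PAGE     = 4096/4 = 1024 swp_entry_ts per index page
 *   ENTRIES_PER_PAGEPAGE = 1024 * 1024
 *   SHMEM_MAX_INDEX      = 16 + (1024*1024/2) * 1025, about 5.4e8 pages
 *   SHMEM_MAX_BYTES      is then roughly 2TB for a single file
 */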

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_QUICK,      /* don't try more than file page cache lookup */
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};
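
/*
 * How these are used in this file: shmem_nopage and blocking
 * shmem_populate pass SGP_CACHE, nonblocking shmem_populate passes
 * SGP_QUICK, shmem_notify_change uses SGP_READ to hold a partial page
 * across truncation, and the write paths (shmem_file_write,
 * shmem_prepare_write) use SGP_WRITE to extend beyond i_size.
 */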

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         */
        return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_ACCOUNT)?
                security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (flags & VM_ACCOUNT)
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_ACCOUNT)?
                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (!(flags & VM_ACCOUNT))
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
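
/*
 * Concrete example of the two accounting styles (a sketch, assuming
 * 4K pages): a 1MB shared anonymous mapping with VM_ACCOUNT set is
 * charged VM_ACCT(1M) == 256 pages up front by shmem_acct_size, while
 * a sparse 1MB tmpfs file is charged one page per shmem_acct_block
 * call as blocks are actually allocated, so its holes cost nothing.
 */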

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks += pages;
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}

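/*
 * For instance, if info->alloced == 10, info->swapped == 2 and
 * nrpages == 5, the VM has reclaimed 3 undirtied hole pages behind
 * our back, and the calculation above returns those 3 blocks to the
 * filesystem and the accounting.
 */
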
/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
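/*
 * Example walk for the layout above (ENTRIES_PER_PAGE == 4,
 * SHMEM_NR_DIRECT == 16), looking up index 30: index -= 16 gives 14,
 * offset = 14 % 4 = 2, index = 14 / 4 = 3.  Since 3 >= 4/2 we are in
 * the triple-indirect half: index -= 2 gives 1, dir advances to the
 * dir2 slot, index %= 4 stays 1, so subdir is dir2's second page
 * ("28-31") and the entry sits at offset 2 within it.
 */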
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
                struct page *page = kmap_atomic_to_page(entry);
                set_page_private(page, page_private(page) + incdec);
        }
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks <= 1) {
                                spin_unlock(&sbinfo->stat_lock);
                                return ERR_PTR(-ENOSPC);
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                }

                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
                if (page)
                        set_page_private(page, 0);
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
                int offset, int limit, struct page ***dir)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}

static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;
        int punch_hole;
        unsigned long upper_limit;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
                upper_limit = SHMEM_MAX_INDEX;
                info->next_index = idx;
                punch_hole = 0;
        } else {
                if (end + 1 >= inode->i_size) { /* we may free a little more */
                        limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
                                                        PAGE_CACHE_SHIFT;
                        upper_limit = SHMEM_MAX_INDEX;
                } else {
                        limit = (end + 1) >> PAGE_CACHE_SHIFT;
                        upper_limit = limit;
                }
                punch_hole = 1;
        }
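
        /*
         * Example (a sketch, assuming 4K pages): truncating to size 0
         * takes the branch above with limit = next_index and
         * upper_limit = SHMEM_MAX_INDEX, so all index pages past idx
         * may be freed.  Punching a 4K hole at offset 4K in a larger
         * file gives limit == upper_limit == 2: only the swap entry
         * for index 1 is cleared, and the index pages are preserved.
         */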

        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
        }

        /*
         * If there are no indirect blocks or we are punching a hole
         * below indirect blocks, nothing to be done.
         */
        if (!topdir || limit <= SHMEM_NR_DIRECT)
                goto done2;

        upper_limit -= SHMEM_NR_DIRECT;
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset && upper_limit >= stage) {
                                *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        if (upper_limit >= stage) {
                                *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                subdir = dir[diroff];
                if (subdir && page_private(subdir)) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                                offset, size, &dir);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset)
                                spin_lock(&info->lock);
                        set_page_private(subdir, page_private(subdir) - freed);
                        if (offset)
                                spin_unlock(&info->lock);
                        if (!punch_hole)
                                BUG_ON(page_private(subdir) > offset);
                }
                if (offset)
                        offset = 0;
                else if (subdir && upper_limit - idx >= ENTRIES_PER_PAGE) {
                        dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}

static void shmem_truncate(struct inode *inode)
{
        shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (!error && (attr->ia_valid & ATTR_MODE))
                error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
        if (page)
                page_cache_release(page);
        return error;
}

static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        list_del_init(&info->swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
        }
        BUG_ON(inode->i_blocks);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
        clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0) {
                shmem_swp_balance_unmap();
                goto found;
        }
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && page_private(subdir)) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
                        shmem_swp_unmap(ptr);
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = &info->vfs_inode;
        if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr + offset, 0);
        }
        shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
        /*
         * Decrement swap count even when the entry is left behind:
         * try_to_unuse will skip over mms, then reincrement count.
         */
        swap_free(entry);
        return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;

        spin_lock(&shmem_swaplist_lock);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
                else if (shmem_unuse_inode(info, entry, page)) {
                        /* move head to start search for next from here */
                        list_move_tail(&shmem_swaplist, &info->swaplist);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&shmem_swaplist_lock);
        return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
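/*
 * On success the page ends up in the swap cache with its swp_entry_t
 * recorded in the inode's swap vector; on any failure the page is
 * redirtied and AOP_WRITEPAGE_ACTIVATE asks vmscan to keep it around.
 */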
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        BUG_ON(page_mapped(page));

        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        BUG_ON(!entry);
        BUG_ON(entry->val);

        if (move_to_swap_cache(page, swap) == 0) {
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                spin_unlock(&info->lock);
                if (list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        /* move instead of add in case we're racing */
                        list_move_tail(&info->swaplist, &shmem_swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
                unlock_page(page);
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        swap_free(swap);
redirty:
        set_page_dirty(page);
        return AOP_WRITEPAGE_ACTIVATE;  /* Return with the page locked */
}

#ifdef CONFIG_NUMA
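/*
 * Example mpol= mount option strings accepted below (a sketch):
 *   mpol=default          no nodelist allowed
 *   mpol=prefer:2         exactly one node
 *   mpol=bind:0-3         nodelist required, nodelist_parse syntax
 *   mpol=interleave       nodelist optional, defaults to online nodes
 */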
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        char *nodelist = strchr(value, ':');
        int err = 1;

        if (nodelist) {
                /* NUL-terminate policy string */
                *nodelist++ = '\0';
                if (nodelist_parse(nodelist, *policy_nodes))
                        goto out;
        }
        if (!strcmp(value, "default")) {
                *policy = MPOL_DEFAULT;
                /* Don't allow a nodelist */
                if (!nodelist)
                        err = 0;
        } else if (!strcmp(value, "prefer")) {
                *policy = MPOL_PREFERRED;
                /* Insist on a nodelist of one node only */
                if (nodelist) {
                        char *rest = nodelist;
                        while (isdigit(*rest))
                                rest++;
                        if (!*rest)
                                err = 0;
                }
        } else if (!strcmp(value, "bind")) {
                *policy = MPOL_BIND;
                /* Insist on a nodelist */
                if (nodelist)
                        err = 0;
        } else if (!strcmp(value, "interleave")) {
                *policy = MPOL_INTERLEAVE;
                /* Default to nodes online if no nodelist */
                if (!nodelist)
                        *policy_nodes = node_online_map;
                err = 0;
        }
out:
        /* Restore string for error message */
        if (nodelist)
                *--nodelist = ':';
        return err;
}

static struct page *shmem_swapin_async(struct shared_policy *p,
                                       swp_entry_t entry, unsigned long idx)
{
        struct page *page;
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_end = PAGE_SIZE;
        pvma.vm_pgoff = idx;
        pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
        page = read_swap_cache_async(entry, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
                          unsigned long idx)
{
        struct shared_policy *p = &info->policy;
        int i, num;
        struct page *page;
        unsigned long offset;

        num = valid_swaphandles(entry, &offset);
        for (i = 0; i < num; offset++, i++) {
                page = shmem_swapin_async(p,
                                swp_entry(swp_type(entry), offset), idx);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
                 unsigned long idx)
{
        struct vm_area_struct pvma;
        struct page *page;

        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
        pvma.vm_pgoff = idx;
        pvma.vm_end = PAGE_SIZE;
        page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
        swapin_readahead(entry, 0, NULL);
        return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp | __GFP_ZERO);
}
#endif
1017
1018 /*
1019  * shmem_getpage - either get the page from swap or allocate a new one
1020  *
1021  * If we allocate a new one we do not mark it dirty. That's up to the
1022  * vm. If we swap it in we mark it dirty since we also free the swap
1023  * entry since a page cannot live in both the swap and page cache
1024  */
1025 static int shmem_getpage(struct inode *inode, unsigned long idx,
1026                         struct page **pagep, enum sgp_type sgp, int *type)
1027 {
1028         struct address_space *mapping = inode->i_mapping;
1029         struct shmem_inode_info *info = SHMEM_I(inode);
1030         struct shmem_sb_info *sbinfo;
1031         struct page *filepage = *pagep;
1032         struct page *swappage;
1033         swp_entry_t *entry;
1034         swp_entry_t swap;
1035         int error;
1036
1037         if (idx >= SHMEM_MAX_INDEX)
1038                 return -EFBIG;
1039         /*
1040          * Normally, filepage is NULL on entry, and either found
1041          * uptodate immediately, or allocated and zeroed, or read
1042          * in under swappage, which is then assigned to filepage.
1043          * But shmem_prepare_write passes in a locked filepage,
1044          * which may be found not uptodate by other callers too,
1045          * and may need to be copied from the swappage read in.
1046          */
1047 repeat:
1048         if (!filepage)
1049                 filepage = find_lock_page(mapping, idx);
1050         if (filepage && PageUptodate(filepage))
1051                 goto done;
1052         error = 0;
1053         if (sgp == SGP_QUICK)
1054                 goto failed;
1055
1056         spin_lock(&info->lock);
1057         shmem_recalc_inode(inode);
1058         entry = shmem_swp_alloc(info, idx, sgp);
1059         if (IS_ERR(entry)) {
1060                 spin_unlock(&info->lock);
1061                 error = PTR_ERR(entry);
1062                 goto failed;
1063         }
1064         swap = *entry;
1065
1066         if (swap.val) {
1067                 /* Look it up and read it in.. */
1068                 swappage = lookup_swap_cache(swap);
1069                 if (!swappage) {
1070                         shmem_swp_unmap(entry);
1071                         /* here we actually do the io */
1072                         if (type && *type == VM_FAULT_MINOR) {
1073                                 __count_vm_event(PGMAJFAULT);
1074                                 *type = VM_FAULT_MAJOR;
1075                         }
1076                         spin_unlock(&info->lock);
1077                         swappage = shmem_swapin(info, swap, idx);
1078                         if (!swappage) {
1079                                 spin_lock(&info->lock);
1080                                 entry = shmem_swp_alloc(info, idx, sgp);
1081                                 if (IS_ERR(entry))
1082                                         error = PTR_ERR(entry);
1083                                 else {
1084                                         if (entry->val == swap.val)
1085                                                 error = -ENOMEM;
1086                                         shmem_swp_unmap(entry);
1087                                 }
1088                                 spin_unlock(&info->lock);
1089                                 if (error)
1090                                         goto failed;
1091                                 goto repeat;
1092                         }
1093                         wait_on_page_locked(swappage);
1094                         page_cache_release(swappage);
1095                         goto repeat;
1096                 }
1097
1098                 /* We have to do this with page locked to prevent races */
1099                 if (TestSetPageLocked(swappage)) {
1100                         shmem_swp_unmap(entry);
1101                         spin_unlock(&info->lock);
1102                         wait_on_page_locked(swappage);
1103                         page_cache_release(swappage);
1104                         goto repeat;
1105                 }
1106                 if (PageWriteback(swappage)) {
1107                         shmem_swp_unmap(entry);
1108                         spin_unlock(&info->lock);
1109                         wait_on_page_writeback(swappage);
1110                         unlock_page(swappage);
1111                         page_cache_release(swappage);
1112                         goto repeat;
1113                 }
1114                 if (!PageUptodate(swappage)) {
1115                         shmem_swp_unmap(entry);
1116                         spin_unlock(&info->lock);
1117                         unlock_page(swappage);
1118                         page_cache_release(swappage);
1119                         error = -EIO;
1120                         goto failed;
1121                 }
1122
1123                 if (filepage) {
1124                         shmem_swp_set(info, entry, 0);
1125                         shmem_swp_unmap(entry);
1126                         delete_from_swap_cache(swappage);
1127                         spin_unlock(&info->lock);
1128                         copy_highpage(filepage, swappage);
1129                         unlock_page(swappage);
1130                         page_cache_release(swappage);
1131                         flush_dcache_page(filepage);
1132                         SetPageUptodate(filepage);
1133                         set_page_dirty(filepage);
1134                         swap_free(swap);
1135                 } else if (!(error = move_from_swap_cache(
1136                                 swappage, idx, mapping))) {
1137                         info->flags |= SHMEM_PAGEIN;
1138                         shmem_swp_set(info, entry, 0);
1139                         shmem_swp_unmap(entry);
1140                         spin_unlock(&info->lock);
1141                         filepage = swappage;
1142                         swap_free(swap);
1143                 } else {
1144                         shmem_swp_unmap(entry);
1145                         spin_unlock(&info->lock);
1146                         unlock_page(swappage);
1147                         page_cache_release(swappage);
1148                         if (error == -ENOMEM) {
1149                                 /* let kswapd refresh zone for GFP_ATOMICs */
1150                                 congestion_wait(WRITE, HZ/50);
1151                         }
1152                         goto repeat;
1153                 }
1154         } else if (sgp == SGP_READ && !filepage) {
1155                 shmem_swp_unmap(entry);
1156                 filepage = find_get_page(mapping, idx);
1157                 if (filepage &&
1158                     (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
1159                         spin_unlock(&info->lock);
1160                         wait_on_page_locked(filepage);
1161                         page_cache_release(filepage);
1162                         filepage = NULL;
1163                         goto repeat;
1164                 }
1165                 spin_unlock(&info->lock);
1166         } else {
1167                 shmem_swp_unmap(entry);
1168                 sbinfo = SHMEM_SB(inode->i_sb);
1169                 if (sbinfo->max_blocks) {
1170                         spin_lock(&sbinfo->stat_lock);
1171                         if (sbinfo->free_blocks == 0 ||
1172                             shmem_acct_block(info->flags)) {
1173                                 spin_unlock(&sbinfo->stat_lock);
1174                                 spin_unlock(&info->lock);
1175                                 error = -ENOSPC;
1176                                 goto failed;
1177                         }
1178                         sbinfo->free_blocks--;
1179                         inode->i_blocks += BLOCKS_PER_PAGE;
1180                         spin_unlock(&sbinfo->stat_lock);
1181                 } else if (shmem_acct_block(info->flags)) {
1182                         spin_unlock(&info->lock);
1183                         error = -ENOSPC;
1184                         goto failed;
1185                 }
1186
1187                 if (!filepage) {
1188                         spin_unlock(&info->lock);
1189                         filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
1190                                                     info,
1191                                                     idx);
1192                         if (!filepage) {
1193                                 shmem_unacct_blocks(info->flags, 1);
1194                                 shmem_free_blocks(inode, 1);
1195                                 error = -ENOMEM;
1196                                 goto failed;
1197                         }
1198
1199                         spin_lock(&info->lock);
1200                         entry = shmem_swp_alloc(info, idx, sgp);
1201                         if (IS_ERR(entry))
1202                                 error = PTR_ERR(entry);
1203                         else {
1204                                 swap = *entry;
1205                                 shmem_swp_unmap(entry);
1206                         }
1207                         if (error || swap.val || 0 != add_to_page_cache_lru(
1208                                         filepage, mapping, idx, GFP_ATOMIC)) {
1209                                 spin_unlock(&info->lock);
1210                                 page_cache_release(filepage);
1211                                 shmem_unacct_blocks(info->flags, 1);
1212                                 shmem_free_blocks(inode, 1);
1213                                 filepage = NULL;
1214                                 if (error)
1215                                         goto failed;
1216                                 goto repeat;
1217                         }
1218                         info->flags |= SHMEM_PAGEIN;
1219                 }
1220
1221                 info->alloced++;
1222                 spin_unlock(&info->lock);
1223                 flush_dcache_page(filepage);
1224                 SetPageUptodate(filepage);
1225         }
1226 done:
1227         if (*pagep != filepage) {
1228                 unlock_page(filepage);
1229                 *pagep = filepage;
1230         }
1231         return 0;
1232
1233 failed:
1234         if (*pagep != filepage) {
1235                 unlock_page(filepage);
1236                 page_cache_release(filepage);
1237         }
1238         return error;
1239 }
1240
1241 static struct page *shmem_nopage(struct vm_area_struct *vma,
1242                                  unsigned long address, int *type)
1243 {
1244         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1245         struct page *page = NULL;
1246         unsigned long idx;
1247         int error;
1248
1249         idx = (address - vma->vm_start) >> PAGE_SHIFT;
1250         idx += vma->vm_pgoff;
1251         idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
1252         if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1253                 return NOPAGE_SIGBUS;
1254
1255         error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
1256         if (error)
1257                 return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
1258
1259         mark_page_accessed(page);
1260         return page;
1261 }
1262
1263 static int shmem_populate(struct vm_area_struct *vma,
1264         unsigned long addr, unsigned long len,
1265         pgprot_t prot, unsigned long pgoff, int nonblock)
1266 {
1267         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1268         struct mm_struct *mm = vma->vm_mm;
1269         enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
1270         unsigned long size;
1271
1272         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1273         if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
1274                 return -EINVAL;
1275
1276         while ((long) len > 0) {
1277                 struct page *page = NULL;
1278                 int err;
1279                 /*
1280                  * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
1281                  */
1282                 err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
1283                 if (err)
1284                         return err;
1285                 /* Page may still be null, but only if nonblock was set. */
1286                 if (page) {
1287                         mark_page_accessed(page);
1288                         err = install_page(mm, vma, addr, page, prot);
1289                         if (err) {
1290                                 page_cache_release(page);
1291                                 return err;
1292                         }
1293                 } else if (vma->vm_flags & VM_NONLINEAR) {
1294                         /* No page was found just because we can't read it in
1295                          * now (being here implies nonblock != 0), but the page
1296                          * may exist, so set the PTE to fault it in later. */
1297                         err = install_file_pte(mm, vma, addr, pgoff, prot);
1298                         if (err)
1299                                 return err;
1300                 }
1301
1302                 len -= PAGE_SIZE;
1303                 addr += PAGE_SIZE;
1304                 pgoff++;
1305         }
1306         return 0;
1307 }
1308
1309 #ifdef CONFIG_NUMA
1310 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1311 {
1312         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1313         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1314 }
1315
1316 struct mempolicy *
1317 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1318 {
1319         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1320         unsigned long idx;
1321
1322         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1323         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1324 }
1325 #endif
1326
1327 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1328 {
1329         struct inode *inode = file->f_path.dentry->d_inode;
1330         struct shmem_inode_info *info = SHMEM_I(inode);
1331         int retval = -ENOMEM;
1332
1333         spin_lock(&info->lock);
1334         if (lock && !(info->flags & VM_LOCKED)) {
1335                 if (!user_shm_lock(inode->i_size, user))
1336                         goto out_nomem;
1337                 info->flags |= VM_LOCKED;
1338         }
1339         if (!lock && (info->flags & VM_LOCKED) && user) {
1340                 user_shm_unlock(inode->i_size, user);
1341                 info->flags &= ~VM_LOCKED;
1342         }
1343         retval = 0;
1344 out_nomem:
1345         spin_unlock(&info->lock);
1346         return retval;
1347 }
1348
1349 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1350 {
1351         file_accessed(file);
1352         vma->vm_ops = &shmem_vm_ops;
1353         return 0;
1354 }
1355
1356 static struct inode *
1357 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1358 {
1359         struct inode *inode;
1360         struct shmem_inode_info *info;
1361         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1362
1363         if (sbinfo->max_inodes) {
1364                 spin_lock(&sbinfo->stat_lock);
1365                 if (!sbinfo->free_inodes) {
1366                         spin_unlock(&sbinfo->stat_lock);
1367                         return NULL;
1368                 }
1369                 sbinfo->free_inodes--;
1370                 spin_unlock(&sbinfo->stat_lock);
1371         }
1372
1373         inode = new_inode(sb);
1374         if (inode) {
1375                 inode->i_mode = mode;
1376                 inode->i_uid = current->fsuid;
1377                 inode->i_gid = current->fsgid;
1378                 inode->i_blocks = 0;
1379                 inode->i_mapping->a_ops = &shmem_aops;
1380                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1381                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1382                 inode->i_generation = get_seconds();
1383                 info = SHMEM_I(inode);
1384                 memset(info, 0, (char *)inode - (char *)info);
1385                 spin_lock_init(&info->lock);
1386                 INIT_LIST_HEAD(&info->swaplist);
1387
1388                 switch (mode & S_IFMT) {
1389                 default:
1390                         inode->i_op = &shmem_special_inode_operations;
1391                         init_special_inode(inode, mode, dev);
1392                         break;
1393                 case S_IFREG:
1394                         inode->i_op = &shmem_inode_operations;
1395                         inode->i_fop = &shmem_file_operations;
1396                         mpol_shared_policy_init(&info->policy, sbinfo->policy,
1397                                                         &sbinfo->policy_nodes);
1398                         break;
1399                 case S_IFDIR:
1400                         inc_nlink(inode);
1401                         /* Some things misbehave if size == 0 on a directory */
1402                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1403                         inode->i_op = &shmem_dir_inode_operations;
1404                         inode->i_fop = &simple_dir_operations;
1405                         break;
1406                 case S_IFLNK:
1407                         /*
1408                          * Must not load anything in the rbtree,
1409                          * mpol_free_shared_policy will not be called.
1410                          */
1411                         mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1412                                                 NULL);
1413                         break;
1414                 }
1415         } else if (sbinfo->max_inodes) {
1416                 spin_lock(&sbinfo->stat_lock);
1417                 sbinfo->free_inodes++;
1418                 spin_unlock(&sbinfo->stat_lock);
1419         }
1420         return inode;
1421 }
1422
1423 #ifdef CONFIG_TMPFS
1424 static const struct inode_operations shmem_symlink_inode_operations;
1425 static const struct inode_operations shmem_symlink_inline_operations;
1426
1427 /*
1428  * Normally tmpfs makes no use of shmem_prepare_write, but providing
1429  * it lets a tmpfs file be used read-write beneath the loop driver.
1430  */
1431 static int
1432 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1433 {
1434         struct inode *inode = page->mapping->host;
1435         return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1436 }
1437
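/*
 * Illustration only: the loop-over-tmpfs case referred to above, driven
 * from userspace (paths are examples, error handling elided).
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int loop_over_tmpfs(void)
{
        int backing = open("/dev/shm/disk.img", O_RDWR | O_CREAT, 0600);
        int loopdev = open("/dev/loop0", O_RDWR);

        /* writes to /dev/loop0 now reach tmpfs via prepare_write */
        return ioctl(loopdev, LOOP_SET_FD, backing);
}
#endif
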
1438 static ssize_t
1439 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1440 {
1441         struct inode    *inode = file->f_path.dentry->d_inode;
1442         loff_t          pos;
1443         unsigned long   written;
1444         ssize_t         err;
1445
1446         if ((ssize_t) count < 0)
1447                 return -EINVAL;
1448
1449         if (!access_ok(VERIFY_READ, buf, count))
1450                 return -EFAULT;
1451
1452         mutex_lock(&inode->i_mutex);
1453
1454         pos = *ppos;
1455         written = 0;
1456
1457         err = generic_write_checks(file, &pos, &count, 0);
1458         if (err || !count)
1459                 goto out;
1460
1461         err = remove_suid(file->f_path.dentry);
1462         if (err)
1463                 goto out;
1464
1465         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1466
1467         do {
1468                 struct page *page = NULL;
1469                 unsigned long bytes, index, offset;
1470                 char *kaddr;
1471                 int left;
1472
1473                 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
1474                 index = pos >> PAGE_CACHE_SHIFT;
1475                 bytes = PAGE_CACHE_SIZE - offset;
1476                 if (bytes > count)
1477                         bytes = count;
1478
1479                 /*
1480                  * We don't hold page lock across copy from user -
1481                  * what would it guard against? - so no deadlock here.
1482                  * But it still may be a good idea to prefault below.
1483                  */
1484
1485                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1486                 if (err)
1487                         break;
1488
1489                 left = bytes;
1490                 if (PageHighMem(page)) {
1491                         volatile unsigned char dummy;
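                        /*
                         * Fault in the first and last byte of the user
                         * buffer now: no faults may be taken while the
                         * atomic kmap below is held.
                         */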
1492                         __get_user(dummy, buf);
1493                         __get_user(dummy, buf + bytes - 1);
1494
1495                         kaddr = kmap_atomic(page, KM_USER0);
1496                         left = __copy_from_user_inatomic(kaddr + offset,
1497                                                         buf, bytes);
1498                         kunmap_atomic(kaddr, KM_USER0);
1499                 }
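                /*
                 * Lowmem page (no atomic copy was attempted), or the
                 * atomic copy left a residue: do the rest with a
                 * sleeping kmap, where faulting is allowed.
                 */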
1500                 if (left) {
1501                         kaddr = kmap(page);
1502                         left = __copy_from_user(kaddr + offset, buf, bytes);
1503                         kunmap(page);
1504                 }
1505
1506                 written += bytes;
1507                 count -= bytes;
1508                 pos += bytes;
1509                 buf += bytes;
1510                 if (pos > inode->i_size)
1511                         i_size_write(inode, pos);
1512
1513                 flush_dcache_page(page);
1514                 set_page_dirty(page);
1515                 mark_page_accessed(page);
1516                 page_cache_release(page);
1517
1518                 if (left) {
1519                         pos -= left;
1520                         written -= left;
1521                         err = -EFAULT;
1522                         break;
1523                 }
1524
1525                 /*
1526                  * Our dirty pages are not counted in nr_dirty,
1527                  * and we do not attempt to balance dirty pages.
1528                  */
1529
1530                 cond_resched();
1531         } while (count);
1532
1533         *ppos = pos;
1534         if (written)
1535                 err = written;
1536 out:
1537         mutex_unlock(&inode->i_mutex);
1538         return err;
1539 }
1540
1541 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1542 {
1543         struct inode *inode = filp->f_path.dentry->d_inode;
1544         struct address_space *mapping = inode->i_mapping;
1545         unsigned long index, offset;
1546
1547         index = *ppos >> PAGE_CACHE_SHIFT;
1548         offset = *ppos & ~PAGE_CACHE_MASK;
1549
1550         for (;;) {
1551                 struct page *page = NULL;
1552                 unsigned long end_index, nr, ret;
1553                 loff_t i_size = i_size_read(inode);
1554
1555                 end_index = i_size >> PAGE_CACHE_SHIFT;
1556                 if (index > end_index)
1557                         break;
1558                 if (index == end_index) {
1559                         nr = i_size & ~PAGE_CACHE_MASK;
1560                         if (nr <= offset)
1561                                 break;
1562                 }
1563
1564                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1565                 if (desc->error) {
1566                         if (desc->error == -EINVAL)
1567                                 desc->error = 0;
1568                         break;
1569                 }
1570
1571                 /*
1572                  * We must re-check i_size after shmem_getpage: reads
1573                  * (unlike writes) have no i_mutex protection against truncate
1574                  */
1575                 nr = PAGE_CACHE_SIZE;
1576                 i_size = i_size_read(inode);
1577                 end_index = i_size >> PAGE_CACHE_SHIFT;
1578                 if (index == end_index) {
1579                         nr = i_size & ~PAGE_CACHE_MASK;
1580                         if (nr <= offset) {
1581                                 if (page)
1582                                         page_cache_release(page);
1583                                 break;
1584                         }
1585                 }
1586                 nr -= offset;
1587
1588                 if (page) {
1589                         /*
1590                          * If users can be writing to this page using arbitrary
1591                          * virtual addresses, take care about potential aliasing
1592                          * before reading the page on the kernel side.
1593                          */
1594                         if (mapping_writably_mapped(mapping))
1595                                 flush_dcache_page(page);
1596                         /*
1597                          * Mark the page accessed if we read the beginning.
1598                          */
1599                         if (!offset)
1600                                 mark_page_accessed(page);
1601                 } else {
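                        /* hole: read zeroes from the shared zero page */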
1602                         page = ZERO_PAGE(0);
1603                         page_cache_get(page);
1604                 }
1605
1606                 /*
1607                  * Ok, we have the page, and it's up-to-date, so
1608                  * now we can copy it to user space...
1609                  *
1610                  * The actor routine returns how many bytes were actually used.
1611                  * NOTE! This may not be the same as how much of a user buffer
1612                  * we filled up (we may be padding etc), so we can only update
1613                  * "pos" here (the actor routine has to update the user buffer
1614                  * pointers and the remaining count).
1615                  */
1616                 ret = actor(desc, page, offset, nr);
1617                 offset += ret;
1618                 index += offset >> PAGE_CACHE_SHIFT;
1619                 offset &= ~PAGE_CACHE_MASK;
1620
1621                 page_cache_release(page);
1622                 if (ret != nr || !desc->count)
1623                         break;
1624
1625                 cond_resched();
1626         }
1627
1628         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1629         file_accessed(filp);
1630 }
1631
1632 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1633 {
1634         read_descriptor_t desc;
1635
1636         if ((ssize_t) count < 0)
1637                 return -EINVAL;
1638         if (!access_ok(VERIFY_WRITE, buf, count))
1639                 return -EFAULT;
1640         if (!count)
1641                 return 0;
1642
1643         desc.written = 0;
1644         desc.count = count;
1645         desc.arg.buf = buf;
1646         desc.error = 0;
1647
1648         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1649         if (desc.written)
1650                 return desc.written;
1651         return desc.error;
1652 }
1653
1654 static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
1655                          size_t count, read_actor_t actor, void *target)
1656 {
1657         read_descriptor_t desc;
1658
1659         if (!count)
1660                 return 0;
1661
1662         desc.written = 0;
1663         desc.count = count;
1664         desc.arg.data = target;
1665         desc.error = 0;
1666
1667         do_shmem_file_read(in_file, ppos, &desc, actor);
1668         if (desc.written)
1669                 return desc.written;
1670         return desc.error;
1671 }
1672
1673 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1674 {
1675         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1676
1677         buf->f_type = TMPFS_MAGIC;
1678         buf->f_bsize = PAGE_CACHE_SIZE;
1679         buf->f_namelen = NAME_MAX;
1680         spin_lock(&sbinfo->stat_lock);
1681         if (sbinfo->max_blocks) {
1682                 buf->f_blocks = sbinfo->max_blocks;
1683                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1684         }
1685         if (sbinfo->max_inodes) {
1686                 buf->f_files = sbinfo->max_inodes;
1687                 buf->f_ffree = sbinfo->free_inodes;
1688         }
1689         /* else leave those fields 0 like simple_statfs */
1690         spin_unlock(&sbinfo->stat_lock);
1691         return 0;
1692 }
1693
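/*
 * Illustration only: how the fields filled in above appear from
 * userspace; the mount point is an example, and f_blocks == 0 means
 * the instance is unlimited.
 */
#if 0
#include <stdio.h>
#include <sys/vfs.h>

static void show_tmpfs_usage(void)
{
        struct statfs st;

        if (statfs("/dev/shm", &st) == 0 && st.f_blocks)
                printf("tmpfs: %ld of %ld pages free\n",
                       (long)st.f_bfree, (long)st.f_blocks);
}
#endif
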
1694 /*
1695  * File creation. Allocate an inode, and we're done.
1696  */
1697 static int
1698 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1699 {
1700         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1701         int error = -ENOSPC;
1702
1703         if (inode) {
1704                 error = security_inode_init_security(inode, dir, NULL, NULL,
1705                                                      NULL);
1706                 if (error) {
1707                         if (error != -EOPNOTSUPP) {
1708                                 iput(inode);
1709                                 return error;
1710                         }
1711                 }
1712                 error = shmem_acl_init(inode, dir);
1713                 if (error) {
1714                         iput(inode);
1715                         return error;
1716                 }
1717                 if (dir->i_mode & S_ISGID) {
1718                         inode->i_gid = dir->i_gid;
1719                         if (S_ISDIR(mode))
1720                                 inode->i_mode |= S_ISGID;
1721                 }
1722                 dir->i_size += BOGO_DIRENT_SIZE;
1723                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1724                 d_instantiate(dentry, inode);
1725                 dget(dentry); /* Extra count - pin the dentry in core */
1726         }
1727         return error;
1728 }
1729
1730 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1731 {
1732         int error;
1733
1734         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1735                 return error;
1736         inc_nlink(dir);
1737         return 0;
1738 }
1739
1740 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1741                 struct nameidata *nd)
1742 {
1743         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1744 }
1745
1746 /*
1747  * Link a file.
1748  */
1749 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1750 {
1751         struct inode *inode = old_dentry->d_inode;
1752         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1753
1754         /*
1755          * No ordinary (disk based) filesystem counts links as inodes;
1756          * but each new link needs a new dentry, pinning lowmem, and
1757          * tmpfs dentries cannot be pruned until they are unlinked.
1758          */
1759         if (sbinfo->max_inodes) {
1760                 spin_lock(&sbinfo->stat_lock);
1761                 if (!sbinfo->free_inodes) {
1762                         spin_unlock(&sbinfo->stat_lock);
1763                         return -ENOSPC;
1764                 }
1765                 sbinfo->free_inodes--;
1766                 spin_unlock(&sbinfo->stat_lock);
1767         }
1768
1769         dir->i_size += BOGO_DIRENT_SIZE;
1770         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1771         inc_nlink(inode);
1772         atomic_inc(&inode->i_count);    /* New dentry reference */
1773         dget(dentry);           /* Extra pinning count for the created dentry */
1774         d_instantiate(dentry, inode);
1775         return 0;
1776 }
1777
1778 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1779 {
1780         struct inode *inode = dentry->d_inode;
1781
1782         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1783                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1784                 if (sbinfo->max_inodes) {
1785                         spin_lock(&sbinfo->stat_lock);
1786                         sbinfo->free_inodes++;
1787                         spin_unlock(&sbinfo->stat_lock);
1788                 }
1789         }
1790
1791         dir->i_size -= BOGO_DIRENT_SIZE;
1792         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1793         drop_nlink(inode);
1794         dput(dentry);   /* Undo the count from "create" - this does all the work */
1795         return 0;
1796 }
1797
1798 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1799 {
1800         if (!simple_empty(dentry))
1801                 return -ENOTEMPTY;
1802
1803         drop_nlink(dentry->d_inode);
1804         drop_nlink(dir);
1805         return shmem_unlink(dir, dentry);
1806 }
1807
1808 /*
1809  * The VFS layer already does all the dentry stuff for rename;
1810  * we just have to decrement the usage count for the target if
1811  * it exists, so that the VFS layer correctly frees it when it
1812  * gets overwritten.
1813  */
1814 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1815 {
1816         struct inode *inode = old_dentry->d_inode;
1817         int they_are_dirs = S_ISDIR(inode->i_mode);
1818
1819         if (!simple_empty(new_dentry))
1820                 return -ENOTEMPTY;
1821
1822         if (new_dentry->d_inode) {
1823                 (void) shmem_unlink(new_dir, new_dentry);
1824                 if (they_are_dirs)
1825                         drop_nlink(old_dir);
1826         } else if (they_are_dirs) {
1827                 drop_nlink(old_dir);
1828                 inc_nlink(new_dir);
1829         }
1830
1831         old_dir->i_size -= BOGO_DIRENT_SIZE;
1832         new_dir->i_size += BOGO_DIRENT_SIZE;
1833         old_dir->i_ctime = old_dir->i_mtime =
1834         new_dir->i_ctime = new_dir->i_mtime =
1835         inode->i_ctime = CURRENT_TIME;
1836         return 0;
1837 }
1838
1839 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1840 {
1841         int error;
1842         int len;
1843         struct inode *inode;
1844         struct page *page = NULL;
1845         char *kaddr;
1846         struct shmem_inode_info *info;
1847
1848         len = strlen(symname) + 1;
1849         if (len > PAGE_CACHE_SIZE)
1850                 return -ENAMETOOLONG;
1851
1852         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1853         if (!inode)
1854                 return -ENOSPC;
1855
1856         error = security_inode_init_security(inode, dir, NULL, NULL,
1857                                              NULL);
1858         if (error) {
1859                 if (error != -EOPNOTSUPP) {
1860                         iput(inode);
1861                         return error;
1862                 }
1863                 error = 0;
1864         }
1865
1866         info = SHMEM_I(inode);
1867         inode->i_size = len-1;
1868         if (len <= (char *)inode - (char *)info) {
1869                 /* do it inline: the target fits in the info fields before vfs_inode */
1870                 memcpy(info, symname, len);
1871                 inode->i_op = &shmem_symlink_inline_operations;
1872         } else {
1873                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1874                 if (error) {
1875                         iput(inode);
1876                         return error;
1877                 }
1878                 inode->i_op = &shmem_symlink_inode_operations;
1879                 kaddr = kmap_atomic(page, KM_USER0);
1880                 memcpy(kaddr, symname, len);
1881                 kunmap_atomic(kaddr, KM_USER0);
1882                 set_page_dirty(page);
1883                 page_cache_release(page);
1884         }
1885         if (dir->i_mode & S_ISGID)
1886                 inode->i_gid = dir->i_gid;
1887         dir->i_size += BOGO_DIRENT_SIZE;
1888         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1889         d_instantiate(dentry, inode);
1890         dget(dentry);
1891         return 0;
1892 }
1893
1894 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1895 {
1896         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1897         return NULL;
1898 }
1899
1900 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1901 {
1902         struct page *page = NULL;
1903         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1904         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1905         return page;
1906 }
1907
1908 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1909 {
1910         if (!IS_ERR(nd_get_link(nd))) {
1911                 struct page *page = cookie;
1912                 kunmap(page);
1913                 mark_page_accessed(page);
1914                 page_cache_release(page);
1915         }
1916 }
1917
1918 static const struct inode_operations shmem_symlink_inline_operations = {
1919         .readlink       = generic_readlink,
1920         .follow_link    = shmem_follow_link_inline,
1921 };
1922
1923 static const struct inode_operations shmem_symlink_inode_operations = {
1924         .truncate       = shmem_truncate,
1925         .readlink       = generic_readlink,
1926         .follow_link    = shmem_follow_link,
1927         .put_link       = shmem_put_link,
1928 };
1929
1930 #ifdef CONFIG_TMPFS_POSIX_ACL
1931 /*
1932  * Superblocks without xattr inode operations will get security.* xattr
1933  * support from the VFS "for free". As soon as we have any other xattrs
1934  * like ACLs, we also need to implement the security.* handlers at
1935  * filesystem level, though.
1936  */
1937
1938 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1939                                         size_t list_len, const char *name,
1940                                         size_t name_len)
1941 {
1942         return security_inode_listsecurity(inode, list, list_len);
1943 }
1944
1945 static int shmem_xattr_security_get(struct inode *inode, const char *name,
1946                                     void *buffer, size_t size)
1947 {
1948         if (strcmp(name, "") == 0)
1949                 return -EINVAL;
1950         return security_inode_getsecurity(inode, name, buffer, size,
1951                                           -EOPNOTSUPP);
1952 }
1953
1954 static int shmem_xattr_security_set(struct inode *inode, const char *name,
1955                                     const void *value, size_t size, int flags)
1956 {
1957         if (strcmp(name, "") == 0)
1958                 return -EINVAL;
1959         return security_inode_setsecurity(inode, name, value, size, flags);
1960 }
1961
1962 static struct xattr_handler shmem_xattr_security_handler = {
1963         .prefix = XATTR_SECURITY_PREFIX,
1964         .list   = shmem_xattr_security_list,
1965         .get    = shmem_xattr_security_get,
1966         .set    = shmem_xattr_security_set,
1967 };
1968
1969 static struct xattr_handler *shmem_xattr_handlers[] = {
1970         &shmem_xattr_acl_access_handler,
1971         &shmem_xattr_acl_default_handler,
1972         &shmem_xattr_security_handler,
1973         NULL
1974 };
1975 #endif
1976
1977 static struct dentry *shmem_get_parent(struct dentry *child)
1978 {
1979         return ERR_PTR(-ESTALE);
1980 }
1981
1982 static int shmem_match(struct inode *ino, void *vfh)
1983 {
1984         __u32 *fh = vfh;
1985         __u64 inum = fh[2];
1986         inum = (inum << 32) | fh[1];
1987         return ino->i_ino == inum && fh[0] == ino->i_generation;
1988 }
1989
1990 static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
1991 {
1992         struct dentry *de = NULL;
1993         struct inode *inode;
1994         __u32 *fh = vfh;
1995         __u64 inum = fh[2];
1996         inum = (inum << 32) | fh[1];
1997
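        /*
         * shmem_encode_fh below hashes the inode at i_ino + i_generation,
         * so look it up under the same sum; shmem_match then compares
         * both fields exactly.
         */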
1998         inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
1999         if (inode) {
2000                 de = d_find_alias(inode);
2001                 iput(inode);
2002         }
2003
2004         return de ? de : ERR_PTR(-ESTALE);
2005 }
2006
2007 static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
2008                 int len, int type,
2009                 int (*acceptable)(void *context, struct dentry *de),
2010                 void *context)
2011 {
2012         if (len < 3)
2013                 return ERR_PTR(-ESTALE);
2014
2015         return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
2016                                                         context);
2017 }
2018
2019 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2020                                 int connectable)
2021 {
2022         struct inode *inode = dentry->d_inode;
2023
2024         if (*len < 3)
2025                 return 255;
2026
2027         if (hlist_unhashed(&inode->i_hash)) {
2028                 /* Unfortunately insert_inode_hash is not idempotent,
2029                  * so as we hash inodes here rather than at creation
2030                  * time, we need a lock to ensure we only try
2031                  * to do it once.
2032                  */
2033                 static DEFINE_SPINLOCK(lock);
2034                 spin_lock(&lock);
2035                 if (hlist_unhashed(&inode->i_hash))
2036                         __insert_inode_hash(inode,
2037                                             inode->i_ino + inode->i_generation);
2038                 spin_unlock(&lock);
2039         }
2040
2041         fh[0] = inode->i_generation;
2042         fh[1] = inode->i_ino;
2043         fh[2] = ((__u64)inode->i_ino) >> 32;
2044
2045         *len = 3;
2046         return 1;
2047 }
2048
2049 static struct export_operations shmem_export_ops = {
2050         .get_parent     = shmem_get_parent,
2051         .get_dentry     = shmem_get_dentry,
2052         .encode_fh      = shmem_encode_fh,
2053         .decode_fh      = shmem_decode_fh,
2054 };
2055
2056 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
2057         gid_t *gid, unsigned long *blocks, unsigned long *inodes,
2058         int *policy, nodemask_t *policy_nodes)
2059 {
2060         char *this_char, *value, *rest;
2061
2062         while (options != NULL) {
2063                 this_char = options;
2064                 for (;;) {
2065                         /*
2066                          * NUL-terminate this option: unfortunately,
2067                          * mount options form a comma-separated list,
2068                          * but mpol's nodelist may also contain commas.
2069                          */
2070                         options = strchr(options, ',');
2071                         if (options == NULL)
2072                                 break;
2073                         options++;
2074                         if (!isdigit(*options)) {
2075                                 options[-1] = '\0';
2076                                 break;
2077                         }
2078                 }
2079                 if (!*this_char)
2080                         continue;
2081                 if ((value = strchr(this_char,'=')) != NULL) {
2082                         *value++ = 0;
2083                 } else {
2084                         printk(KERN_ERR
2085                             "tmpfs: No value for mount option '%s'\n",
2086                             this_char);
2087                         return 1;
2088                 }
2089
2090                 if (!strcmp(this_char,"size")) {
2091                         unsigned long long size;
2092                         size = memparse(value,&rest);
2093                         if (*rest == '%') {
2094                                 size <<= PAGE_SHIFT;
2095                                 size *= totalram_pages;
2096                                 do_div(size, 100);
2097                                 rest++;
2098                         }
2099                         if (*rest)
2100                                 goto bad_val;
2101                         *blocks = size >> PAGE_CACHE_SHIFT;
2102                 } else if (!strcmp(this_char,"nr_blocks")) {
2103                         *blocks = memparse(value,&rest);
2104                         if (*rest)
2105                                 goto bad_val;
2106                 } else if (!strcmp(this_char,"nr_inodes")) {
2107                         *inodes = memparse(value,&rest);
2108                         if (*rest)
2109                                 goto bad_val;
2110                 } else if (!strcmp(this_char,"mode")) {
2111                         if (!mode)
2112                                 continue;
2113                         *mode = simple_strtoul(value,&rest,8);
2114                         if (*rest)
2115                                 goto bad_val;
2116                 } else if (!strcmp(this_char,"uid")) {
2117                         if (!uid)
2118                                 continue;
2119                         *uid = simple_strtoul(value,&rest,0);
2120                         if (*rest)
2121                                 goto bad_val;
2122                 } else if (!strcmp(this_char,"gid")) {
2123                         if (!gid)
2124                                 continue;
2125                         *gid = simple_strtoul(value,&rest,0);
2126                         if (*rest)
2127                                 goto bad_val;
2128                 } else if (!strcmp(this_char,"mpol")) {
2129                         if (shmem_parse_mpol(value,policy,policy_nodes))
2130                                 goto bad_val;
2131                 } else {
2132                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2133                                this_char);
2134                         return 1;
2135                 }
2136         }
2137         return 0;
2138
2139 bad_val:
2140         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2141                value, this_char);
2142         return 1;
2143
2144 }
2145
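/*
 * Example option strings accepted by the parser above (values are
 * illustrative; memparse also understands k/m/g suffixes):
 *
 *      mount -t tmpfs -o size=50% tmpfs /mnt
 *      mount -t tmpfs -o nr_blocks=1024,nr_inodes=10k,mode=1777 tmpfs /mnt
 *      mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *
 * A comma followed by a digit is not treated as an option separator,
 * so that an mpol nodelist such as "0-3,5" survives intact.
 */
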
2146 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2147 {
2148         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2149         unsigned long max_blocks = sbinfo->max_blocks;
2150         unsigned long max_inodes = sbinfo->max_inodes;
2151         int policy = sbinfo->policy;
2152         nodemask_t policy_nodes = sbinfo->policy_nodes;
2153         unsigned long blocks;
2154         unsigned long inodes;
2155         int error = -EINVAL;
2156
2157         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2158                                 &max_inodes, &policy, &policy_nodes))
2159                 return error;
2160
2161         spin_lock(&sbinfo->stat_lock);
2162         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2163         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2164         if (max_blocks < blocks)
2165                 goto out;
2166         if (max_inodes < inodes)
2167                 goto out;
2168         /*
2169          * Those tests also disallow limited->unlimited while any are in
2170          * use, so i_blocks will always be zero when max_blocks is zero;
2171          * but we must separately disallow unlimited->limited, because
2172          * in that case we have no record of how much is already in use.
2173          */
2174         if (max_blocks && !sbinfo->max_blocks)
2175                 goto out;
2176         if (max_inodes && !sbinfo->max_inodes)
2177                 goto out;
2178
2179         error = 0;
2180         sbinfo->max_blocks  = max_blocks;
2181         sbinfo->free_blocks = max_blocks - blocks;
2182         sbinfo->max_inodes  = max_inodes;
2183         sbinfo->free_inodes = max_inodes - inodes;
2184         sbinfo->policy = policy;
2185         sbinfo->policy_nodes = policy_nodes;
2186 out:
2187         spin_unlock(&sbinfo->stat_lock);
2188         return error;
2189 }
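
/*
 * For example (illustrative), "mount -o remount,size=2g /mnt" arrives
 * here: shrinking below what is already in use, or switching an
 * unlimited instance to limited, fails with -EINVAL as reasoned above.
 */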
2190 #endif
2191
2192 static void shmem_put_super(struct super_block *sb)
2193 {
2194         kfree(sb->s_fs_info);
2195         sb->s_fs_info = NULL;
2196 }
2197
2198 static int shmem_fill_super(struct super_block *sb,
2199                             void *data, int silent)
2200 {
2201         struct inode *inode;
2202         struct dentry *root;
2203         int mode   = S_IRWXUGO | S_ISVTX;
2204         uid_t uid = current->fsuid;
2205         gid_t gid = current->fsgid;
2206         int err = -ENOMEM;
2207         struct shmem_sb_info *sbinfo;
2208         unsigned long blocks = 0;
2209         unsigned long inodes = 0;
2210         int policy = MPOL_DEFAULT;
2211         nodemask_t policy_nodes = node_online_map;
2212
2213 #ifdef CONFIG_TMPFS
2214         /*
2215          * By default we only allow half of the physical ram per
2216          * tmpfs instance, limiting inodes to one per page of lowmem;
2217          * but the internal instance is left unlimited.
2218          */
2219         if (!(sb->s_flags & MS_NOUSER)) {
2220                 blocks = totalram_pages / 2;
2221                 inodes = totalram_pages - totalhigh_pages;
2222                 if (inodes > blocks)
2223                         inodes = blocks;
2224                 if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2225                                         &inodes, &policy, &policy_nodes))
2226                         return -EINVAL;
2227         }
2228         sb->s_export_op = &shmem_export_ops;
2229 #else
2230         sb->s_flags |= MS_NOUSER;
2231 #endif
2232
2233         /* Round up to L1_CACHE_BYTES to resist false sharing */
2234         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2235                                 L1_CACHE_BYTES), GFP_KERNEL);
2236         if (!sbinfo)
2237                 return -ENOMEM;
2238
2239         spin_lock_init(&sbinfo->stat_lock);
2240         sbinfo->max_blocks = blocks;
2241         sbinfo->free_blocks = blocks;
2242         sbinfo->max_inodes = inodes;
2243         sbinfo->free_inodes = inodes;
2244         sbinfo->policy = policy;
2245         sbinfo->policy_nodes = policy_nodes;
2246
2247         sb->s_fs_info = sbinfo;
2248         sb->s_maxbytes = SHMEM_MAX_BYTES;
2249         sb->s_blocksize = PAGE_CACHE_SIZE;
2250         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2251         sb->s_magic = TMPFS_MAGIC;
2252         sb->s_op = &shmem_ops;
2253         sb->s_time_gran = 1;
2254 #ifdef CONFIG_TMPFS_POSIX_ACL
2255         sb->s_xattr = shmem_xattr_handlers;
2256         sb->s_flags |= MS_POSIXACL;
2257 #endif
2258
2259         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2260         if (!inode)
2261                 goto failed;
2262         inode->i_uid = uid;
2263         inode->i_gid = gid;
2264         root = d_alloc_root(inode);
2265         if (!root)
2266                 goto failed_iput;
2267         sb->s_root = root;
2268         return 0;
2269
2270 failed_iput:
2271         iput(inode);
2272 failed:
2273         shmem_put_super(sb);
2274         return err;
2275 }
2276
2277 static struct kmem_cache *shmem_inode_cachep;
2278
2279 static struct inode *shmem_alloc_inode(struct super_block *sb)
2280 {
2281         struct shmem_inode_info *p;
2282         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2283         if (!p)
2284                 return NULL;
2285         return &p->vfs_inode;
2286 }
2287
2288 static void shmem_destroy_inode(struct inode *inode)
2289 {
2290         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2291                 /* only a regular file has a shared policy to free: an inline symlink stores its target over it */
2292                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2293         }
2294         shmem_acl_destroy_inode(inode);
2295         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2296 }
2297
2298 static void init_once(void *foo, struct kmem_cache *cachep,
2299                       unsigned long flags)
2300 {
2301         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2302
2303         if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2304             SLAB_CTOR_CONSTRUCTOR) {
2305                 inode_init_once(&p->vfs_inode);
2306 #ifdef CONFIG_TMPFS_POSIX_ACL
2307                 p->i_acl = NULL;
2308                 p->i_default_acl = NULL;
2309 #endif
2310         }
2311 }
2312
2313 static int init_inodecache(void)
2314 {
2315         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2316                                 sizeof(struct shmem_inode_info),
2317                                 0, 0, init_once, NULL);
2318         if (shmem_inode_cachep == NULL)
2319                 return -ENOMEM;
2320         return 0;
2321 }
2322
2323 static void destroy_inodecache(void)
2324 {
2325         kmem_cache_destroy(shmem_inode_cachep);
2326 }
2327
2328 static const struct address_space_operations shmem_aops = {
2329         .writepage      = shmem_writepage,
2330         .set_page_dirty = __set_page_dirty_no_writeback,
2331 #ifdef CONFIG_TMPFS
2332         .prepare_write  = shmem_prepare_write,
2333         .commit_write   = simple_commit_write,
2334 #endif
2335         .migratepage    = migrate_page,
2336 };
2337
2338 static const struct file_operations shmem_file_operations = {
2339         .mmap           = shmem_mmap,
2340 #ifdef CONFIG_TMPFS
2341         .llseek         = generic_file_llseek,
2342         .read           = shmem_file_read,
2343         .write          = shmem_file_write,
2344         .fsync          = simple_sync_file,
2345         .sendfile       = shmem_file_sendfile,
2346 #endif
2347 };
2348
2349 static const struct inode_operations shmem_inode_operations = {
2350         .truncate       = shmem_truncate,
2351         .setattr        = shmem_notify_change,
2352         .truncate_range = shmem_truncate_range,
2353 #ifdef CONFIG_TMPFS_POSIX_ACL
2354         .setxattr       = generic_setxattr,
2355         .getxattr       = generic_getxattr,
2356         .listxattr      = generic_listxattr,
2357         .removexattr    = generic_removexattr,
2358         .permission     = shmem_permission,
2359 #endif
2360
2361 };
2362
2363 static const struct inode_operations shmem_dir_inode_operations = {
2364 #ifdef CONFIG_TMPFS
2365         .create         = shmem_create,
2366         .lookup         = simple_lookup,
2367         .link           = shmem_link,
2368         .unlink         = shmem_unlink,
2369         .symlink        = shmem_symlink,
2370         .mkdir          = shmem_mkdir,
2371         .rmdir          = shmem_rmdir,
2372         .mknod          = shmem_mknod,
2373         .rename         = shmem_rename,
2374 #endif
2375 #ifdef CONFIG_TMPFS_POSIX_ACL
2376         .setattr        = shmem_notify_change,
2377         .setxattr       = generic_setxattr,
2378         .getxattr       = generic_getxattr,
2379         .listxattr      = generic_listxattr,
2380         .removexattr    = generic_removexattr,
2381         .permission     = shmem_permission,
2382 #endif
2383 };
2384
2385 static const struct inode_operations shmem_special_inode_operations = {
2386 #ifdef CONFIG_TMPFS_POSIX_ACL
2387         .setattr        = shmem_notify_change,
2388         .setxattr       = generic_setxattr,
2389         .getxattr       = generic_getxattr,
2390         .listxattr      = generic_listxattr,
2391         .removexattr    = generic_removexattr,
2392         .permission     = shmem_permission,
2393 #endif
2394 };
2395
2396 static const struct super_operations shmem_ops = {
2397         .alloc_inode    = shmem_alloc_inode,
2398         .destroy_inode  = shmem_destroy_inode,
2399 #ifdef CONFIG_TMPFS
2400         .statfs         = shmem_statfs,
2401         .remount_fs     = shmem_remount_fs,
2402 #endif
2403         .delete_inode   = shmem_delete_inode,
2404         .drop_inode     = generic_delete_inode,
2405         .put_super      = shmem_put_super,
2406 };
2407
2408 static struct vm_operations_struct shmem_vm_ops = {
2409         .nopage         = shmem_nopage,
2410         .populate       = shmem_populate,
2411 #ifdef CONFIG_NUMA
2412         .set_policy     = shmem_set_policy,
2413         .get_policy     = shmem_get_policy,
2414 #endif
2415 };
2416
2417
2418 static int shmem_get_sb(struct file_system_type *fs_type,
2419         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2420 {
2421         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2422 }
2423
2424 static struct file_system_type tmpfs_fs_type = {
2425         .owner          = THIS_MODULE,
2426         .name           = "tmpfs",
2427         .get_sb         = shmem_get_sb,
2428         .kill_sb        = kill_litter_super,
2429 };
2430 static struct vfsmount *shm_mnt;
2431
2432 static int __init init_tmpfs(void)
2433 {
2434         int error;
2435
2436         error = init_inodecache();
2437         if (error)
2438                 goto out3;
2439
2440         error = register_filesystem(&tmpfs_fs_type);
2441         if (error) {
2442                 printk(KERN_ERR "Could not register tmpfs\n");
2443                 goto out2;
2444         }
2445
2446         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2447                                 tmpfs_fs_type.name, NULL);
2448         if (IS_ERR(shm_mnt)) {
2449                 error = PTR_ERR(shm_mnt);
2450                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2451                 goto out1;
2452         }
2453         return 0;
2454
2455 out1:
2456         unregister_filesystem(&tmpfs_fs_type);
2457 out2:
2458         destroy_inodecache();
2459 out3:
2460         shm_mnt = ERR_PTR(error);
2461         return error;
2462 }
2463 module_init(init_tmpfs)
2464
2465 /*
2466  * shmem_file_setup - get an unlinked file living in tmpfs
2467  *
2468  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2469  * @size: size to be set for the file
2470  * @flags: vm flags, of which only VM_ACCOUNT is used here
2471  */
2472 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2473 {
2474         int error;
2475         struct file *file;
2476         struct inode *inode;
2477         struct dentry *dentry, *root;
2478         struct qstr this;
2479
2480         if (IS_ERR(shm_mnt))
2481                 return (void *)shm_mnt;
2482
2483         if (size < 0 || size > SHMEM_MAX_BYTES)
2484                 return ERR_PTR(-EINVAL);
2485
2486         if (shmem_acct_size(flags, size))
2487                 return ERR_PTR(-ENOMEM);
2488
2489         error = -ENOMEM;
2490         this.name = name;
2491         this.len = strlen(name);
2492         this.hash = 0; /* will go */
2493         root = shm_mnt->mnt_root;
2494         dentry = d_alloc(root, &this);
2495         if (!dentry)
2496                 goto put_memory;
2497
2498         error = -ENFILE;
2499         file = get_empty_filp();
2500         if (!file)
2501                 goto put_dentry;
2502
2503         error = -ENOSPC;
2504         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2505         if (!inode)
2506                 goto close_file;
2507
2508         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2509         d_instantiate(dentry, inode);
2510         inode->i_size = size;
2511         inode->i_nlink = 0;     /* It is unlinked */
2512         file->f_path.mnt = mntget(shm_mnt);
2513         file->f_path.dentry = dentry;
2514         file->f_mapping = inode->i_mapping;
2515         file->f_op = &shmem_file_operations;
2516         file->f_mode = FMODE_WRITE | FMODE_READ;
2517         return file;
2518
2519 close_file:
2520         put_filp(file);
2521 put_dentry:
2522         dput(dentry);
2523 put_memory:
2524         shmem_unacct_size(flags, size);
2525         return ERR_PTR(error);
2526 }
2527
2528 /*
2529  * shmem_zero_setup - setup a shared anonymous mapping
2530  *
2531  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2532  */
2533 int shmem_zero_setup(struct vm_area_struct *vma)
2534 {
2535         struct file *file;
2536         loff_t size = vma->vm_end - vma->vm_start;
2537
2538         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2539         if (IS_ERR(file))
2540                 return PTR_ERR(file);
2541
2542         if (vma->vm_file)
2543                 fput(vma->vm_file);
2544         vma->vm_file = file;
2545         vma->vm_ops = &shmem_vm_ops;
2546         return 0;
2547 }
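
/*
 * Illustration only: a shared anonymous mmap is what leads do_mmap_pgoff
 * to call shmem_zero_setup above.
 */
#if 0
#include <sys/mman.h>

static void *shared_zero(size_t size)
{
        /* vma->vm_file becomes the unlinked "dev/zero" tmpfs file */
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}
#endif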