]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
tmpfs: interleave the starting node of /dev/shmem
authorNathan Zimmer <nzimmer@sgi.com>
Sat, 21 Jul 2012 00:54:35 +0000 (10:54 +1000)
committerStephen Rothwell <sfr@canb.auug.org.au>
Wed, 25 Jul 2012 03:53:08 +0000 (13:53 +1000)
The tmpfs superblock assigns an offset to each inode as it is created.
Each inode then uses that offset as the preferred first node for its
interleave, via the newly provided shmem_interleave operation.

Signed-off-by: Nathan Zimmer <nzimmer@sgi.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/shmem_fs.h
mm/mempolicy.c
mm/shmem.c

index bd079a1b0fdca27cf896e86f7ffb212302f6be73..e932adb331f7ebe3bd8ac8d8be8fbd8a6250ca12 100644 (file)
@@ -238,6 +238,13 @@ struct vm_operations_struct {
         */
        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                        unsigned long addr);
+
+       /*
+        * If the policy is interleave allow the vma to suggest a node.
+        */
+       unsigned long (*interleave)(struct vm_area_struct *vma,
+                                       unsigned long addr);
+
        int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
                const nodemask_t *to, unsigned long flags);
 #endif
index bef2cf00b3be68f3d3ed69dd78f977e737ad84ce..6995556d5691cf7b019f671ec941d1871bdf0dd7 100644 (file)
@@ -17,6 +17,7 @@ struct shmem_inode_info {
                char            *symlink;       /* unswappable short symlink */
        };
        struct shared_policy    policy;         /* NUMA memory alloc policy */
+       unsigned long           node_offset;    /* bias for interleaved nodes */
        struct list_head        swaplist;       /* chain of maybes on swap */
        struct list_head        xattr_list;     /* list of shmem_xattr */
        struct inode            vfs_inode;
@@ -32,6 +33,8 @@ struct shmem_sb_info {
        kgid_t gid;                 /* Mount gid for root directory */
        umode_t mode;               /* Mount mode for root directory */
        struct mempolicy *mpol;     /* default memory policy for mappings */
+       unsigned long next_pref_node;
+                        /* next interleave bias to suggest for inodes */
 };
 
 static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
index bd92431d4c49a8e29f4b46d50d6d0e66c69098d5..b66a59def30d51d5f9b4e1aca3708fcce93e5adc 100644 (file)
@@ -1669,6 +1669,10 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 {
        if (vma) {
                unsigned long off;
+               if (vma->vm_ops && vma->vm_ops->interleave) {
+                       off = vma->vm_ops->interleave(vma, addr);
+                       return offset_il_node(pol, vma, off);
+               }
 
                /*
                 * for small pages, there is no difference between
index deb2e8b8e23be5d26e28556bbb96b55782864b27..cee03c024cbd39d01de8aaad05aa46f90ea88e12 100644 (file)
@@ -931,6 +931,7 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
        pvma.vm_start = 0;
        pvma.vm_pgoff = index;
        pvma.vm_policy = spol;
+       pvma.vm_private_data = (void *) info->node_offset;
        if (pvma.vm_policy)
                pvma.vm_ops = &shmem_vm_ops;
        else
@@ -947,6 +948,7 @@ static struct page *shmem_alloc_page(gfp_t gfp,
        pvma.vm_start = 0;
        pvma.vm_pgoff = index;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+       pvma.vm_private_data = (void *) info->node_offset;
        if (pvma.vm_policy)
                pvma.vm_ops = &shmem_vm_ops;
        else
@@ -1329,6 +1331,19 @@ static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
        index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
 }
+
+static unsigned long shmem_interleave(struct vm_area_struct *vma,
+                                       unsigned long addr)
+{
+       unsigned long offset;
+
+       /*
+        * Start from the inode's per-file interleave bias: shmem_swapin()
+        * and shmem_alloc_page() stash info->node_offset in
+        * vm_private_data.  Cast to unsigned long (integer round-trip),
+        * not to a pointer type.
+        */
+       offset = (unsigned long) vma->vm_private_data;
+
+       /* Advance by the file-relative page index of this address. */
+       offset += ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+
+       return offset;
+}
 #endif
 
 int shmem_lock(struct file *file, int lock, struct user_struct *user)
@@ -1401,6 +1416,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                        inode->i_fop = &shmem_file_operations;
                        mpol_shared_policy_init(&info->policy,
                                                 shmem_get_sbmpol(sbinfo));
+                       info->node_offset = ++(sbinfo->next_pref_node);
                        break;
                case S_IFDIR:
                        inc_nlink(inode);
@@ -2795,6 +2811,7 @@ static const struct super_operations shmem_ops = {
 static const struct vm_operations_struct shmem_vm_ops = {
        .fault          = shmem_fault,
 #ifdef CONFIG_NUMA
+       .interleave     = shmem_interleave,
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
 #endif