diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 796820925de0b758c5eea87a4e3484de07e0e56f..f1a0ae6e11b86b3020c90d7241ba12d47d2bbaa8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,6 +59,7 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include <net/tcp_memcontrol.h>
+#include "slab.h"
 
 #include <asm/uaccess.h>
 
@@ -312,7 +313,7 @@ struct mem_cgroup {
 
        atomic_t        dead_count;
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-       struct tcp_memcontrol tcp_mem;
+       struct cg_proto tcp_mem;
 #endif
 #if defined(CONFIG_MEMCG_KMEM)
        /* analogous to slab_common's slab_caches list. per-memcg */
@@ -574,13 +575,13 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
        if (!memcg || mem_cgroup_is_root(memcg))
                return NULL;
 
-       return &memcg->tcp_mem.cg_proto;
+       return &memcg->tcp_mem;
 }
 EXPORT_SYMBOL(tcp_proto_cgroup);
 
 static void disarm_sock_keys(struct mem_cgroup *memcg)
 {
-       if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+       if (!memcg_proto_activated(&memcg->tcp_mem))
                return;
        static_key_slow_dec(&memcg_socket_limit_enabled);
 }
@@ -2968,7 +2969,7 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
 
        VM_BUG_ON(p->is_root_cache);
        cachep = p->root_cache;
-       return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+       return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
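
Note: cache_from_memcg_idx(), which replaces the open-coded memcg_caches[] dereferences in this patch, lives in mm/slab.h. A minimal sketch of what it is assumed to do (the exact guards in this tree may differ):

/* mm/slab.h (sketch, not verbatim from this tree) */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	if (!s->memcg_params)
		return NULL;
	return s->memcg_params->memcg_caches[idx];
}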
 
 #ifdef CONFIG_SLABINFO
@@ -2997,21 +2998,14 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
        struct res_counter *fail_res;
        struct mem_cgroup *_memcg;
        int ret = 0;
-       bool may_oom;
 
        ret = res_counter_charge(&memcg->kmem, size, &fail_res);
        if (ret)
                return ret;
 
-       /*
-        * Conditions under which we can wait for the oom_killer. Those are
-        * the same conditions tested by the core page allocator
-        */
-       may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
        _memcg = memcg;
        ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-                                     &_memcg, may_oom);
+                                     &_memcg, oom_gfp_allowed(gfp));
 
        if (ret == -EINTR)  {
                /*
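
Note: the removed may_oom computation is assumed to move into an oom_gfp_allowed() helper carrying the same test as the core page allocator; a sketch of the expected definition (assumed to live in include/linux/oom.h):

/* include/linux/oom.h (sketch): same condition the page allocator tests */
static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}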
@@ -3151,7 +3145,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
        struct memcg_cache_params *cur_params = s->memcg_params;
 
-       VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
+       VM_BUG_ON(!is_root_cache(s));
 
        if (num_groups > memcg_limited_groups_array_size) {
                int i;
@@ -3412,7 +3406,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
        idx = memcg_cache_id(memcg);
 
        mutex_lock(&memcg_cache_mutex);
-       new_cachep = cachep->memcg_params->memcg_caches[idx];
+       new_cachep = cache_from_memcg_idx(cachep, idx);
        if (new_cachep) {
                css_put(&memcg->css);
                goto out;
@@ -3458,8 +3452,8 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
         * we'll take the set_limit_mutex to protect ourselves against this.
         */
        mutex_lock(&set_limit_mutex);
-       for (i = 0; i < memcg_limited_groups_array_size; i++) {
-               c = s->memcg_params->memcg_caches[i];
+       for_each_memcg_cache_index(i) {
+               c = cache_from_memcg_idx(s, i);
                if (!c)
                        continue;
 
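Note: for_each_memcg_cache_index() is assumed to be a thin macro over the same memcg_limited_groups_array_size bound the removed loop used, roughly:

/* mm/slab.h (sketch) */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)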
@@ -3592,8 +3586,8 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
         * code updating memcg_caches will issue a write barrier to match this.
         */
        read_barrier_depends();
-       if (likely(cachep->memcg_params->memcg_caches[idx])) {
-               cachep = cachep->memcg_params->memcg_caches[idx];
+       if (likely(cache_from_memcg_idx(cachep, idx))) {
+               cachep = cache_from_memcg_idx(cachep, idx);
                goto out;
        }
 
@@ -5389,45 +5383,50 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
 static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
                                struct cftype *cft, struct seq_file *m)
 {
+       struct numa_stat {
+               const char *name;
+               unsigned int lru_mask;
+       };
+
+       static const struct numa_stat stats[] = {
+               { "total", LRU_ALL },
+               { "file", LRU_ALL_FILE },
+               { "anon", LRU_ALL_ANON },
+               { "unevictable", BIT(LRU_UNEVICTABLE) },
+       };
+       const struct numa_stat *stat;
        int nid;
-       unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
-       unsigned long node_nr;
+       unsigned long nr;
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-       total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
-       seq_printf(m, "total=%lu", total_nr);
-       for_each_node_state(nid, N_MEMORY) {
-               node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
-               seq_printf(m, " N%d=%lu", nid, node_nr);
-       }
-       seq_putc(m, '\n');
-
-       file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
-       seq_printf(m, "file=%lu", file_nr);
-       for_each_node_state(nid, N_MEMORY) {
-               node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-                               LRU_ALL_FILE);
-               seq_printf(m, " N%d=%lu", nid, node_nr);
-       }
-       seq_putc(m, '\n');
-
-       anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
-       seq_printf(m, "anon=%lu", anon_nr);
-       for_each_node_state(nid, N_MEMORY) {
-               node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-                               LRU_ALL_ANON);
-               seq_printf(m, " N%d=%lu", nid, node_nr);
+       for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+               nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
+               seq_printf(m, "%s=%lu", stat->name, nr);
+               for_each_node_state(nid, N_MEMORY) {
+                       nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
+                                                         stat->lru_mask);
+                       seq_printf(m, " N%d=%lu", nid, nr);
+               }
+               seq_putc(m, '\n');
+       }
+
+       for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+               struct mem_cgroup *iter;
+
+               nr = 0;
+               for_each_mem_cgroup_tree(iter, memcg)
+                       nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
+               seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
+               for_each_node_state(nid, N_MEMORY) {
+                       nr = 0;
+                       for_each_mem_cgroup_tree(iter, memcg)
+                               nr += mem_cgroup_node_nr_lru_pages(
+                                       iter, nid, stat->lru_mask);
+                       seq_printf(m, " N%d=%lu", nid, nr);
+               }
+               seq_putc(m, '\n');
        }
-       seq_putc(m, '\n');
 
-       unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
-       seq_printf(m, "unevictable=%lu", unevictable_nr);
-       for_each_node_state(nid, N_MEMORY) {
-               node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-                               BIT(LRU_UNEVICTABLE));
-               seq_printf(m, " N%d=%lu", nid, node_nr);
-       }
-       seq_putc(m, '\n');
        return 0;
 }
 #endif /* CONFIG_NUMA */
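
Note: with the table-driven loops above, memory.numa_stat emits one line per stats[] entry plus a hierarchical_* counterpart, each followed by per-node counts. An illustrative two-node sample (numbers made up, format derived from the seq_printf calls in the hunk):

total=65536 N0=32768 N1=32768
file=49152 N0=24576 N1=24576
anon=16384 N0=8192 N1=8192
unevictable=0 N0=0 N1=0
hierarchical_total=131072 N0=65536 N1=65536
hierarchical_file=98304 N0=49152 N1=49152
hierarchical_anon=32768 N0=16384 N1=16384
hierarchical_unevictable=0 N0=0 N1=0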
@@ -6606,10 +6605,10 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
        pte_t *pte;
        spinlock_t *ptl;
 
-       if (pmd_trans_huge_lock(pmd, vma) == 1) {
+       if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
                        mc.precharge += HPAGE_PMD_NR;
-               spin_unlock(&vma->vm_mm->page_table_lock);
+               spin_unlock(ptl);
                return 0;
        }
 
@@ -6798,9 +6797,9 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
         *    to be unlocked in __split_huge_page_splitting(), where the main
         *    part of thp split is not executed yet.
         */
-       if (pmd_trans_huge_lock(pmd, vma) == 1) {
+       if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                if (mc.precharge < HPAGE_PMD_NR) {
-                       spin_unlock(&vma->vm_mm->page_table_lock);
+                       spin_unlock(ptl);
                        return 0;
                }
                target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
@@ -6817,7 +6816,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                        }
                        put_page(page);
                }
-               spin_unlock(&vma->vm_mm->page_table_lock);
+               spin_unlock(ptl);
                return 0;
        }
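
Note: the extra spinlock_t ** argument in the two hunks above comes from the split page table lock conversion: pmd_trans_huge_lock() is assumed to hand back the lock it actually took (the per-PMD lock rather than mm->page_table_lock), so the caller unlocks the right one. The caller-side pattern is roughly:

/* caller-side pattern after the split-PTL change (sketch) */
spinlock_t *ptl;

if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
	/* *pmd is stable and ptl (the pmd lock) is held here */
	work_on_huge_pmd(pmd);		/* hypothetical placeholder */
	spin_unlock(ptl);		/* was spin_unlock(&vma->vm_mm->page_table_lock) */
}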