Merge branch 'akpm' (patches from Andrew)
author    Linus Torvalds <torvalds@linux-foundation.org>
Sat, 4 Jun 2016 17:51:29 +0000 (10:51 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 4 Jun 2016 17:51:29 +0000 (10:51 -0700)
Merge various fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: recalculate the preferred zoneref if the context can ignore memory policies
  mm, page_alloc: reset zonelist iterator after resetting fair zone allocation policy
  mm, oom_reaper: do not use siglock in try_oom_reaper()
  mm, page_alloc: prevent infinite loop in buffered_rmqueue()
  checkpatch: reduce git commit description style false positives
  mm/z3fold.c: avoid modifying HEADLESS page and minor cleanup
  memcg: add RCU locking around css_for_each_descendant_pre() in memcg_offline_kmem()
  mm: check the return value of lookup_page_ext for all call sites
  kdump: fix dmesg gdbmacro to work with record based printk
  mm: fix overflow in vm_map_ram()

Documentation/kdump/gdbmacros.txt
include/linux/page_idle.h
mm/memcontrol.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_owner.c
mm/page_poison.c
mm/vmalloc.c
mm/vmstat.c
mm/z3fold.c
scripts/checkpatch.pl

diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt
index 35f6a982a0d5fdc040cfb722a071c8d707c53123..220d0a80ca2c9f45ce7f0705be1952c34df4f1dd 100644
@@ -170,21 +170,92 @@ document trapinfo
        address the kernel panicked.
 end
 
+define dump_log_idx
+       set $idx = $arg0
+       if ($argc > 1)
+               set $prev_flags = $arg1
+       else
+               set $prev_flags = 0
+       end
+       set $msg = ((struct printk_log *) (log_buf + $idx))
+       set $prefix = 1
+       set $newline = 1
+       set $log = log_buf + $idx + sizeof(*$msg)
 
-define dmesg
-       set $i = 0
-       set $end_idx = (log_end - 1) & (log_buf_len - 1)
+       # prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
+       if (($prev_flags & 8) && !($msg->flags & 4))
+               set $prefix = 0
+       end
+
+       # msg->flags & LOG_CONT
+       if ($msg->flags & 8)
+               # (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+               if (($prev_flags & 8) && !($prev_flags & 2))
+                       set $prefix = 0
+               end
+               # (!(msg->flags & LOG_NEWLINE))
+               if (!($msg->flags & 2))
+                       set $newline = 0
+               end
+       end
+
+       if ($prefix)
+               printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+       end
+       if ($msg->text_len != 0)
+               eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+       end
+       if ($newline)
+               printf "\n"
+       end
+       if ($msg->dict_len > 0)
+               set $dict = $log + $msg->text_len
+               set $idx = 0
+               set $line = 1
+               while ($idx < $msg->dict_len)
+                       if ($line)
+                               printf " "
+                               set $line = 0
+                       end
+                       set $c = $dict[$idx]
+                       if ($c == '\0')
+                               printf "\n"
+                               set $line = 1
+                       else
+                               if ($c < ' ' || $c >= 127 || $c == '\\')
+                                       printf "\\x%02x", $c
+                               else
+                                       printf "%c", $c
+                               end
+                       end
+                       set $idx = $idx + 1
+               end
+               printf "\n"
+       end
+end
+document dump_log_idx
+       Dump a single log given its index in the log buffer.  The first
+       parameter is the index into log_buf, the second is optional and
+       specifies the previous log entry's flags, used for properly
+       formatting continued lines.
+end
 
-       while ($i < logged_chars)
-               set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
+define dmesg
+       set $i = log_first_idx
+       set $end_idx = log_first_idx
+       set $prev_flags = 0
 
-               if ($idx + 100 <= $end_idx) || \
-                  ($end_idx <= $idx && $idx + 100 < log_buf_len)
-                       printf "%.100s", &log_buf[$idx]
-                       set $i = $i + 100
+       while (1)
+               set $msg = ((struct printk_log *) (log_buf + $i))
+               if ($msg->len == 0)
+                       set $i = 0
                else
-                       printf "%c", log_buf[$idx]
-                       set $i = $i + 1
+                       dump_log_idx $i $prev_flags
+                       set $i = $i + $msg->len
+                       set $prev_flags = $msg->flags
+               end
+               if ($i == $end_idx)
+                       loop_break
                end
        end
 end
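
The rewritten dmesg macro walks log_buf as a sequence of variable-length records instead of a flat character ring. For reference, a sketch of the record header it casts to, mirroring struct printk_log from kernel/printk/printk.c of this period (userspace stdint types so the snippet stands alone; treat the exact layout as an assumption, not part of this commit):

	#include <stdint.h>

	struct printk_log_sketch {
		uint64_t ts_nsec;	/* timestamp in nanoseconds */
		uint16_t len;		/* length of entire record; 0 marks a wrap */
		uint16_t text_len;	/* length of the text payload */
		uint16_t dict_len;	/* length of the key=value dictionary */
		uint8_t  facility;	/* syslog facility */
		uint8_t  flags:5;	/* LOG_NEWLINE=2, LOG_PREFIX=4, LOG_CONT=8 */
		uint8_t  level:3;	/* syslog level */
	};

The macro starts at log_first_idx, prints the record at log_buf + $i with dump_log_idx, advances by len, and treats len == 0 as "wrap back to the start of log_buf".
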
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b34a1bffce97a8cd38acf2b161059..fec40271339f8044aa1e3f1743e62017f146f9cc 100644
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
 
 static inline bool page_is_young(struct page *page)
 {
-       return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline void set_page_young(struct page *page)
 {
-       set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool test_and_clear_page_young(struct page *page)
 {
-       return test_and_clear_bit(PAGE_EXT_YOUNG,
-                                 &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool page_is_idle(struct page *page)
 {
-       return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void set_page_idle(struct page *page)
 {
-       set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void clear_page_idle(struct page *page)
 {
-       clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */
 
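
Each helper now bails out when lookup_page_ext() returns NULL, which can happen when the page_ext metadata for a section was never allocated or its allocation failed. A minimal sketch of the same defensive pattern applied to a hypothetical flag accessor (the function is illustrative, not part of this commit):

	/* Hypothetical helper: always test lookup_page_ext() before use. */
	static inline bool page_ext_flag_set(struct page *page, int flag)
	{
		struct page_ext *page_ext = lookup_page_ext(page);

		if (unlikely(!page_ext))
			return false;	/* missing metadata reads as "flag clear" */

		return test_bit(flag, &page_ext->flags);
	}
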
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 925b431f3f03cf86402240b5dcc3fa784d808882..58c69c94402a74498a488392eabeed15ddddd1ee 100644
@@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
         * ordering is imposed by list_lru_node->lock taken by
         * memcg_drain_all_list_lrus().
         */
+       rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
        css_for_each_descendant_pre(css, &memcg->css) {
                child = mem_cgroup_from_css(css);
                BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
                if (!memcg->use_hierarchy)
                        break;
        }
+       rcu_read_unlock();
+
        memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
 
        memcg_free_cache_id(kmemcg_id);
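
memcg_offline_kmem() can be reached from css_free without cgroup_mutex, so the descendant walk needs the RCU read lock. The resulting shape, abridged from the hunk above:

	rcu_read_lock();	/* css_for_each_descendant_pre() needs RCU
				   when cgroup_mutex is not held */
	css_for_each_descendant_pre(css, &memcg->css) {
		/* non-sleeping per-child work */
	}
	rcu_read_unlock();
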
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dfb1ab61fb2374379ca014ee4f764366dc20c081..acbc432d1a52778d746879d652fd4c2001075868 100644
@@ -625,8 +625,6 @@ void try_oom_reaper(struct task_struct *tsk)
        if (atomic_read(&mm->mm_users) > 1) {
                rcu_read_lock();
                for_each_process(p) {
-                       bool exiting;
-
                        if (!process_shares_mm(p, mm))
                                continue;
                        if (fatal_signal_pending(p))
@@ -636,10 +634,7 @@ void try_oom_reaper(struct task_struct *tsk)
                         * If the task is exiting make sure the whole thread group
                         * is exiting and cannot acces mm anymore.
                         */
-                       spin_lock_irq(&p->sighand->siglock);
-                       exiting = signal_group_exit(p->signal);
-                       spin_unlock_irq(&p->sighand->siglock);
-                       if (exiting)
+                       if (signal_group_exit(p->signal))
                                continue;
 
                        /* Give up */
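
Dropping sighand->siglock works because the check is a racy hint here in any case: signal_group_exit() only reads two fields of signal_struct, and holding the lock does not make the answer any more stable by the time it is used. For reference, roughly how the predicate is defined in include/linux/sched.h of this era (treat as an assumption):

	static inline int signal_group_exit(const struct signal_struct *sig)
	{
		return (sig->flags & SIGNAL_GROUP_EXIT) ||
		       (sig->group_exit_task != NULL);
	}
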
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f8f3bfc435eec5bf0144798f6b890e6deacee0f6..6903b695ebaef81ef890f4b5e41c749d89dce2e1 100644
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        set_page_private(page, 0);
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                                page = list_last_entry(list, struct page, lru);
                        else
                                page = list_first_entry(list, struct page, lru);
-               } while (page && check_new_pcp(page));
 
-               __dec_zone_state(zone, NR_ALLOC_BATCH);
-               list_del(&page->lru);
-               pcp->count--;
+                       __dec_zone_state(zone, NR_ALLOC_BATCH);
+                       list_del(&page->lru);
+                       pcp->count--;
+
+               } while (check_new_pcp(page));
        } else {
                /*
                 * We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@ reset_fair:
                apply_fair = false;
                fair_skipped = false;
                reset_alloc_batches(ac->preferred_zoneref->zone);
+               z = ac->preferred_zoneref;
                goto zonelist_scan;
        }
 
@@ -3596,6 +3604,17 @@ retry:
         */
        alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+       /*
+        * Reset the zonelist iterators if memory policies can be ignored.
+        * These allocations are high priority and system rather than user
+        * orientated.
+        */
+       if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+               ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+               ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       }
+
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, order,
                                alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@ retry:
 
        /* Allocate without watermarks if the context allows */
        if (alloc_flags & ALLOC_NO_WATERMARKS) {
-               /*
-                * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
-                * the allocation is high priority and these type of
-                * allocations are system rather than user orientated
-                */
-               ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
                page = get_page_from_freelist(gfp_mask, order,
                                                ALLOC_NO_WATERMARKS, ac);
                if (page)
@@ -3808,7 +3821,11 @@ retry_cpuset:
        /* Dirty zone balancing only done in the fast path */
        ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
-       /* The preferred zone is used for statistics later */
+       /*
+        * The preferred zone is used for statistics but crucially it is
+        * also used as the starting point for the zonelist iterator. It
+        * may get reset for allocations that ignore memory policies.
+        */
        ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                        ac.high_zoneidx, ac.nodemask);
        if (!ac.preferred_zoneref) {
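
In the buffered_rmqueue() hunk above, the unlink and accounting move inside the do/while loop: previously, a page rejected by check_new_pcp() stayed on the pcp list and was picked again on the next pass, looping forever. A standalone userspace analogue of the pattern (illustrative only, no kernel APIs):

	/* A candidate that fails validation must be removed from the list
	 * inside the retry loop, otherwise the next iteration selects the
	 * same bad element and the loop never terminates. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	struct node {
		bool bad;
		struct node *next;
	};

	static struct node *take_first_good(struct node **head)
	{
		while (*head) {
			struct node *n = *head;

			*head = n->next;	/* unlink before re-checking, as the
						   patch moves list_del()/pcp->count-- */
			if (!n->bad)
				return n;	/* passed the check_new_pcp() analogue */
			free(n);		/* bad candidate: drop it and retry */
		}
		return NULL;			/* list exhausted */
	}

	int main(void)
	{
		struct node *head = NULL, *n;

		for (int i = 0; i < 4; i++) {
			n = malloc(sizeof(*n));
			n->bad = (i != 0);	/* only the first-pushed node is good */
			n->next = head;
			head = n;
		}
		n = take_first_good(&head);
		printf("%s\n", n ? "got a good node" : "no good node");
		free(n);
		while (head) {
			n = head;
			head = head->next;
			free(n);
		}
		return 0;
	}
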
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 792b56da13d8564f4f6fbab68dcf46297bd13c30..c6cda3e36212833f78189c3a93b7dc80f8bc7ec9 100644
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
+               if (unlikely(!page_ext))
+                       continue;
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
 }
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+
        struct stack_trace trace = {
                .nr_entries = 0,
                .max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
                .skip = 3,
        };
 
+       if (unlikely(!page_ext))
+               return;
+
        save_stack_trace(&trace);
 
        page_ext->order = order;
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
 
        page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               /*
+                * The caller just returns 0 if no valid gfp
+                * So return 0 here too.
+                */
+               return 0;
 
        return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
        struct page_ext *new_ext = lookup_page_ext(newpage);
        int i;
 
+       if (unlikely(!old_ext || !new_ext))
+               return;
+
        new_ext->order = old_ext->order;
        new_ext->gfp_mask = old_ext->gfp_mask;
        new_ext->nr_entries = old_ext->nr_entries;
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page)
        gfp_t gfp_mask = page_ext->gfp_mask;
        int mt = gfpflags_to_migratetype(gfp_mask);
 
+       if (unlikely(!page_ext)) {
+               pr_alert("There is no page extension available.\n");
+               return;
+       }
+
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                }
 
                page_ext = lookup_page_ext(page);
+               if (unlikely(!page_ext))
+                       continue;
 
                /*
                 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
                                continue;
 
                        page_ext = lookup_page_ext(page);
+                       if (unlikely(!page_ext))
+                               continue;
 
                        /* Maybe overraping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
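
Note that in the __dump_page_owner() hunk the new NULL check lands after page_ext->gfp_mask has already been read a few lines earlier; to be effective the check has to come before any dereference, along the lines of (sketch, not part of this commit):

	void __dump_page_owner(struct page *page)
	{
		struct page_ext *page_ext = lookup_page_ext(page);
		gfp_t gfp_mask;
		int mt;

		if (unlikely(!page_ext)) {
			pr_alert("There is no page extension available.\n");
			return;
		}

		gfp_mask = page_ext->gfp_mask;
		mt = gfpflags_to_migratetype(gfp_mask);
		/* ... */
	}
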
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 1eae5fad2446b4b7b069fde3d0e5b373418ea7bb..2e647c65916b91b00177837370e210d71c568f5e 100644
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
-       if (!page_ext)
+       if (unlikely(!page_ext))
                return false;
 
        return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index cf7ad1a53be03b866ecee6325c47abef278d70fc..e11475cdeb7adb66194d8fd985fcbe155be806f3 100644
@@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
  */
 void vm_unmap_ram(const void *mem, unsigned int count)
 {
-       unsigned long size = count << PAGE_SHIFT;
+       unsigned long size = (unsigned long)count << PAGE_SHIFT;
        unsigned long addr = (unsigned long)mem;
 
        BUG_ON(!addr);
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
-       unsigned long size = count << PAGE_SHIFT;
+       unsigned long size = (unsigned long)count << PAGE_SHIFT;
        unsigned long addr;
        void *mem;
 
@@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
 {
        struct vm_struct *area;
+       unsigned long size;             /* In bytes */
 
        might_sleep();
 
        if (count > totalram_pages)
                return NULL;
 
-       area = get_vm_area_caller((count << PAGE_SHIFT), flags,
-                                       __builtin_return_address(0));
+       size = (unsigned long)count << PAGE_SHIFT;
+       area = get_vm_area_caller(size, flags, __builtin_return_address(0));
        if (!area)
                return NULL;
 
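
The casts matter because count is an unsigned int: with PAGE_SHIFT == 12, a count of 2^20 pages (a 4 GiB mapping) or more overflows 32-bit arithmetic before the result is widened to unsigned long. A standalone illustration, assuming an LP64 system and 4 KiB pages:

	#include <stdio.h>

	int main(void)
	{
		unsigned int count = 1U << 20;			 /* 1M pages == 4 GiB */
		unsigned long bad  = count << 12;		 /* shift done in 32 bits: 0 */
		unsigned long good = (unsigned long)count << 12; /* 4294967296 */

		printf("bad=%lu good=%lu\n", bad, good);
		return 0;
	}
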
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 77e42ef388c2a832169d4bcace8a505c45d95516..cb2a67bb41581de147427e2289190beea33f69da 100644
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                continue;
 
                        page_ext = lookup_page_ext(page);
+                       if (unlikely(!page_ext))
+                               continue;
 
                        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 34917d55d311222d4255b4d2e1185e01fa173d73..8f9e89ca1d312152711b23580cacf006407dff8b 100644
@@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                /* HEADLESS page stored */
                bud = HEADLESS;
        } else {
-               bud = (handle - zhdr->first_num) & BUDDY_MASK;
+               bud = handle_to_buddy(handle);
 
                switch (bud) {
                case FIRST:
@@ -572,15 +572,19 @@ next:
                        pool->pages_nr--;
                        spin_unlock(&pool->lock);
                        return 0;
-               } else if (zhdr->first_chunks != 0 &&
-                          zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
-                       /* Full, add to buddied list */
-                       list_add(&zhdr->buddy, &pool->buddied);
-               } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-                       z3fold_compact_page(zhdr);
-                       /* add to unbuddied list */
-                       freechunks = num_free_chunks(zhdr);
-                       list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+               }  else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+                       if (zhdr->first_chunks != 0 &&
+                           zhdr->last_chunks != 0 &&
+                           zhdr->middle_chunks != 0) {
+                               /* Full, add to buddied list */
+                               list_add(&zhdr->buddy, &pool->buddied);
+                       } else {
+                               z3fold_compact_page(zhdr);
+                               /* add to unbuddied list */
+                               freechunks = num_free_chunks(zhdr);
+                               list_add(&zhdr->buddy,
+                                        &pool->unbuddied[freechunks]);
+                       }
                }
 
                /* add to beginning of LRU */
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6750595bd7b819883b5b4ae8dde3fdfacf0f37fb..4904ced676d40289356aa3358f894fa7efa4b5c0 100755
@@ -2454,6 +2454,7 @@ sub process {
 
 # Check for git id commit length and improperly formed commit descriptions
                if ($in_commit_log && !$commit_log_possible_stack_dump &&
+                   $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
                    ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
                     ($line =~ /\b[0-9a-f]{12,40}\b/i &&
                      $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&