From 3d862f5c3a1dbc1bbf409cafcf0497a1d435bcdd Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Fri, 28 Jun 2013 09:52:15 +1000
Subject: [PATCH] list_lru: per-node list infrastructure fix

After a while investigating, it seems to us that the imbalance we are
seeing is due to a multi-node race already in the tree (our guess).
Although the WARN is useful to show us the race, BUG_ON is too much,
since it seems the kernel should be fine to keep going after that.

Signed-off-by: Glauber Costa
Cc: Dave Chinner
Signed-off-by: Andrew Morton
---
 mm/list_lru.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/list_lru.c b/mm/list_lru.c
index f2d1d6e78eb6..1efe4ecc02b1 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -15,7 +15,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 	struct list_lru_node *nlru = &lru->node[nid];
 
 	spin_lock(&nlru->lock);
-	BUG_ON(nlru->nr_items < 0);
+	WARN_ON_ONCE(nlru->nr_items < 0);
 	if (list_empty(item)) {
 		list_add_tail(item, &nlru->list);
 		if (nlru->nr_items++ == 0)
@@ -38,7 +38,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
 		list_del_init(item);
 		if (--nlru->nr_items == 0)
 			node_clear(nid, lru->active_nodes);
-		BUG_ON(nlru->nr_items < 0);
+		WARN_ON_ONCE(nlru->nr_items < 0);
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -56,7 +56,7 @@ unsigned long list_lru_count(struct list_lru *lru)
 		struct list_lru_node *nlru = &lru->node[nid];
 
 		spin_lock(&nlru->lock);
-		BUG_ON(nlru->nr_items < 0);
+		WARN_ON_ONCE(nlru->nr_items < 0);
 		count += nlru->nr_items;
 		spin_unlock(&nlru->lock);
 	}
@@ -91,7 +91,7 @@ restart:
 		case LRU_REMOVED:
 			if (--nlru->nr_items == 0)
 				node_clear(nid, lru->active_nodes);
-			BUG_ON(nlru->nr_items < 0);
+			WARN_ON_ONCE(nlru->nr_items < 0);
 			isolated++;
 			break;
 		case LRU_ROTATE:
-- 
2.39.5
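
For reference, a minimal userspace sketch of the behavioural difference the
conversion relies on. The bug_on()/warn_on_once() helpers below are
simplified, illustrative stand-ins, not the real macros from
include/asm-generic/bug.h (which also dump a stack trace, set taint flags
and use per-arch trap instructions); they only show that a WARN_ON_ONCE-style
check reports the first violation and lets execution continue, while a
BUG_ON-style check stops everything at the call site.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* BUG_ON-style check: the condition is fatal, execution stops here. */
static void bug_on(bool cond, const char *what)
{
	if (cond) {
		fprintf(stderr, "BUG: %s\n", what);
		abort();
	}
}

/*
 * WARN_ON_ONCE-style check: report the first violation, then keep going.
 * (The real macro tracks "once" per call site; this helper is per-function.)
 */
static bool warn_on_once(bool cond, const char *what)
{
	static bool warned;

	if (cond && !warned) {
		warned = true;
		fprintf(stderr, "WARNING (once): %s\n", what);
	}
	return cond;
}

int main(void)
{
	long nr_items = -1;			/* simulate the racy underflow */

	warn_on_once(nr_items < 0, "nr_items < 0");	/* warns, continues */
	warn_on_once(nr_items < 0, "nr_items < 0");	/* silent this time */
	printf("still running, nr_items = %ld\n", nr_items);

	bug_on(nr_items < 0, "nr_items < 0");		/* aborts here */
	return 0;					/* never reached */
}

Built with gcc, the program prints the warning once, keeps running past the
WARN-style checks, and aborts only at the BUG-style one, which is exactly the
behaviour the patch trades away so the kernel can survive the suspected race.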