 	bool need_unlock = false;
 	unsigned long uninitialized_var(flags);
 
-	if (unlikely(!pc))
-		return;
-
 	rcu_read_lock();
 	memcg = pc->mem_cgroup;
 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
 	}
 
 	pc = lookup_page_cgroup(page);
-	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
-
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret || !memcg)
 		return ret;
 	 * Check if our page_cgroup is valid
 	 */
 	pc = lookup_page_cgroup(page);
-	if (unlikely(!pc || !PageCgroupUsed(pc)))
+	if (unlikely(!PageCgroupUsed(pc)))
 		return NULL;
 	lock_page_cgroup(pc);
 	struct page_cgroup *pc;
 
 	pc = lookup_page_cgroup(page);
+	/*
+	 * Can be NULL while feeding pages into the page allocator for
+	 * the first time, i.e. during boot or memory hotplug.
+	 */
 	if (likely(pc) && PageCgroupUsed(pc))
 		return pc;
 	return NULL;
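
For readers outside memcontrol.c: the comment added in the last hunk names the only case in which lookup_page_cgroup() can return NULL, and the earlier hunks drop the NULL checks from paths that never see such pages. Below is a minimal standalone sketch of that pattern; it is purely illustrative, not part of the patch, and every *_sketch identifier is made up.

/*
 * Sketch of the invariant the hunks rely on: the lookup can fail only
 * before the descriptor table covers a page (boot, memory hotplug), so
 * paths that only see allocator-backed pages skip the NULL check while
 * opportunistic lookups keep it.
 */
#include <stdbool.h>
#include <stddef.h>

struct pc_sketch {
	bool used;
};

/* Stand-in for lookup_page_cgroup(): NULL only while the table that
 * backs this index has not been initialized yet. */
static struct pc_sketch *lookup_sketch(struct pc_sketch *table,
				       size_t idx, size_t nr_initialized)
{
	return idx < nr_initialized ? &table[idx] : NULL;
}

/* Charge/uncharge-style caller: the page already went through the
 * allocator, so the descriptor is guaranteed to exist; only the
 * "used" state is checked, as in the hunks above. */
static bool uncharge_sketch(struct pc_sketch *table,
			    size_t idx, size_t nr_initialized)
{
	struct pc_sketch *pc = lookup_sketch(table, idx, nr_initialized);

	return pc->used;
}

/* Opportunistic lookup on an arbitrary page: keep the NULL check,
 * mirroring likely(pc) && PageCgroupUsed(pc) in the last hunk. */
static struct pc_sketch *lookup_used_sketch(struct pc_sketch *table,
					    size_t idx, size_t nr_initialized)
{
	struct pc_sketch *pc = lookup_sketch(table, idx, nr_initialized);

	return (pc && pc->used) ? pc : NULL;
}

int main(void)
{
	struct pc_sketch table[2] = { { .used = true }, { .used = false } };

	/* index 1 is backed but unused; index 5 is not backed at all */
	return uncharge_sketch(table, 1, 2) || lookup_used_sketch(table, 5, 2);
}

The design point is the same as in the patch: keep the NULL check only where an uninitialized descriptor is actually reachable, and drop the dead branches from the hot charge/uncharge paths.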