* latencies, so it's better to scan a minimum amount there as
* well.
*/
- if (current_is_kswapd())
+ if (current_is_kswapd() && mz->zone->all_unreclaimable)
force_scan = true;
if (!global_reclaim(sc))
force_scan = true;
};
struct mem_cgroup *memcg;
- if (global_reclaim(sc)) {
- struct mem_cgroup_zone mz = {
- .mem_cgroup = NULL,
- .zone = zone,
- };
-
- shrink_mem_cgroup_zone(priority, &mz, sc);
- return;
- }
-
memcg = mem_cgroup_iter(root, NULL, &reclaim);
do {
struct mem_cgroup_zone mz = {
* scanned it with decreasing priority levels until
* nr_to_reclaim had been reclaimed. This priority
* cycle is thus over after a single memcg.
+ *
+ * Direct reclaim and kswapd, on the other hand, have
+ * to scan all memory cgroups to fulfill the overall
+ * scan target for the zone.
*/
if (!global_reclaim(sc)) {
mem_cgroup_iter_break(root, memcg);
static void age_active_anon(struct zone *zone, struct scan_control *sc,
int priority)
{
- struct mem_cgroup_zone mz = {
- .mem_cgroup = NULL,
- .zone = zone,
- };
+ struct mem_cgroup *memcg;
- if (inactive_anon_is_low(&mz))
- shrink_active_list(SWAP_CLUSTER_MAX, &mz, sc, priority, 0);
+ /* Without swap, aging the anon lists is pointless work. */
+ if (!total_swap_pages)
+ return;
+
+ /*
+ * The zone's anon LRUs are now split per memory cgroup, so walk
+ * the full memcg hierarchy and rebalance each cgroup's lists in
+ * this zone, instead of operating on one zone-global pair.
+ */
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ struct mem_cgroup_zone mz = {
+ .mem_cgroup = memcg,
+ .zone = zone,
+ };
+
+ /* Deactivate active anon pages if the inactive list is low. */
+ if (inactive_anon_is_low(&mz))
+ shrink_active_list(SWAP_CLUSTER_MAX, &mz,
+ sc, priority, 0);
+
+ /* Advance to the next cgroup; NULL ends the hierarchy walk. */
+ memcg = mem_cgroup_iter(NULL, memcg, NULL);
+ } while (memcg);
}
/*