From: Andrew Morton
Date: Fri, 7 Jun 2013 00:08:31 +0000 (+1000)
Subject: drivers-convert-shrinkers-to-new-count-scan-api-fix
X-Git-Tag: next-20130607~2^2~279
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=e68835ecf895aef6a65c28b42476a7ae0d425c89;p=karo-tx-linux.git

drivers-convert-shrinkers-to-new-count-scan-api-fix

fix warnings

Cc: Dave Chinner
Cc: Glauber Costa
Signed-off-by: Andrew Morton
---

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 22a05564fbec..0b51b0820f3f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -53,10 +53,10 @@
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                          struct drm_i915_fence_reg *fence,
                                          bool enable);
-static long i915_gem_inactive_count(struct shrinker *shrinker,
-                                    struct shrink_control *sc);
-static long i915_gem_inactive_scan(struct shrinker *shrinker,
-                                   struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+                                             struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+                                            struct shrink_control *sc);
 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
@@ -4483,7 +4483,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
-static long
+static unsigned long
 i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
         struct drm_i915_private *dev_priv =
@@ -4493,7 +4493,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
         struct drm_device *dev = dev_priv->dev;
         struct drm_i915_gem_object *obj;
         bool unlock = true;
-        long cnt;
+        unsigned long count;
 
         if (!mutex_trylock(&dev->struct_mutex)) {
                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
@@ -4505,19 +4505,20 @@
                 unlock = false;
         }
 
-        cnt = 0;
+        count = 0;
         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                 if (obj->pages_pin_count == 0)
-                        cnt += obj->base.size >> PAGE_SHIFT;
+                        count += obj->base.size >> PAGE_SHIFT;
         list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
-                        cnt += obj->base.size >> PAGE_SHIFT;
+                        count += obj->base.size >> PAGE_SHIFT;
 
         if (unlock)
                 mutex_unlock(&dev->struct_mutex);
-        return cnt;
+        return count;
 }
-static long
+
+static unsigned long
 i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
         struct drm_i915_private *dev_priv =
@@ -4526,7 +4527,7 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
                              mm.inactive_shrinker);
         struct drm_device *dev = dev_priv->dev;
         int nr_to_scan = sc->nr_to_scan;
-        long freed;
+        unsigned long freed;
         bool unlock = true;
 
         if (!mutex_trylock(&dev->struct_mutex)) {
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 1746f30c6b63..863bef9f9234 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -388,7 +388,7 @@ out:
  *
  * This code is crying out for a shrinker per pool....
  */
-static long
+static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
         static atomic_t start_pool = ATOMIC_INIT(0);
@@ -396,7 +396,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
         unsigned pool_offset = atomic_add_return(1, &start_pool);
         struct ttm_page_pool *pool;
         int shrink_pages = sc->nr_to_scan;
-        long freed = 0;
+        unsigned long freed = 0;
 
         pool_offset = pool_offset % NUM_POOLS;
         /* select start pool in round robin fashion */
@@ -412,11 +412,11 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 }
 
-static long
+static unsigned long
 ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
         unsigned i;
-        long count = 0;
+        unsigned long count = 0;
 
         for (i = 0; i < NUM_POOLS; ++i)
                 count += _manager->pools[i].npages;
 
@@ -426,8 +426,8 @@
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-        manager->mm_shrink.count_objects = &ttm_pool_shrink_count;
-        manager->mm_shrink.scan_objects = &ttm_pool_shrink_scan;
+        manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+        manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
         manager->mm_shrink.seeks = 1;
         register_shrinker(&manager->mm_shrink);
 }
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index df3fa693a9bf..010ef3df80c9 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -598,12 +598,13 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
         return 0;
 }
 
-static long bch_mca_scan(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+                                  struct shrink_control *sc)
 {
         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
         struct btree *b, *t;
         unsigned long i, nr = sc->nr_to_scan;
-        long freed = 0;
+        unsigned long freed = 0;
 
         if (c->shrinker_disabled)
                 return SHRINK_STOP;
@@ -658,7 +659,8 @@ out:
         return freed;
 }
 
-static long bch_mca_count(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_count(struct shrinker *shrink,
+                                   struct shrink_control *sc)
 {
         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
 
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 2ef25c57f6ae..d5fdcf7c424f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1419,13 +1419,13 @@ static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
         return freed;
 }
 
-static long
+static unsigned long
 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-        struct dm_bufio_client *c =
-            container_of(shrink, struct dm_bufio_client, shrinker);
-        long freed;
+        struct dm_bufio_client *c;
+        unsigned long freed;
 
+        c = container_of(shrink, struct dm_bufio_client, shrinker);
         if (sc->gfp_mask & __GFP_IO)
                 dm_bufio_lock(c);
         else if (!dm_bufio_trylock(c))
@@ -1436,13 +1436,13 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
         return freed;
 }
 
-static long
+static unsigned long
 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-        struct dm_bufio_client *c =
-            container_of(shrink, struct dm_bufio_client, shrinker);
-        long count;
+        struct dm_bufio_client *c;
+        unsigned long count;
 
+        c = container_of(shrink, struct dm_bufio_client, shrinker);
         if (sc->gfp_mask & __GFP_IO)
                 dm_bufio_lock(c);
         else if (!dm_bufio_trylock(c))
@@ -1451,7 +1451,6 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
         count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
         dm_bufio_unlock(c);
         return count;
-
 }
 
 /*
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 9b5186b800ec..8e76ddca0999 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -352,11 +352,11 @@ out:
  * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
  * pages freed.
  */
-static long
+static unsigned long
 ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
         struct ashmem_range *range, *next;
-        long freed = 0;
+        unsigned long freed = 0;
 
         /* We might recurse into filesystem code, so bail out if necessary */
         if (!(sc->gfp_mask & __GFP_FS))
@@ -381,7 +381,7 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
         return freed;
 }
 
-static long
+static unsigned long
 ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
         /*
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index d23bfeabdcbd..6f094b37f1f1 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -66,7 +66,8 @@ static unsigned long lowmem_deathpending_timeout;
                         pr_info(x);                     \
         } while (0)
 
-static long lowmem_count(struct shrinker *s, struct shrink_control *sc)
+static unsigned long lowmem_count(struct shrinker *s,
+                                  struct shrink_control *sc)
 {
         return global_page_state(NR_ACTIVE_ANON) +
                 global_page_state(NR_ACTIVE_FILE) +
@@ -74,11 +75,11 @@ static long lowmem_count(struct shrinker *s, struct shrink_control *sc)
                 global_page_state(NR_INACTIVE_FILE);
 }
 
-static long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
+static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 {
         struct task_struct *tsk;
         struct task_struct *selected = NULL;
-        int rem = 0;
+        unsigned long rem = 0;
         int tasksize;
         int i;
         short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
@@ -163,7 +164,7 @@ static long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
                 rem += selected_tasksize;
         }
 
-        lowmem_print(4, "lowmem_scan %lu, %x, return %d\n",
+        lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
                      sc->nr_to_scan, sc->gfp_mask, rem);
         rcu_read_unlock();
         return rem;
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 4ade8e327be3..304a4b2a9a01 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1140,15 +1140,15 @@ static bool zcache_freeze;
  * pageframes in use.  FIXME POLICY: Probably the writeback should only occur
  * if the eviction doesn't free enough pages.
  */
-static long scan_zcache_memory(struct shrinker *shrink,
-                               struct shrink_control *sc)
+static unsigned long scan_zcache_memory(struct shrinker *shrink,
+                                        struct shrink_control *sc)
 {
         static bool in_progress;
         int nr_evict = 0;
         int nr_writeback = 0;
         struct page *page;
         int file_pageframes_inuse, anon_pageframes_inuse;
-        long freed = 0;
+        unsigned long freed = 0;
 
         /* don't allow more than one eviction thread at a time */
         if (in_progress)
@@ -1200,10 +1200,10 @@ static long scan_zcache_memory(struct shrinker *shrink,
         return freed;
 }
 
-static long count_zcache_memory(struct shrinker *shrink,
+static unsigned long count_zcache_memory(struct shrinker *shrink,
                                 struct shrink_control *sc)
 {
-        int ret = -1;
+        long ret = -1;
 
         /* resample: has changed, but maybe not all the way yet */
         zcache_last_active_file_pageframes =
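
For reference, here is a minimal sketch of the count/scan shrinker API that
this series converts drivers to.  It is an illustration, not code from the
patch: the "foo" cache and its foo_* helpers are hypothetical placeholders,
and only struct shrinker, struct shrink_control, count_objects, scan_objects,
seeks, DEFAULT_SEEKS, SHRINK_STOP and register_shrinker() come from the
kernel API.  The long -> unsigned long prototype changes above are presumably
what "fix warnings" refers to: a callback returning signed long no longer
matches the unsigned-long-returning count_objects/scan_objects hook types,
and lowmem_print() needed %lu once rem became unsigned long.

#include <linux/init.h>
#include <linux/types.h>
#include <linux/shrinker.h>

/* Hypothetical cache internals -- placeholders, not a real kernel API: */
extern unsigned long foo_nr_freeable_objects(void);
extern unsigned long foo_free_objects(unsigned long nr);
extern bool foo_trylock(void);
extern void foo_unlock(void);

static unsigned long foo_shrink_count(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        /*
         * Report how many objects could be freed right now.  Counts are
         * unsigned long; return 0 when nothing is freeable, not the old
         * -1 "no opinion" convention.
         */
        return foo_nr_freeable_objects();
}

static unsigned long foo_shrink_scan(struct shrinker *shrink,
                                     struct shrink_control *sc)
{
        unsigned long freed;

        /*
         * If no progress can be made (e.g. the lock is contended),
         * return SHRINK_STOP rather than 0 or -1.
         */
        if (!foo_trylock())
                return SHRINK_STOP;

        /* Try to free up to sc->nr_to_scan objects... */
        freed = foo_free_objects(sc->nr_to_scan);
        foo_unlock();

        /* ...and report how many were actually freed, not a count. */
        return freed;
}

static struct shrinker foo_shrinker = {
        .count_objects  = foo_shrink_count,
        .scan_objects   = foo_shrink_scan,
        .seeks          = DEFAULT_SEEKS,
};

static int __init foo_init(void)
{
        register_shrinker(&foo_shrinker);
        return 0;
}

As in the ttm hunk above, the hooks are assigned by plain function name;
the '&' on a function designator is redundant.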