struct drm_i915_fence_reg *fence,
bool enable);
-static long i915_gem_inactive_count(struct shrinker *shrinker,
- struct shrink_control *sc);
-static long i915_gem_inactive_scan(struct shrinker *shrinker,
- struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+ struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+ struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
#endif
}
-static long
+static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
bool unlock = true;
- long cnt;
+ unsigned long count;
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
unlock = false;
}
- cnt = 0;
+ count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
if (unlock)
mutex_unlock(&dev->struct_mutex);
- return cnt;
+ return count;
}
-static long
+
+static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
int nr_to_scan = sc->nr_to_scan;
- long freed;
+ unsigned long freed;
bool unlock = true;
if (!mutex_trylock(&dev->struct_mutex)) {
*
* This code is crying out for a shrinker per pool....
*/
-static long
+static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
- long freed = 0;
+ unsigned long freed = 0;
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
}
-static long
+static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
unsigned i;
- long count = 0;
+ unsigned long count = 0;
for (i = 0; i < NUM_POOLS; ++i)
count += _manager->pools[i].npages;
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.count_objects = &ttm_pool_shrink_count;
- manager->mm_shrink.scan_objects = &ttm_pool_shrink_scan;
+ manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
return 0;
}
-static long bch_mca_scan(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct cache_set *c = container_of(shrink, struct cache_set, shrink);
struct btree *b, *t;
unsigned long i, nr = sc->nr_to_scan;
- long freed = 0;
+ unsigned long freed = 0;
if (c->shrinker_disabled)
return SHRINK_STOP;
return freed;
}
-static long bch_mca_count(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_count(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct cache_set *c = container_of(shrink, struct cache_set, shrink);
return freed;
}
-static long
+static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c =
- container_of(shrink, struct dm_bufio_client, shrinker);
- long freed;
+ struct dm_bufio_client *c;
+ unsigned long freed;
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
if (sc->gfp_mask & __GFP_IO)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
return freed;
}
-static long
+static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c =
- container_of(shrink, struct dm_bufio_client, shrinker);
- long count;
+ struct dm_bufio_client *c;
+ unsigned long count;
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
if (sc->gfp_mask & __GFP_IO)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
dm_bufio_unlock(c);
return count;
-
}
/*
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
-static long
+static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct ashmem_range *range, *next;
- long freed = 0;
+ unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
if (!(sc->gfp_mask & __GFP_FS))
return freed;
}
-static long
+static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
/*
pr_info(x); \
} while (0)
-static long lowmem_count(struct shrinker *s, struct shrink_control *sc)
+static unsigned long lowmem_count(struct shrinker *s,
+ struct shrink_control *sc)
{
return global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_FILE);
}
-static long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
+static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
struct task_struct *selected = NULL;
- int rem = 0;
+ unsigned long rem = 0;
int tasksize;
int i;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
rem += selected_tasksize;
}
- lowmem_print(4, "lowmem_scan %lu, %x, return %d\n",
+ lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
sc->nr_to_scan, sc->gfp_mask, rem);
rcu_read_unlock();
return rem;
* pageframes in use. FIXME POLICY: Probably the writeback should only occur
* if the eviction doesn't free enough pages.
*/
-static long scan_zcache_memory(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long scan_zcache_memory(struct shrinker *shrink,
+ struct shrink_control *sc)
{
static bool in_progress;
int nr_evict = 0;
int nr_writeback = 0;
struct page *page;
int file_pageframes_inuse, anon_pageframes_inuse;
- long freed = 0;
+ unsigned long freed = 0;
/* don't allow more than one eviction thread at a time */
if (in_progress)
return freed;
}
-static long count_zcache_memory(struct shrinker *shrink,
+static unsigned long count_zcache_memory(struct shrinker *shrink,
struct shrink_control *sc)
{
- int ret = -1;
+ long ret = -1;
/* resample: has changed, but maybe not all the way yet */
zcache_last_active_file_pageframes =
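
The hunks above all perform the same mechanical conversion: shrinker callbacks now return unsigned long object counts instead of signed longs, scan callbacks return SHRINK_STOP (rather than -1) when they cannot make progress, and the work is split between a cheap ->count_objects() and a ->scan_objects() that does the actual freeing. For reference, a minimal sketch of a shrinker written directly against the new API; the my_cache_* names are hypothetical and not taken from this patch:

#include <linux/list.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

static LIST_HEAD(my_cache_lru);
static DEFINE_SPINLOCK(my_cache_lock);
static unsigned long my_cache_nr_freeable;

/* Best-effort estimate; a racy read is fine for count_objects(). */
static unsigned long my_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	return my_cache_nr_freeable;
}

/* Free up to sc->nr_to_scan objects and report how many actually went. */
static unsigned long my_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* Same pattern as the trylocks above: no progress means SHRINK_STOP. */
	if (!spin_trylock(&my_cache_lock))
		return SHRINK_STOP;

	while (freed < sc->nr_to_scan && !list_empty(&my_cache_lru)) {
		list_del_init(my_cache_lru.prev);	/* evict oldest entry */
		my_cache_nr_freeable--;
		freed++;
	}
	spin_unlock(&my_cache_lock);

	return freed;	/* unsigned: never a negative error value */
}

static struct shrinker my_cache_shrinker = {
	.count_objects	= my_cache_count,
	.scan_objects	= my_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};

Registration is unchanged: register_shrinker(&my_cache_shrinker) at init, unregister_shrinker() at teardown.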