/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Since one
 * chunk in each allocated page is occupied by the z3fold header, NCHUNKS
 * works out to 63, the maximum number of free chunks in a z3fold page; there
 * will accordingly be 63 freelists per pool.
 */
#define NCHUNKS_ORDER   6

#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

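/*
 * Worked example of the chunk arithmetic above, assuming PAGE_SIZE = 4096
 * (PAGE_SHIFT = 12): CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes and
 * ZHDR_SIZE_ALIGNED = 64.  The header occupies one chunk, leaving
 * NCHUNKS = (4096 - 64) >> 6 = 63 chunks for stored objects.
 */
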
#define BUDDY_MASK      (0x3)

struct z3fold_pool;
struct z3fold_ops {
        int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @lock:       protects all pool fields and the first|last_chunks fields of
 *              any z3fold page in the pool
 * @unbuddied:  array of lists tracking z3fold pages that contain at most two
 *              buddies; the list each z3fold page is added to depends on the
 *              size of its free region.
 * @buddied:    list tracking the z3fold pages that contain 3 buddies;
 *              these z3fold pages are full
 * @lru:        list tracking the z3fold pages in LRU order by most recently
 *              added buddy.
 * @pages_nr:   number of z3fold pages in the pool.
 * @ops:        pointer to a structure of user-defined operations specified at
 *              pool creation time.
 * @zpool:      zpool driver object backing this pool, set by the zpool layer
 * @zpool_ops:  zpool driver operations, used for the eviction callback
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        spinlock_t lock;
        struct list_head unbuddied[NCHUNKS];
        struct list_head buddied;
        struct list_head lru;
        u64 pages_nr;
        const struct z3fold_ops *ops;
        struct zpool *zpool;
        const struct zpool_ops *zpool_ops;
};

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunk of
 *                      each z3fold page, except for HEADLESS pages
 * @buddy:      links the z3fold page into the relevant list in the pool
 * @first_chunks:       the size of the first buddy in chunks, 0 if free
 * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
 * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @start_middle:       index (in chunks) at which the middle buddy starts
 * @first_num:          the starting number (for the first handle)
 */
struct z3fold_header {
        struct list_head buddy;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
};

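/*
 * Sketch of the in-page layout implied by the fields above (derived from the
 * code, not part of the original source): the first buddy grows upward from
 * just past the header, the last buddy is packed against the end of the
 * page, and the middle buddy floats at start_middle chunks from the
 * beginning of the page.
 *
 *  +------+-------------+- free -+--------------+- free -+------------+
 *  | zhdr | first buddy |        | middle buddy |        | last buddy |
 *  +------+-------------+--------+--------------+--------+------------+
 *  chunk 0                       ^ start_middle               page end ^
 */
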
/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        UNDER_RECLAIM = 0,
        PAGE_HEADLESS,
        MIDDLE_CHUNK_MAPPED,
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page)
{
        struct z3fold_header *zhdr = page_address(page);

        INIT_LIST_HEAD(&page->lru);
        clear_bit(UNDER_RECLAIM, &page->private);
        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);

        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        INIT_LIST_HEAD(&zhdr->buddy);
        return zhdr;
}

/* Frees the page backing the given z3fold header */
static void free_z3fold_page(struct z3fold_header *zhdr)
{
        __free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        unsigned long handle;

        handle = (unsigned long)zhdr;
        if (bud != HEADLESS)
                handle += (bud + zhdr->first_num) & BUDDY_MASK;
        return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
        return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
        return (handle - zhdr->first_num) & BUDDY_MASK;
}

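/*
 * Worked example of the handle round-trip (hypothetical values): with a
 * page-aligned zhdr and first_num = 2, encode_handle(zhdr, LAST) stores
 * (3 + 2) & 0x3 = 1 in the low bits.  handle_to_buddy() then recovers
 * (1 - 2) & 0x3 = 3 = LAST, illustrating the comment above: the low bits
 * may be smaller than first_num, but masking still yields the right buddy.
 */
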
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - 1;
                int nfree_after = zhdr->last_chunks ?
                        0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks;
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}

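/*
 * Example with hypothetical values: for a page with first_chunks = 0,
 * start_middle = 10, middle_chunks = 20 and last_chunks = 5, the space
 * before the middle buddy is start_middle - 1 = 9 chunks (chunk 0 holds
 * the header), while the space after it counts as 0 because the last buddy
 * is present, so num_free_chunks() reports max(9, 0) = 9.
 */
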
/*****************
 * API Functions
*****************/
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @gfp:        gfp flags when allocating the z3fold pool structure
 * @ops:        user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
                const struct z3fold_ops *ops)
{
        struct z3fold_pool *pool;
        int i;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                return NULL;
        spin_lock_init(&pool->lock);
        for_each_unbuddied_list(i, 0)
                INIT_LIST_HEAD(&pool->unbuddied[i]);
        INIT_LIST_HEAD(&pool->buddied);
        INIT_LIST_HEAD(&pool->lru);
        pool->pages_nr = 0;
        pool->ops = ops;
        return pool;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:       the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        kfree(pool);
}

/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);
        void *beg = zhdr;

        if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
            zhdr->middle_chunks != 0 &&
            zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                memmove(beg + ZHDR_SIZE_ALIGNED,
                        beg + (zhdr->start_middle << CHUNK_SHIFT),
                        zhdr->middle_chunks << CHUNK_SHIFT);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
                return 1;
        }
        return 0;
}

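/*
 * Illustration of why z3fold_compact_page() increments first_num (derived
 * from the code, with hypothetical values): a handle to the middle buddy is
 * encoded as (MIDDLE + first_num) & 0x3.  After compaction that object is
 * the first buddy, and since MIDDLE - FIRST = 1, bumping first_num by one
 * makes handle_to_buddy() decode the same stale handle as FIRST: with
 * first_num = 0, the old bits are (2 + 0) & 0x3 = 2, and after first_num
 * becomes 1, (2 - 1) & 0x3 = 1 = FIRST.
 */
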
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:       z3fold pool from which to allocate
 * @size:       size in bytes of the desired allocation
 * @gfp:        gfp flags used if the pool needs to grow
 * @handle:     handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds PAGE_SIZE, or
 * -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = 0, i, freechunks;
        struct z3fold_header *zhdr = NULL;
        enum buddy bud;
        struct page *page;

        if (!size || (gfp & __GFP_HIGHMEM))
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
                chunks = size_to_chunks(size);
                spin_lock(&pool->lock);

                /* First, try to find an unbuddied z3fold page. */
                zhdr = NULL;
                for_each_unbuddied_list(i, chunks) {
                        if (!list_empty(&pool->unbuddied[i])) {
                                zhdr = list_first_entry(&pool->unbuddied[i],
                                                struct z3fold_header, buddy);
                                page = virt_to_page(zhdr);
                                if (zhdr->first_chunks == 0) {
                                        if (zhdr->middle_chunks != 0 &&
                                            chunks >= zhdr->start_middle)
                                                bud = LAST;
                                        else
                                                bud = FIRST;
                                } else if (zhdr->last_chunks == 0)
                                        bud = LAST;
                                else if (zhdr->middle_chunks == 0)
                                        bud = MIDDLE;
                                else {
                                        pr_err("No free chunks in unbuddied\n");
                                        WARN_ON(1);
                                        continue;
                                }
                                list_del(&zhdr->buddy);
                                goto found;
                        }
                }
                bud = FIRST;
                spin_unlock(&pool->lock);
        }

        /* Couldn't find unbuddied z3fold page, create new one */
        page = alloc_page(gfp);
        if (!page)
                return -ENOMEM;
        spin_lock(&pool->lock);
        pool->pages_nr++;
        zhdr = init_z3fold_page(page);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + 1;
        }

        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
                /* Add to unbuddied list */
                freechunks = num_free_chunks(zhdr);
                list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
        } else {
                /* Add to buddied list */
                list_add(&zhdr->buddy, &pool->buddied);
        }

headless:
        /* Add/move z3fold page to beginning of LRU */
        if (!list_empty(&page->lru))
                list_del(&page->lru);

        list_add(&page->lru, &pool->lru);

        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);

        return 0;
}

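/*
 * Minimal usage sketch of the allocator-level API (hypothetical caller;
 * within this file the functions are static and are normally reached only
 * through the zpool glue at the bottom):
 *
 *	unsigned long handle;
 *	if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *		void *obj = z3fold_map(pool, handle);
 *		memcpy(obj, data, len);
 *		z3fold_unmap(pool, handle);
 *		...
 *		z3fold_free(pool, handle);
 *	}
 */
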
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:       pool in which the allocation resided
 * @handle:     handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the UNDER_RECLAIM flag being set on the page,
 * this function only sets the first|middle|last_chunks to 0.  The page is
 * actually freed once all buddies are evicted (see z3fold_reclaim_page()
 * below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        int freechunks;
        struct page *page;
        enum buddy bud;

        spin_lock(&pool->lock);
        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* HEADLESS page stored */
                bud = HEADLESS;
        } else {
                bud = handle_to_buddy(handle);

                switch (bud) {
                case FIRST:
                        zhdr->first_chunks = 0;
                        break;
                case MIDDLE:
                        zhdr->middle_chunks = 0;
                        zhdr->start_middle = 0;
                        break;
                case LAST:
                        zhdr->last_chunks = 0;
                        break;
                default:
                        pr_err("%s: unknown bud %d\n", __func__, bud);
                        WARN_ON(1);
                        spin_unlock(&pool->lock);
                        return;
                }
        }

        if (test_bit(UNDER_RECLAIM, &page->private)) {
                /* z3fold page is under reclaim, reclaim will free */
                spin_unlock(&pool->lock);
                return;
        }

        if (bud != HEADLESS) {
                /* Remove from existing buddy list */
                list_del(&zhdr->buddy);
        }

        if (bud == HEADLESS ||
            (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
                        zhdr->last_chunks == 0)) {
                /* z3fold page is empty, free */
                list_del(&page->lru);
                clear_bit(PAGE_HEADLESS, &page->private);
                free_z3fold_page(zhdr);
                pool->pages_nr--;
        } else {
                z3fold_compact_page(zhdr);
                /* Add to the unbuddied list */
                freechunks = num_free_chunks(zhdr);
                list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
        }

        spin_unlock(&pool->lock);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:       pool from which a page will attempt to be evicted
 * @retries:    number of pages on the LRU list for which eviction will
 *              be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the UNDER_RECLAIM flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, or -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
        int i, ret = 0, freechunks;
        struct z3fold_header *zhdr;
        struct page *page;
        unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

        spin_lock(&pool->lock);
        if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
                        retries == 0) {
                spin_unlock(&pool->lock);
                return -EINVAL;
        }
        for (i = 0; i < retries; i++) {
                page = list_last_entry(&pool->lru, struct page, lru);
                list_del(&page->lru);

                /* Protect z3fold page against free */
                set_bit(UNDER_RECLAIM, &page->private);
                zhdr = page_address(page);
                if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        list_del(&zhdr->buddy);
                        /*
                         * We need to encode the handles before unlocking,
                         * since we can race with free that will set
                         * (first|middle|last)_chunks to 0
                         */
                        first_handle = 0;
                        last_handle = 0;
                        middle_handle = 0;
                        if (zhdr->first_chunks)
                                first_handle = encode_handle(zhdr, FIRST);
                        if (zhdr->middle_chunks)
                                middle_handle = encode_handle(zhdr, MIDDLE);
                        if (zhdr->last_chunks)
                                last_handle = encode_handle(zhdr, LAST);
                } else {
                        first_handle = encode_handle(zhdr, HEADLESS);
                        last_handle = middle_handle = 0;
                }

                spin_unlock(&pool->lock);

                /* Issue the eviction callback(s) */
                if (middle_handle) {
                        ret = pool->ops->evict(pool, middle_handle);
                        if (ret)
                                goto next;
                }
                if (first_handle) {
                        ret = pool->ops->evict(pool, first_handle);
                        if (ret)
                                goto next;
                }
                if (last_handle) {
                        ret = pool->ops->evict(pool, last_handle);
                        if (ret)
                                goto next;
                }
next:
                spin_lock(&pool->lock);
                clear_bit(UNDER_RECLAIM, &page->private);
                if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
                    (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
                     zhdr->middle_chunks == 0)) {
                        /*
                         * All buddies are now free, free the z3fold page and
                         * return success.
                         */
                        clear_bit(PAGE_HEADLESS, &page->private);
                        free_z3fold_page(zhdr);
                        pool->pages_nr--;
                        spin_unlock(&pool->lock);
                        return 0;
                } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        if (zhdr->first_chunks != 0 &&
                            zhdr->last_chunks != 0 &&
                            zhdr->middle_chunks != 0) {
                                /* Full, add to buddied list */
                                list_add(&zhdr->buddy, &pool->buddied);
                        } else {
                                z3fold_compact_page(zhdr);
                                /* add to unbuddied list */
                                freechunks = num_free_chunks(zhdr);
                                list_add(&zhdr->buddy,
                                         &pool->unbuddied[freechunks]);
                        }
                }

                /* add to beginning of LRU */
                list_add(&page->lru, &pool->lru);
        }
        spin_unlock(&pool->lock);
        return -EAGAIN;
}

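/*
 * A minimal sketch of an eviction handler satisfying the contract described
 * above (hypothetical; a real user such as zswap would first write the
 * object's data back to its backing store before freeing the handle):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		// write back the data referenced by handle, then:
 *		z3fold_free(pool, handle);
 *		return 0;	// or non-zero to keep the object resident
 *	}
 */
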
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        void *addr;
        enum buddy buddy;

        spin_lock(&pool->lock);
        zhdr = handle_to_z3fold_header(handle);
        addr = zhdr;
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                goto out;

        buddy = handle_to_buddy(handle);
        switch (buddy) {
        case FIRST:
                addr += ZHDR_SIZE_ALIGNED;
                break;
        case MIDDLE:
                addr += zhdr->start_middle << CHUNK_SHIFT;
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
                addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
                WARN_ON(1);
                addr = NULL;
                break;
        }
out:
        spin_unlock(&pool->lock);
        return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy buddy;

        spin_lock(&pool->lock);
        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                spin_unlock(&pool->lock);
                return;
        }

        buddy = handle_to_buddy(handle);
        if (buddy == MIDDLE)
                clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        spin_unlock(&pool->lock);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:       pool whose size is being queried
 *
 * Returns: size in pages of the given pool.  The pool lock need not be
 * taken to access pages_nr.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
        return pool->pages_nr;
}

/*****************
 * zpool
*****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
        if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
                return pool->zpool_ops->evict(pool->zpool, handle);
        else
                return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
        .evict =        z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
                               const struct zpool_ops *zpool_ops,
                               struct zpool *zpool)
{
        struct z3fold_pool *pool;

        pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
        if (pool) {
                pool->zpool = zpool;
                pool->zpool_ops = zpool_ops;
        }
        return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
        z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
        z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
                        unsigned int *reclaimed)
{
        unsigned int total = 0;
        int ret = -EINVAL;

        while (total < pages) {
                ret = z3fold_reclaim_page(pool, 8);
                if (ret < 0)
                        break;
                total++;
        }

        if (reclaimed)
                *reclaimed = total;

        return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
                        enum zpool_mapmode mm)
{
        return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
        z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
        return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
        .type =         "z3fold",
        .owner =        THIS_MODULE,
        .create =       z3fold_zpool_create,
        .destroy =      z3fold_zpool_destroy,
        .malloc =       z3fold_zpool_malloc,
        .free =         z3fold_zpool_free,
        .shrink =       z3fold_zpool_shrink,
        .map =          z3fold_zpool_map,
        .unmap =        z3fold_zpool_unmap,
        .total_size =   z3fold_zpool_total_size,
};

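/*
 * Note (not from the original source): with the driver registered below, a
 * zpool user such as zswap reaches this allocator by type name, e.g. via
 * zpool_create_pool("z3fold", "zswap", GFP_KERNEL, &ops); the resulting
 * zpool_malloc()/zpool_map_handle() calls then land in the z3fold_zpool_*
 * wrappers above.
 */
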
MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
        /* Make sure the z3fold header will fit in one chunk */
        BUILD_BUG_ON(sizeof(struct z3fold_header) > ZHDR_SIZE_ALIGNED);
        zpool_register_driver(&z3fold_zpool_driver);

        return 0;
}

static void __exit exit_z3fold(void)
{
        zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");