// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 when CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; there will accordingly
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER   6

#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS         (TOTAL_CHUNKS - ZHDR_CHUNKS)
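
/*
 * Worked example (illustrative, assuming a 4K page, i.e. PAGE_SHIFT == 12):
 * CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE is 64 bytes and TOTAL_CHUNKS is
 * 64. If struct z3fold_header fits into a single 64-byte chunk, then
 * ZHDR_CHUNKS = 1 and NCHUNKS = 63.
 */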

#define BUDDY_MASK      (0x3)
#define BUDDY_SHIFT     2
#define SLOTS_ALIGN     (0x40)

/*****************
 * Structures
 *****************/
struct z3fold_pool;

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
        /*
         * We are using BUDDY_MASK in handle_to_buddy() etc., so there
         * should be enough slots to hold all possible buddy variants.
         */
        unsigned long slot[BUDDY_MASK + 1];
        unsigned long pool; /* back link */
        rwlock_t lock;
};
#define HANDLE_FLAG_MASK        (0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *                      z3fold page, except for HEADLESS pages
 * @buddy:              links the z3fold page into the relevant list in the
 *                      pool
 * @page_lock:          per-page lock
 * @refcount:           reference count for the z3fold page
 * @work:               work_struct for page layout optimization
 * @slots:              pointer to the structure holding buddy slots
 * @pool:               pointer to the containing pool
 * @cpu:                CPU which this page "belongs" to
 * @first_chunks:       the size of the first buddy in chunks, 0 if free
 * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
 * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @start_middle:       the first chunk occupied by the middle buddy
 * @first_num:          the starting number (for the first handle)
 * @mapped_count:       the number of objects currently mapped
 * @foreign_handles:    the number of handles to objects in this page that
 *                      are held in other pages' slot structures
 */
struct z3fold_header {
        struct list_head buddy;
        spinlock_t page_lock;
        struct kref refcount;
        struct work_struct work;
        struct z3fold_buddy_slots *slots;
        struct z3fold_pool *pool;
        short cpu;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
        unsigned short mapped_count:2;
        unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:       pool name
 * @lock:       protects pool unbuddied lists
 * @stale_lock: protects pool stale page list
 * @unbuddied:  per-cpu array of lists tracking z3fold pages that contain at
 *              most two buddies; the list each z3fold page is added to
 *              depends on the size of its free region.
 * @stale:      list of pages marked for freeing
 * @pages_nr:   number of z3fold pages in the pool.
 * @c_handle:   cache for z3fold_buddy_slots allocation
 * @compact_wq: workqueue for page layout background optimization
 * @release_wq: workqueue for safe page release
 * @work:       work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        const char *name;
        spinlock_t lock;
        spinlock_t stale_lock;
        struct list_head *unbuddied;
        struct list_head stale;
        atomic64_t pages_nr;
        struct kmem_cache *c_handle;
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        PAGE_HEADLESS = 0,
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE,
        PAGE_CLAIMED, /* by either reclaim or free */
        PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
        HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
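
/*
 * Illustrative example, assuming a 4K page (CHUNK_SIZE == 64):
 * size_to_chunks(1) through size_to_chunks(64) all return 1, and
 * size_to_chunks(100) returns (100 + 63) >> 6 == 2.
 */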

#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
                                                        gfp_t gfp)
{
        struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
                                                             gfp);

        if (slots) {
                /* It will be freed separately in free_handle(). */
                kmemleak_not_leak(slots);
                slots->pool = (unsigned long)pool;
                rwlock_init(&slots->lock);
        }

        return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
        return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
        return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
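
/*
 * Note: handle_to_slots() relies on struct z3fold_buddy_slots being
 * allocated SLOTS_ALIGN (0x40) aligned (see the kmem_cache_create() call
 * in z3fold_create_pool()), so clearing the low six bits of a handle --
 * which is a pointer to one of the slot words -- recovers the base
 * address of the containing slots structure.
 */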

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
        spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
        return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
        spin_unlock(&zhdr->page_lock);
}

/* Return the locked z3fold page header if the handle is not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
        struct z3fold_buddy_slots *slots;
        struct z3fold_header *zhdr;
        int locked = 0;

        if (!(handle & (1 << PAGE_HEADLESS))) {
                slots = handle_to_slots(handle);
                do {
                        unsigned long addr;

                        read_lock(&slots->lock);
                        addr = *(unsigned long *)handle;
                        zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
                        locked = z3fold_page_trylock(zhdr);
                        read_unlock(&slots->lock);
                        if (locked) {
                                struct page *page = virt_to_page(zhdr);

                                if (!test_bit(PAGE_MIGRATED, &page->private))
                                        break;
                                z3fold_page_unlock(zhdr);
                        }
                        cpu_relax();
                } while (true);
        } else {
                zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
        }

        return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (!test_bit(PAGE_HEADLESS, &page->private))
                z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
        struct z3fold_buddy_slots *slots;
        int i;
        bool is_free;

        if (WARN_ON(*(unsigned long *)handle == 0))
                return;

        slots = handle_to_slots(handle);
        write_lock(&slots->lock);
        *(unsigned long *)handle = 0;

        if (test_bit(HANDLES_NOFREE, &slots->pool)) {
                write_unlock(&slots->lock);
                return; /* simple case, nothing else to do */
        }

        if (zhdr->slots != slots)
                zhdr->foreign_handles--;

        is_free = true;
        for (i = 0; i <= BUDDY_MASK; i++) {
                if (slots->slot[i]) {
                        is_free = false;
                        break;
                }
        }
        write_unlock(&slots->lock);

        if (is_free) {
                struct z3fold_pool *pool = slots_to_pool(slots);

                if (zhdr->slots == slots)
                        zhdr->slots = NULL;
                kmem_cache_free(pool->c_handle, slots);
        }
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
                                        struct z3fold_pool *pool, gfp_t gfp)
{
        struct z3fold_header *zhdr = page_address(page);
        struct z3fold_buddy_slots *slots;

        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
        clear_bit(PAGE_CLAIMED, &page->private);
        clear_bit(PAGE_MIGRATED, &page->private);
        if (headless)
                return zhdr;

        slots = alloc_slots(pool, gfp);
        if (!slots)
                return NULL;

        memset(zhdr, 0, sizeof(*zhdr));
        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
        zhdr->cpu = -1;
        zhdr->slots = slots;
        zhdr->pool = pool;
        INIT_LIST_HEAD(&zhdr->buddy);
        INIT_WORK(&zhdr->work, compact_page_work);
        return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
        if (!headless) {
                lock_page(page);
                __ClearPageMovable(page);
                unlock_page(page);
        }
        __free_page(page);
}

/* Helper function to build the slot index corresponding to a buddy */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
        return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * zhdr->page_lock should be held, since this function accesses first_num
 * when bud != HEADLESS.
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
                                struct z3fold_buddy_slots *slots,
                                enum buddy bud)
{
        unsigned long h = (unsigned long)zhdr;
        int idx = 0;

        /*
         * For a headless page, its handle is its pointer with the extra
         * PAGE_HEADLESS bit set
         */
        if (bud == HEADLESS)
                return h | (1 << PAGE_HEADLESS);

        /* otherwise, return pointer to encoded handle */
        idx = __idx(zhdr, bud);
        h += idx;
        if (bud == LAST)
                h |= (zhdr->last_chunks << BUDDY_SHIFT);

        write_lock(&slots->lock);
        slots->slot[idx] = h;
        write_unlock(&slots->lock);
        return (unsigned long)&slots->slot[idx];
}
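
/*
 * Illustration of the encoding above: the word stored in slots->slot[idx]
 * is the z3fold_header address with the slot index in its low two bits;
 * for a LAST buddy, bits 2 and up of the sub-page part additionally carry
 * last_chunks so that handle_to_chunks() can recover the object size.
 * The value handed back to the caller is the address of the slot word
 * itself, which is what handle_to_slots() later decodes.
 */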

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
        struct z3fold_buddy_slots *slots = handle_to_slots(handle);
        unsigned long addr;

        read_lock(&slots->lock);
        addr = *(unsigned long *)handle;
        read_unlock(&slots->lock);
        return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct z3fold_buddy_slots *slots = handle_to_slots(handle);
        unsigned long addr;

        read_lock(&slots->lock);
        WARN_ON(handle & (1 << PAGE_HEADLESS));
        addr = *(unsigned long *)handle;
        read_unlock(&slots->lock);
        zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
        return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
        return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
        struct page *page = virt_to_page(zhdr);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);

        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
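        /*
         * The empty lock/unlock pair below presumably acts as a barrier:
         * it lets any concurrent pool->lock holder that might still be
         * looking at this page's list linkage drain out before the page
         * is queued for release.
         */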
        spin_lock(&pool->lock);
        spin_unlock(&pool->lock);

        if (locked)
                z3fold_page_unlock(zhdr);

        spin_lock(&pool->stale_lock);
        list_add(&zhdr->buddy, &pool->stale);
        queue_work(pool->release_wq, &pool->work);
        spin_unlock(&pool->stale_lock);

        atomic64_dec(&pool->pages_nr);
}

static void release_z3fold_page_locked(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                               refcount);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);

        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static inline int put_z3fold_locked(struct z3fold_header *zhdr)
{
        return kref_put(&zhdr->refcount, release_z3fold_page_locked);
}

static inline int put_z3fold_locked_list(struct z3fold_header *zhdr)
{
        return kref_put(&zhdr->refcount, release_z3fold_page_locked_list);
}

static void free_pages_work(struct work_struct *w)
{
        struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

        spin_lock(&pool->stale_lock);
        while (!list_empty(&pool->stale)) {
                struct z3fold_header *zhdr = list_first_entry(&pool->stale,
                                                struct z3fold_header, buddy);
                struct page *page = virt_to_page(zhdr);

                list_del(&zhdr->buddy);
                if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
                        continue;
                spin_unlock(&pool->stale_lock);
                cancel_work_sync(&zhdr->work);
                free_z3fold_page(page, false);
                cond_resched();
                spin_lock(&pool->stale_lock);
        }
        spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}
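
/*
 * Worked example for num_free_chunks(), assuming a 4K page (TOTAL_CHUNKS
 * == 64): with first_chunks == 10, middle_chunks == 5, start_middle == 30
 * and a free last buddy, nfree_before is 0 (the first buddy is in use)
 * and nfree_after is 64 - (30 + 5) == 29, so the function returns 29.
 */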

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
                                struct z3fold_header *zhdr)
{
        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
                struct list_head *unbuddied;
                int freechunks = num_free_chunks(zhdr);

                migrate_disable();
                unbuddied = this_cpu_ptr(pool->unbuddied);
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[freechunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
                migrate_enable();
        }
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
        enum buddy bud = HEADLESS;

        if (zhdr->middle_chunks) {
                if (!zhdr->first_chunks &&
                    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
                        bud = FIRST;
                else if (!zhdr->last_chunks)
                        bud = LAST;
        } else {
                if (!zhdr->first_chunks)
                        bud = FIRST;
                else if (!zhdr->last_chunks)
                        bud = LAST;
                else
                        bud = MIDDLE;
        }

        return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                                unsigned short dst_chunk)
{
        void *beg = zhdr;
        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
        return !((zhdr->first_chunks && zhdr->middle_chunks) ||
                        (zhdr->first_chunks && zhdr->last_chunks) ||
                        (zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        void *p = zhdr;
        unsigned long old_handle = 0;
        size_t sz = 0;
        struct z3fold_header *new_zhdr = NULL;
        int first_idx = __idx(zhdr, FIRST);
        int middle_idx = __idx(zhdr, MIDDLE);
        int last_idx = __idx(zhdr, LAST);
        unsigned short *moved_chunks = NULL;

        /*
         * No need to protect slots here -- all the slots are "local" and
         * the page lock is already taken
         */
        if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
                p += ZHDR_SIZE_ALIGNED;
                sz = zhdr->first_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
                moved_chunks = &zhdr->first_chunks;
        } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
                p += zhdr->start_middle << CHUNK_SHIFT;
                sz = zhdr->middle_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
                moved_chunks = &zhdr->middle_chunks;
        } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
                p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
                sz = zhdr->last_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
                moved_chunks = &zhdr->last_chunks;
        }

        if (sz > 0) {
                enum buddy new_bud = HEADLESS;
                short chunks = size_to_chunks(sz);
                void *q;

                new_zhdr = __z3fold_alloc(pool, sz, false);
                if (!new_zhdr)
                        return NULL;

                if (WARN_ON(new_zhdr == zhdr))
                        goto out_fail;

                new_bud = get_free_buddy(new_zhdr, chunks);
                q = new_zhdr;
                switch (new_bud) {
                case FIRST:
                        new_zhdr->first_chunks = chunks;
                        q += ZHDR_SIZE_ALIGNED;
                        break;
                case MIDDLE:
                        new_zhdr->middle_chunks = chunks;
                        new_zhdr->start_middle =
                                new_zhdr->first_chunks + ZHDR_CHUNKS;
                        q += new_zhdr->start_middle << CHUNK_SHIFT;
                        break;
                case LAST:
                        new_zhdr->last_chunks = chunks;
                        q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
                        break;
                default:
                        goto out_fail;
                }
                new_zhdr->foreign_handles++;
                memcpy(q, p, sz);
                write_lock(&zhdr->slots->lock);
                *(unsigned long *)old_handle = (unsigned long)new_zhdr +
                        __idx(new_zhdr, new_bud);
                if (new_bud == LAST)
                        *(unsigned long *)old_handle |=
                                        (new_zhdr->last_chunks << BUDDY_SHIFT);
                write_unlock(&zhdr->slots->lock);
                add_to_unbuddied(pool, new_zhdr);
                z3fold_page_unlock(new_zhdr);

                *moved_chunks = 0;
        }

        return new_zhdr;

out_fail:
        if (new_zhdr && !put_z3fold_locked(new_zhdr)) {
                add_to_unbuddied(pool, new_zhdr);
                z3fold_page_unlock(new_zhdr);
        }
        return NULL;
}

#define BIG_CHUNK_GAP   3
/* Has to be called with the page lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
                return 0; /* can't move middle chunk, it's used */

        if (unlikely(PageIsolated(page)))
                return 0;

        if (zhdr->middle_chunks == 0)
                return 0; /* nothing to compact */

        if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                /* move to the beginning */
                mchunk_memmove(zhdr, ZHDR_CHUNKS);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
                return 1;
        }

        /*
         * moving data is expensive, so let's only do that if
         * there's substantial gain (at least BIG_CHUNK_GAP chunks)
         */
        if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
            zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
                        BIG_CHUNK_GAP) {
                mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
                return 1;
        } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
                   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
                                        + zhdr->middle_chunks) >=
                        BIG_CHUNK_GAP) {
                unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
                        zhdr->middle_chunks;
                mchunk_memmove(zhdr, new_start);
                zhdr->start_middle = new_start;
                return 1;
        }

        return 0;
}
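
/*
 * Note on the first_num++ in z3fold_compact_page() above: handles encode
 * the slot index as (bud + first_num) & BUDDY_MASK, and FIRST == MIDDLE - 1
 * in enum buddy, so incrementing first_num makes the moved object's old
 * MIDDLE handle decode to FIRST afterwards -- outstanding handles thus
 * stay valid across the move.
 */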

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        struct page *page;

        page = virt_to_page(zhdr);
        if (locked)
                WARN_ON(z3fold_page_trylock(zhdr));
        else
                z3fold_page_lock(zhdr);
        if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        if (put_z3fold_locked(zhdr))
                return;

        if (test_bit(PAGE_STALE, &page->private) ||
            test_and_set_bit(PAGE_CLAIMED, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }

        if (!zhdr->foreign_handles && buddy_single(zhdr) &&
            zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
                if (!put_z3fold_locked(zhdr)) {
                        clear_bit(PAGE_CLAIMED, &page->private);
                        z3fold_page_unlock(zhdr);
                }
                return;
        }

        z3fold_compact_page(zhdr);
        add_to_unbuddied(pool, zhdr);
        clear_bit(PAGE_CLAIMED, &page->private);
        z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
        struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
                                                work);

        do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
                                                size_t size, bool can_sleep)
{
        struct z3fold_header *zhdr = NULL;
        struct page *page;
        struct list_head *unbuddied;
        int chunks = size_to_chunks(size), i;

lookup:
        migrate_disable();
        /* First, try to find an unbuddied z3fold page. */
        unbuddied = this_cpu_ptr(pool->unbuddied);
        for_each_unbuddied_list(i, chunks) {
                struct list_head *l = &unbuddied[i];

                zhdr = list_first_entry_or_null(READ_ONCE(l),
                                        struct z3fold_header, buddy);

                if (!zhdr)
                        continue;

                /* Re-check under lock. */
                spin_lock(&pool->lock);
                if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
                                                struct z3fold_header, buddy)) ||
                    !z3fold_page_trylock(zhdr)) {
                        spin_unlock(&pool->lock);
                        zhdr = NULL;
                        migrate_enable();
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }
                list_del_init(&zhdr->buddy);
                zhdr->cpu = -1;
                spin_unlock(&pool->lock);

                page = virt_to_page(zhdr);
                if (test_bit(NEEDS_COMPACTING, &page->private) ||
                    test_bit(PAGE_CLAIMED, &page->private)) {
                        z3fold_page_unlock(zhdr);
                        zhdr = NULL;
                        migrate_enable();
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }

                /*
                 * This page could not be removed from its unbuddied
                 * list while the pool lock was held, and we have since
                 * taken the page lock, so kref_put() could not have been
                 * called before we got here; it's therefore safe to just
                 * call kref_get().
                 */
                kref_get(&zhdr->refcount);
                break;
        }
        migrate_enable();

        if (!zhdr) {
                int cpu;

                /* look for _exact_ match on other cpus' lists */
                for_each_online_cpu(cpu) {
                        struct list_head *l;

                        unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
                        spin_lock(&pool->lock);
                        l = &unbuddied[chunks];

                        zhdr = list_first_entry_or_null(READ_ONCE(l),
                                                struct z3fold_header, buddy);

                        if (!zhdr || !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                zhdr = NULL;
                                continue;
                        }
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        spin_unlock(&pool->lock);

                        page = virt_to_page(zhdr);
                        if (test_bit(NEEDS_COMPACTING, &page->private) ||
                            test_bit(PAGE_CLAIMED, &page->private)) {
                                z3fold_page_unlock(zhdr);
                                zhdr = NULL;
                                if (can_sleep)
                                        cond_resched();
                                continue;
                        }
                        kref_get(&zhdr->refcount);
                        break;
                }
        }

        if (zhdr && !zhdr->slots) {
                zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
                if (!zhdr->slots)
                        goto out_fail;
        }
        return zhdr;

out_fail:
        if (!put_z3fold_locked(zhdr)) {
                add_to_unbuddied(pool, zhdr);
                z3fold_page_unlock(zhdr);
        }
        return NULL;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:       pool name
 * @gfp:        gfp flags when allocating the z3fold pool structure
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
{
        struct z3fold_pool *pool = NULL;
        int i, cpu;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                goto out;
        pool->c_handle = kmem_cache_create("z3fold_handle",
                                sizeof(struct z3fold_buddy_slots),
                                SLOTS_ALIGN, 0, NULL);
        if (!pool->c_handle)
                goto out_c;
        spin_lock_init(&pool->lock);
        spin_lock_init(&pool->stale_lock);
        pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
                                         __alignof__(struct list_head));
        if (!pool->unbuddied)
                goto out_pool;
        for_each_possible_cpu(cpu) {
                struct list_head *unbuddied =
                                per_cpu_ptr(pool->unbuddied, cpu);
                for_each_unbuddied_list(i, 0)
                        INIT_LIST_HEAD(&unbuddied[i]);
        }
        INIT_LIST_HEAD(&pool->stale);
        atomic64_set(&pool->pages_nr, 0);
        pool->name = name;
        pool->compact_wq = create_singlethread_workqueue(pool->name);
        if (!pool->compact_wq)
                goto out_unbuddied;
        pool->release_wq = create_singlethread_workqueue(pool->name);
        if (!pool->release_wq)
                goto out_wq;
        INIT_WORK(&pool->work, free_pages_work);
        return pool;

out_wq:
        destroy_workqueue(pool->compact_wq);
out_unbuddied:
        free_percpu(pool->unbuddied);
out_pool:
        kmem_cache_destroy(pool->c_handle);
out_c:
        kfree(pool);
out:
        return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:       the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        kmem_cache_destroy(pool->c_handle);

        /*
         * We need to destroy pool->compact_wq before pool->release_wq,
         * as any pending work on pool->compact_wq will call
         * queue_work(pool->release_wq, &pool->work).
         *
         * There are still outstanding pages until both workqueues are drained,
         * so we cannot unregister migration until then.
         */

        destroy_workqueue(pool->compact_wq);
        destroy_workqueue(pool->release_wq);
        free_percpu(pool->unbuddied);
        kfree(pool);
}

static const struct movable_operations z3fold_mops;

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:       z3fold pool from which to allocate
 * @size:       size in bytes of the desired allocation
 * @gfp:        gfp flags used if the pool needs to grow
 * @handle:     handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = size_to_chunks(size);
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        enum buddy bud;
        bool can_sleep = gfpflags_allow_blocking(gfp);

        if (!size || (gfp & __GFP_HIGHMEM))
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
retry:
                zhdr = __z3fold_alloc(pool, size, can_sleep);
                if (zhdr) {
                        bud = get_free_buddy(zhdr, chunks);
                        if (bud == HEADLESS) {
                                if (!put_z3fold_locked(zhdr))
                                        z3fold_page_unlock(zhdr);
                                pr_err("No free chunks in unbuddied\n");
                                WARN_ON(1);
                                goto retry;
                        }
                        page = virt_to_page(zhdr);
                        goto found;
                }
                bud = FIRST;
        }

        page = alloc_page(gfp);
        if (!page)
                return -ENOMEM;

        zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
        if (!zhdr) {
                __free_page(page);
                return -ENOMEM;
        }
        atomic64_inc(&pool->pages_nr);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }
        if (can_sleep) {
                lock_page(page);
                __SetPageMovable(page, &z3fold_mops);
                unlock_page(page);
        } else {
                WARN_ON(!trylock_page(page));
                __SetPageMovable(page, &z3fold_mops);
                unlock_page(page);
        }
        z3fold_page_lock(zhdr);

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
        }
        add_to_unbuddied(pool, zhdr);

headless:
        spin_lock(&pool->lock);
        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);
        if (bud != HEADLESS)
                z3fold_page_unlock(zhdr);

        return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:       pool in which the allocation resided
 * @handle:     handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0.  The page is actually freed
 * once all buddies are evicted.
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;
        bool page_claimed;

        zhdr = get_z3fold_header(handle);
        page = virt_to_page(zhdr);
        page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* If a headless page is under reclaim, just leave.
                 * NB: we use test_and_set_bit for a reason: if the bit
                 * has not been set before, we release this page
                 * immediately, so we don't care about its value any more.
                 */
                if (!page_claimed) {
                        put_z3fold_header(zhdr);
                        free_z3fold_page(page, true);
                        atomic64_dec(&pool->pages_nr);
                }
                return;
        }

        /* Non-headless case */
        bud = handle_to_buddy(handle);

        switch (bud) {
        case FIRST:
                zhdr->first_chunks = 0;
                break;
        case MIDDLE:
                zhdr->middle_chunks = 0;
                break;
        case LAST:
                zhdr->last_chunks = 0;
                break;
        default:
                pr_err("%s: unknown bud %d\n", __func__, bud);
                WARN_ON(1);
                put_z3fold_header(zhdr);
                return;
        }

        if (!page_claimed)
                free_handle(handle, zhdr);
        if (put_z3fold_locked_list(zhdr))
                return;
        if (page_claimed) {
                /* the page has not been claimed by us */
                put_z3fold_header(zhdr);
                return;
        }
        if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                clear_bit(PAGE_CLAIMED, &page->private);
                put_z3fold_header(zhdr);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                clear_bit(PAGE_CLAIMED, &page->private);
                do_compact_page(zhdr, true);
                return;
        }
        kref_get(&zhdr->refcount);
        clear_bit(PAGE_CLAIMED, &page->private);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
        put_z3fold_header(zhdr);
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from the handle and constructs the pointer to
 * the correct starting chunk within the page.
 *
 * Return: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        void *addr;
        enum buddy buddy;

        zhdr = get_z3fold_header(handle);
        addr = zhdr;
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                goto out;

        buddy = handle_to_buddy(handle);
        switch (buddy) {
        case FIRST:
                addr += ZHDR_SIZE_ALIGNED;
                break;
        case MIDDLE:
                addr += zhdr->start_middle << CHUNK_SHIFT;
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
                addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
                WARN_ON(1);
                addr = NULL;
                break;
        }

        if (addr)
                zhdr->mapped_count++;
out:
        put_z3fold_header(zhdr);
        return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy buddy;

        zhdr = get_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                return;

        buddy = handle_to_buddy(handle);
        if (buddy == MIDDLE)
                clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        zhdr->mapped_count--;
        put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:       pool whose size is being queried
 *
 * Return: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
        return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
        struct z3fold_header *zhdr;
        struct z3fold_pool *pool;

        VM_BUG_ON_PAGE(PageIsolated(page), page);

        if (test_bit(PAGE_HEADLESS, &page->private))
                return false;

        zhdr = page_address(page);
        z3fold_page_lock(zhdr);
        if (test_bit(NEEDS_COMPACTING, &page->private) ||
            test_bit(PAGE_STALE, &page->private))
                goto out;

        if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
                goto out;

        if (test_and_set_bit(PAGE_CLAIMED, &page->private))
                goto out;
        pool = zhdr_to_pool(zhdr);
        spin_lock(&pool->lock);
        if (!list_empty(&zhdr->buddy))
                list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        kref_get(&zhdr->refcount);
        z3fold_page_unlock(zhdr);
        return true;

out:
        z3fold_page_unlock(zhdr);
        return false;
}

static int z3fold_page_migrate(struct page *newpage, struct page *page,
                enum migrate_mode mode)
{
        struct z3fold_header *zhdr, *new_zhdr;
        struct z3fold_pool *pool;

        VM_BUG_ON_PAGE(!PageIsolated(page), page);
        VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

        zhdr = page_address(page);
        pool = zhdr_to_pool(zhdr);

        if (!z3fold_page_trylock(zhdr))
                return -EAGAIN;
        if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
                clear_bit(PAGE_CLAIMED, &page->private);
                z3fold_page_unlock(zhdr);
                return -EBUSY;
        }
        if (work_pending(&zhdr->work)) {
                z3fold_page_unlock(zhdr);
                return -EAGAIN;
        }
        new_zhdr = page_address(newpage);
        memcpy(new_zhdr, zhdr, PAGE_SIZE);
        newpage->private = page->private;
        set_bit(PAGE_MIGRATED, &page->private);
        z3fold_page_unlock(zhdr);
        spin_lock_init(&new_zhdr->page_lock);
        INIT_WORK(&new_zhdr->work, compact_page_work);
        /*
         * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
         * so we only have to reinitialize it.
         */
        INIT_LIST_HEAD(&new_zhdr->buddy);
        __ClearPageMovable(page);

        get_page(newpage);
        z3fold_page_lock(new_zhdr);
        if (new_zhdr->first_chunks)
                encode_handle(new_zhdr, FIRST);
        if (new_zhdr->last_chunks)
                encode_handle(new_zhdr, LAST);
        if (new_zhdr->middle_chunks)
                encode_handle(new_zhdr, MIDDLE);
        set_bit(NEEDS_COMPACTING, &newpage->private);
        new_zhdr->cpu = smp_processor_id();
        __SetPageMovable(newpage, &z3fold_mops);
        z3fold_page_unlock(new_zhdr);

        queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

        /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
        page->private = 0;
        put_page(page);
        return 0;
}

static void z3fold_page_putback(struct page *page)
{
        struct z3fold_header *zhdr;
        struct z3fold_pool *pool;

        zhdr = page_address(page);
        pool = zhdr_to_pool(zhdr);

        z3fold_page_lock(zhdr);
        if (!list_empty(&zhdr->buddy))
                list_del_init(&zhdr->buddy);
        INIT_LIST_HEAD(&page->lru);
        if (put_z3fold_locked(zhdr))
                return;
        if (list_empty(&zhdr->buddy))
                add_to_unbuddied(pool, zhdr);
        clear_bit(PAGE_CLAIMED, &page->private);
        z3fold_page_unlock(zhdr);
}

static const struct movable_operations z3fold_mops = {
        .isolate_page = z3fold_page_isolate,
        .migrate_page = z3fold_page_migrate,
        .putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static void *z3fold_zpool_create(const char *name, gfp_t gfp)
{
        return z3fold_create_pool(name, gfp);
}

static void z3fold_zpool_destroy(void *pool)
{
        z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
        z3fold_free(pool, handle);
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
                        enum zpool_mapmode mm)
{
        return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
        z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
        return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
        .type =         "z3fold",
        .sleep_mapped = true,
        .owner =        THIS_MODULE,
        .create =       z3fold_zpool_create,
        .destroy =      z3fold_zpool_destroy,
        .malloc =       z3fold_zpool_malloc,
        .free =         z3fold_zpool_free,
        .map =          z3fold_zpool_map,
        .unmap =        z3fold_zpool_unmap,
        .total_size =   z3fold_zpool_total_size,
};
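
/*
 * Minimal usage sketch (illustrative only; "mypool" and the buffer names
 * are made up): a client such as zswap reaches this allocator through the
 * generic zpool API registered below, roughly as follows (error handling
 * omitted):
 *
 *      struct zpool *zp = zpool_create_pool("z3fold", "mypool", GFP_KERNEL);
 *      unsigned long handle;
 *
 *      zpool_malloc(zp, clen, GFP_KERNEL, &handle);
 *      dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *      memcpy(dst, cbuf, clen);
 *      zpool_unmap_handle(zp, handle);
 *      ...
 *      zpool_free(zp, handle);
 *      zpool_destroy_pool(zp);
 */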

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
        /*
         * Make sure the z3fold header is not larger than the page size and
         * that there is space remaining for the buddies.
         */
        BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
        zpool_register_driver(&z3fold_zpool_driver);

        return 0;
}

static void __exit exit_z3fold(void)
{
        zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");