/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};
static int btrfs_decompress_bio(int type, struct page **pages_in,
				u64 disk_start, struct bio *orig_bio,
				size_t srclen);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
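
/*
 * Worked example (a sketch, assuming the common 4K sectorsize and 4-byte
 * crc32c checksums): a 128K compressed extent needs
 * DIV_ROUND_UP(131072, 4096) = 32 checksums, so the allocation is
 * sizeof(struct compressed_bio) plus 32 * 4 bytes.  The checksums live
 * in the variable length array that starts at cb->sums.
 */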
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
}
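
/*
 * Verify each compressed page we read against the checksum array that
 * starts at cb->sums (one u32 per page of compressed data).
 */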
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
		disk_start += PAGE_SIZE;
	}
	ret = 0;
fail:
	return ret;
}
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, lets start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb->compress_type,
				   cb->compressed_pages,
				   cb->start,
				   cb->orig_bio,
				   cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
						  nr_pages, ARRAY_SIZE(pages)),
					    pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_error ? 0 : 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_error = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len,
				   cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}
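
/*
 * Typical call pattern (a sketch; the real caller lives in the delalloc
 * writeback code in inode.c): the caller compresses the range, marks the
 * file pages as writeback, then hands everything off:
 *
 *	ret = btrfs_submit_compressed_write(inode, start, len, disk_start,
 *					    compressed_len, pages, nr_pages);
 *
 * When the last bio completes, end_compressed_bio_write() runs the
 * writepage end_io hook, clears writeback on the file pages and drops
 * the references on the compressed pages.
 */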
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
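
/*
 * Pull extra pages from the inode's page cache into the original read bio
 * when they are backed by the same compressed extent, so a single
 * decompression pass can also fill readahead pages.
 */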
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_error = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_error = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
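
/*
 * Workspace management.  Each compression type keeps a pool of idle
 * workspaces (the scratch memory an algorithm needs), guarded by a
 * spinlock, with a waitqueue for threads waiting on a free one.
 */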
static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};
void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs))
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
		}
		goto again;
	}
	return workspace;
}
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*free_ws < num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}
/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the output
 * size threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, pages,
							  out_pages,
							  total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
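
/*
 * Example (a sketch, not taken from a real caller): compress up to 128K
 * starting at @start, allowing at most 32 destination pages.  Note that
 * *total_out must be primed with the input length:
 *
 *	unsigned long nr_pages = 32;
 *	unsigned long total_in = 0;
 *	unsigned long total_out = 128 * 1024;
 *
 *	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, inode->i_mapping,
 *				   start, pages, &nr_pages,
 *				   &total_in, &total_out);
 */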
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(int type, struct page **pages_in,
				u64 disk_start, struct bio *orig_bio,
				size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, pages_in,
							  disk_start, orig_bio,
							  srclen);
	free_workspace(type, workspace);
	return ret;
}
/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}
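
/*
 * btrfs_decompress() is used, for example, when reading compressed inline
 * extents, where the whole compressed payload lives in a single leaf item
 * and only one destination page needs to be filled.
 */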
void btrfs_exit_compress(void)
{
	free_workspaces();
}
/*
 * Copy uncompressed data from the working buffer into the pages of the bio.
 *
 * buf_start is the byte offset of the start of the working buffer within
 * the uncompressed data.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
			      PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
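
/*
 * Worked example of the copy loop above (a sketch): suppose the working
 * buffer holds bytes 4096..8191 of the uncompressed data (buf_start ==
 * 4096, total_out == 8192) and the current bio page maps file offset
 * disk_start + 4096.  Then start_byte == 4096 == buf_start, so buf_offset
 * stays 0 and working_bytes == 4096, and a single memcpy of
 * min(bv_len, PAGE_SIZE) == 4096 bytes fills the page.
 */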