/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include "compression.h"
38 struct list_head list;
41 static void zlib_free_workspace(struct list_head *ws)
43 struct workspace *workspace = list_entry(ws, struct workspace, list);
45 vfree(workspace->strm.workspace);
46 kfree(workspace->buf);
50 static struct list_head *zlib_alloc_workspace(void)
52 struct workspace *workspace;
55 workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
57 return ERR_PTR(-ENOMEM);
59 workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
60 zlib_inflate_workspacesize());
61 workspace->strm.workspace = vmalloc(workspacesize);
62 workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
63 if (!workspace->strm.workspace || !workspace->buf)
66 INIT_LIST_HEAD(&workspace->list);
68 return &workspace->list;
70 zlib_free_workspace(&workspace->list);
71 return ERR_PTR(-ENOMEM);
74 static int zlib_compress_pages(struct list_head *ws,
75 struct address_space *mapping,
76 u64 start, unsigned long len,
78 unsigned long nr_dest_pages,
79 unsigned long *out_pages,
80 unsigned long *total_in,
81 unsigned long *total_out,
82 unsigned long max_out)
84 struct workspace *workspace = list_entry(ws, struct workspace, list);
89 struct page *in_page = NULL;
90 struct page *out_page = NULL;
91 unsigned long bytes_left;
97 if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
98 pr_warn("BTRFS: deflateInit failed\n");
103 workspace->strm.total_in = 0;
104 workspace->strm.total_out = 0;
106 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
107 data_in = kmap(in_page);
109 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
110 if (out_page == NULL) {
114 cpage_out = kmap(out_page);
118 workspace->strm.next_in = data_in;
119 workspace->strm.next_out = cpage_out;
120 workspace->strm.avail_out = PAGE_SIZE;
121 workspace->strm.avail_in = min(len, PAGE_SIZE);
123 while (workspace->strm.total_in < len) {
124 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
126 pr_debug("BTRFS: deflate in loop returned %d\n",
128 zlib_deflateEnd(&workspace->strm);
133 /* we're making it bigger, give up */
134 if (workspace->strm.total_in > 8192 &&
135 workspace->strm.total_in <
136 workspace->strm.total_out) {
140 /* we need another page for writing out. Test this
141 * before the total_in so we will pull in a new page for
142 * the stream end if required
144 if (workspace->strm.avail_out == 0) {
146 if (nr_pages == nr_dest_pages) {
151 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
152 if (out_page == NULL) {
156 cpage_out = kmap(out_page);
157 pages[nr_pages] = out_page;
159 workspace->strm.avail_out = PAGE_SIZE;
160 workspace->strm.next_out = cpage_out;
163 if (workspace->strm.total_in >= len)
166 /* we've read in a full page, get a new one */
167 if (workspace->strm.avail_in == 0) {
168 if (workspace->strm.total_out > max_out)
171 bytes_left = len - workspace->strm.total_in;
176 in_page = find_get_page(mapping,
177 start >> PAGE_SHIFT);
178 data_in = kmap(in_page);
179 workspace->strm.avail_in = min(bytes_left,
181 workspace->strm.next_in = data_in;
184 workspace->strm.avail_in = 0;
185 ret = zlib_deflate(&workspace->strm, Z_FINISH);
186 zlib_deflateEnd(&workspace->strm);
188 if (ret != Z_STREAM_END) {
193 if (workspace->strm.total_out >= workspace->strm.total_in) {
199 *total_out = workspace->strm.total_out;
200 *total_in = workspace->strm.total_in;
202 *out_pages = nr_pages;
213 static int zlib_decompress_bio(struct list_head *ws, struct page **pages_in,
215 struct bio *orig_bio,
218 struct workspace *workspace = list_entry(ws, struct workspace, list);
220 int wbits = MAX_WBITS;
222 size_t total_out = 0;
223 unsigned long page_in_index = 0;
224 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
225 unsigned long buf_start;
227 data_in = kmap(pages_in[page_in_index]);
228 workspace->strm.next_in = data_in;
229 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
230 workspace->strm.total_in = 0;
232 workspace->strm.total_out = 0;
233 workspace->strm.next_out = workspace->buf;
234 workspace->strm.avail_out = PAGE_SIZE;
236 /* If it's deflate, and it's got no preset dictionary, then
237 we can tell zlib to skip the adler32 check. */
238 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
239 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
240 !(((data_in[0]<<8) + data_in[1]) % 31)) {
242 wbits = -((data_in[0] >> 4) + 8);
243 workspace->strm.next_in += 2;
244 workspace->strm.avail_in -= 2;
247 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
248 pr_warn("BTRFS: inflateInit failed\n");
249 kunmap(pages_in[page_in_index]);
252 while (workspace->strm.total_in < srclen) {
253 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
254 if (ret != Z_OK && ret != Z_STREAM_END)
257 buf_start = total_out;
258 total_out = workspace->strm.total_out;
260 /* we didn't make progress in this inflate call, we're done */
261 if (buf_start == total_out)
264 ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
265 total_out, disk_start,
272 workspace->strm.next_out = workspace->buf;
273 workspace->strm.avail_out = PAGE_SIZE;
275 if (workspace->strm.avail_in == 0) {
277 kunmap(pages_in[page_in_index]);
279 if (page_in_index >= total_pages_in) {
283 data_in = kmap(pages_in[page_in_index]);
284 workspace->strm.next_in = data_in;
285 tmp = srclen - workspace->strm.total_in;
286 workspace->strm.avail_in = min(tmp,
290 if (ret != Z_STREAM_END)
295 zlib_inflateEnd(&workspace->strm);
297 kunmap(pages_in[page_in_index]);
299 zero_fill_bio(orig_bio);
303 static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
304 struct page *dest_page,
305 unsigned long start_byte,
306 size_t srclen, size_t destlen)
308 struct workspace *workspace = list_entry(ws, struct workspace, list);
310 int wbits = MAX_WBITS;
311 unsigned long bytes_left;
312 unsigned long total_out = 0;
313 unsigned long pg_offset = 0;
316 destlen = min_t(unsigned long, destlen, PAGE_SIZE);
317 bytes_left = destlen;
319 workspace->strm.next_in = data_in;
320 workspace->strm.avail_in = srclen;
321 workspace->strm.total_in = 0;
323 workspace->strm.next_out = workspace->buf;
324 workspace->strm.avail_out = PAGE_SIZE;
325 workspace->strm.total_out = 0;
326 /* If it's deflate, and it's got no preset dictionary, then
327 we can tell zlib to skip the adler32 check. */
328 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
329 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
330 !(((data_in[0]<<8) + data_in[1]) % 31)) {
332 wbits = -((data_in[0] >> 4) + 8);
333 workspace->strm.next_in += 2;
334 workspace->strm.avail_in -= 2;
337 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
338 pr_warn("BTRFS: inflateInit failed\n");
342 while (bytes_left > 0) {
343 unsigned long buf_start;
344 unsigned long buf_offset;
347 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
348 if (ret != Z_OK && ret != Z_STREAM_END)
351 buf_start = total_out;
352 total_out = workspace->strm.total_out;
354 if (total_out == buf_start) {
359 if (total_out <= start_byte)
362 if (total_out > start_byte && buf_start < start_byte)
363 buf_offset = start_byte - buf_start;
367 bytes = min(PAGE_SIZE - pg_offset,
368 PAGE_SIZE - buf_offset);
369 bytes = min(bytes, bytes_left);
371 kaddr = kmap_atomic(dest_page);
372 memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
373 kunmap_atomic(kaddr);
378 workspace->strm.next_out = workspace->buf;
379 workspace->strm.avail_out = PAGE_SIZE;
382 if (ret != Z_STREAM_END && bytes_left != 0)
387 zlib_inflateEnd(&workspace->strm);
390 * this should only happen if zlib returned fewer bytes than we
391 * expected. btrfs_get_block is responsible for zeroing from the
392 * end of the inline extent (destlen) to the end of the page
394 if (pg_offset < destlen) {
395 kaddr = kmap_atomic(dest_page);
396 memset(kaddr + pg_offset, 0, destlen - pg_offset);
397 kunmap_atomic(kaddr);
402 const struct btrfs_compress_op btrfs_zlib_compress = {
403 .alloc_workspace = zlib_alloc_workspace,
404 .free_workspace = zlib_free_workspace,
405 .compress_pages = zlib_compress_pages,
406 .decompress_bio = zlib_decompress_bio,
407 .decompress = zlib_decompress,