// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
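
/*
 * A rough reading of the margin formula above (intuition, not normative):
 * during in-place decompression, the unread tail of the compressed stream
 * must stay ahead of the decompressor's write pointer until it has been
 * consumed. Reserving (srcsize >> 8) + 32 bytes of slack, e.g.
 * 4096/256 + 32 = 48 bytes for one 4KiB page of input, keeps the two
 * regions from overlapping.
 */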

struct z_erofs_decompressor {
	/*
	 * if destpages contain sparse (missing) pages, fill them with
	 * bounce pages. it also checks whether destpages indicate
	 * continuous physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};
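
/*
 * Note: an LZ4 match may reference data up to LZ4_DISTANCE_MAX (64KiB - 1)
 * bytes back, so decompression needs that much history to be addressable
 * as virtually continuous memory. Assuming 4KiB pages, the window spans at
 * most DIV_ROUND_UP(65535, 4096) + 1 = 17 pages (LZ4_MAX_DISTANCE_PAGES),
 * which bounds how long a bounce page must be kept around before reuse.
 */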

static bool use_vmap;
module_param(use_vmap, bool, 0444);
MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");

static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
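	/*
	 * 'bounced' below acts as a ring over the last LZ4_MAX_DISTANCE_PAGES
	 * slots: bit j records whether the page handled a full round ago was
	 * a bounce page, so once it slides out of the LZ4 window it can be
	 * stashed in 'availables' for reuse.
	 */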
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= LZ4_MAX_DISTANCE_PAGES)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
			DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
		}

		if (page) {
			__clear_bit(j, bounced);
			/* kaddr tracks whether pages stay physically consecutive */
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
			if (unlikely(!victim))
				return -ENOMEM;
			victim->mapping = Z_EROFS_MAPPING_STAGING;
		}
		rq->out[i] = victim;
	}
	/* 1 - output is physically consecutive, page_address() is enough */
	return kaddr ? 1 : 0;
}

static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
				       u8 *src, unsigned int pageofs_in)
{
	/*
	 * if in-place decompression is ongoing, the compressed pages are
	 * about to be reused for output; copy the compressed data out
	 * first so it cannot be overwritten mid-stream.
	 */
	struct page **in = rq->in;
	u8 *const tmp = erofs_get_pcpubuf(0);
	u8 *tmpp = tmp;
	unsigned int inlen = rq->inputsize - pageofs_in;
	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);

	while (tmpp < tmp + inlen) {
		if (!src)
			src = kmap_atomic(*in);
		memcpy(tmpp, src + pageofs_in, count);
		kunmap_atomic(src);
		src = NULL;
		tmpp += count;
		pageofs_in = 0;
		count = PAGE_SIZE;
		++in;
	}
	return tmp;
}
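
/*
 * The scratch buffer returned above is a per-CPU buffer obtained via
 * erofs_get_pcpubuf(); the caller (the 'copied' path of lz4_decompress()
 * below) releases it with erofs_put_pcpubuf() after decompression.
 */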

static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin, inlen;
	u8 *src;
	bool copied, support_0padding;
	int ret;

	if (rq->inputsize > PAGE_SIZE)
		return -EOPNOTSUPP;

	src = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* in-place decompression is only safe when 0padding is enabled */
	if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) {
		support_0padding = true;

		while (!src[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(src);
			return -EIO;
		}
	}
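
	/*
	 * 0padding places zeroes at the head of the block so the compressed
	 * stream ends exactly on the block boundary; the scan above skips
	 * those leading zeroes to find the true start of the LZ4 data.
	 */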

	copied = false;
	inlen = rq->inputsize - inputmargin;
	if (rq->inplace_io) {
		const uint oend = (rq->pageofs_out +
				   rq->outputsize) & ~PAGE_MASK;
		const uint nr = PAGE_ALIGN(rq->pageofs_out +
					   rq->outputsize) >> PAGE_SHIFT;

		if (rq->partial_decoding || !support_0padding ||
		    rq->out[nr - 1] != rq->in[0] ||
		    rq->inputsize - oend <
		      LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
			src = generic_copy_inplace_data(rq, src, inputmargin);
			inputmargin = 0;
			copied = true;
		}
	}
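
	/*
	 * That is, true in-place decompression only happens when it is
	 * provably safe: full decoding, 0padding enabled, the compressed
	 * page reused as the last output page, and enough slack left past
	 * the output end (see LZ4_DECOMPRESS_INPLACE_MARGIN); otherwise the
	 * compressed data is copied out of the way first.
	 */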

	ret = LZ4_decompress_safe_partial(src + inputmargin, out,
					  inlen, rq->outputsize,
					  rq->outputsize);
	if (ret < 0) {
		errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]",
		      __func__, src + inputmargin, inlen, inputmargin,
		      out, rq->outputsize);
		WARN_ON(1);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, inlen, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);
		ret = -EIO;
	}

	if (copied)
		erofs_put_pcpubuf(src);
	else
		kunmap_atomic(src);
	return ret;
}

static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = lz4_prepare_destpages,
		.decompress = lz4_decompress,
		.name = "lz4"
	},
};

static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

static void *erofs_vmap(struct page **pages, unsigned int count)
{
	int i = 0;

	if (use_vmap)
		return vmap(pages, count, VM_MAP, PAGE_KERNEL);

	while (1) {
		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

		/* retry two more times (totally 3 times) */
		if (addr || ++i >= 3)
			return addr;
		vm_unmap_aliases();
	}
	return NULL;
}
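
/*
 * vm_map_ram() may fail transiently while lazily-freed vmap areas pile
 * up; vm_unmap_aliases() flushes them between attempts, which is why the
 * mapping above is retried up to three times before giving up.
 */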

static void erofs_vunmap(const void *mem, unsigned int count)
{
	if (!use_vmap)
		vm_unmap_ram(mem, count);
	else
		vunmap(mem);
}

static int decompress_generic(struct z_erofs_decompress_req *rq,
			      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	if (nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/*
	 * For the case of small output size (especially much less than
	 * PAGE_SIZE), it is preferable to memcpy the decompressed data
	 * rather than the compressed data.
	 */
	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
		dst = erofs_get_pcpubuf(0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		rq->inplace_io = false;
		ret = alg->decompress(rq, dst);
		if (!ret)
			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
					  rq->outputsize);

		erofs_put_pcpubuf(dst);
		return ret;
	}
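
	/*
	 * The PAGE_SIZE * 7 / 8 threshold above is a heuristic: once the
	 * output approaches a full page, staging it in the per-CPU buffer
	 * and copying it out costs about as much as mapping the destination
	 * pages directly, so the shortcut stops paying off.
	 */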

	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret) {
		/* output is physically consecutive, no vmap needed */
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vmap(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		erofs_vunmap(dst, nrpages_out);
	return ret;
}

static int shifted_decompress(const struct z_erofs_decompress_req *rq,
			      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (!rq->out[0]) {
		dst = NULL;
	} else {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
	}

	if (rq->out[1] == *rq->in) {
		memmove(src, src + righthalf, rq->pageofs_out);
	} else if (nrpages_out == 2) {
		if (dst)
			kunmap_atomic(dst);
		DBG_BUGON(!rq->out[1]);
		dst = kmap_atomic(rq->out[1]);
		memcpy(dst, src + righthalf, rq->pageofs_out);
	}

	if (dst)
		kunmap_atomic(dst);
	kunmap_atomic(src);
	return 0;
}
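
/*
 * Z_EROFS_COMPRESSION_SHIFTED covers clusters stored without compression:
 * the payload only needs to be shifted to pageofs_out and possibly split
 * across two destination pages, hence the memcpy/memmove handling above.
 */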

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return shifted_decompress(rq, pagepool);
	return decompress_generic(rq, pagepool);
}
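
/*
 * A hedged usage sketch for z_erofs_decompress() above; the caller-side
 * names (compressed_pages, decompressed_pages, etc.) are illustrative
 * only and do not come from this file:
 *
 *	LIST_HEAD(pagepool);
 *	struct z_erofs_decompress_req rq = {
 *		.sb = sb,
 *		.in = compressed_pages,
 *		.out = decompressed_pages,
 *		.pageofs_out = pageofs,
 *		.inputsize = compressed_bytes,
 *		.outputsize = decompressed_bytes,
 *		.alg = Z_EROFS_COMPRESSION_LZ4,
 *		.inplace_io = true,
 *		.partial_decoding = false,
 *	};
 *	int err = z_erofs_decompress(&rq, &pagepool);
 */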