// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/staging/erofs/data.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>
static inline void read_endio(struct bio *bio)
{
	struct super_block *const sb = bio->bi_private;
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (unlikely(err))
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}
/* prio -- true is used for dir */
struct page *__erofs_get_meta_page(struct super_block *sb,
				   erofs_blk_t blkaddr, bool prio, bool nofail)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	/* prefer retrying in the allocator to blindly looping below */
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
		(nofail ? __GFP_NOFAIL : 0);
	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (unlikely(!page)) {
		DBG_BUGON(nofail);
		return ERR_PTR(-ENOMEM);
	}
	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
		if (IS_ERR(bio)) {
			DBG_BUGON(nofail);
			err = PTR_ERR(bio);
			goto err_out;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (unlikely(err != PAGE_SIZE)) {
			err = -EFAULT;
			goto err_out;
		}

		__submit_bio(bio, REQ_OP_READ,
			     REQ_META | (prio ? REQ_PRIO : 0));

		lock_page(page);

		/* this page has been truncated by others */
		if (unlikely(page->mapping != mapping)) {
unlock_repeat:
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (unlikely(!PageUptodate(page))) {
			if (io_retries) {
				--io_retries;
				goto unlock_repeat;
			}
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}
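/*
 * A minimal usage sketch, assuming the erofs_get_meta_page() /
 * erofs_get_meta_page_nofail() helpers declared in internal.h: callers
 * normally go through those wrappers instead of __erofs_get_meta_page()
 * directly, roughly:
 *
 *	page = erofs_get_meta_page(sb, blkaddr, prio);        (nofail == false)
 *	page = erofs_get_meta_page_nofail(sb, blkaddr, prio); (nofail == true)
 *
 * erofs_read_raw_page() below uses the non-nofail form for inline data.
 */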
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_vnode *vi = EROFS_V(inode);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - is_inode_flat_inline(inode);

	if (unlikely(offset >= inode->i_size)) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (is_inode_flat_inline(inode)) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data cross block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);

			err = -EIO;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (unlikely(is_inode_layout_compression(inode))) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}
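/*
 * A minimal usage sketch for erofs_map_blocks(): a caller that wants the
 * physical extent backing logical offset pos of an uncompressed inode
 * would do roughly the following (erofs_read_raw_page() below is the
 * in-tree example):
 *
 *	struct erofs_map_blocks map = { .m_la = pos };
 *
 *	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 *	if (!err && (map.m_flags & EROFS_MAP_MAPPED))
 *		blkaddr = erofs_blknr(map.m_pa);
 */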
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for readpage case, bio also equals to NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (unlikely(err))
			goto err_out;

		/* zero out the holed page */
		if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_bio(sb, blknr, nblocks, sb,
				     read_endio, false);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			bio = NULL;
			goto err_out;
		}
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* shift in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reassigned to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return unlikely(err) ? ERR_PTR(err) : NULL;
}
/*
 * Since we don't have write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_V(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (is_inode_flat_inline(inode)) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}
/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};
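/*
 * A minimal wiring sketch, assuming inode setup lives elsewhere in the
 * driver (e.g. inode.c): uncompressed inodes install these operations so
 * that the generic read path reaches the callbacks above, roughly:
 *
 *	inode->i_mapping->a_ops = &erofs_raw_access_aops;
 */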