// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/staging/erofs/data.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

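/*
 * Completion handler for raw read bios: propagate the I/O status to each
 * page (SetPageUptodate or SetPageError) and unlock it so waiters in the
 * page cache can proceed.
 */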
static inline void read_endio(struct bio *bio)
{
	struct super_block *const sb = bio->bi_private;
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (unlikely(err))
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

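/*
 * Read the metadata block at @blkaddr through the block device's page cache.
 * With @nofail, page allocation and bio setup cannot fail (__GFP_NOFAIL) and
 * read errors are retried up to EROFS_IO_MAX_RETRIES_NOFAIL times.
 */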
/* prio -- true is used for dir */
struct page *__erofs_get_meta_page(struct super_block *sb,
				   erofs_blk_t blkaddr, bool prio, bool nofail)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	/* prefer retrying in the allocator to blindly looping below */
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
		(nofail ? __GFP_NOFAIL : 0);
	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (unlikely(!page)) {
		DBG_BUGON(nofail);
		return ERR_PTR(-ENOMEM);
	}
	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
		if (IS_ERR(bio)) {
			DBG_BUGON(nofail);
			err = PTR_ERR(bio);
			goto err_out;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (unlikely(err != PAGE_SIZE)) {
			err = -EFAULT;
			goto err_out;
		}

		__submit_bio(bio, REQ_OP_READ,
			     REQ_META | (prio ? REQ_PRIO : 0));

		lock_page(page);

		/* this page has been truncated by others */
		if (unlikely(page->mapping != mapping)) {
unlock_repeat:
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (unlikely(!PageUptodate(page))) {
			if (io_retries) {
				--io_retries;
				goto unlock_repeat;
			}
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

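/*
 * Map a logical extent of a flat (uncompressed) inode to its physical
 * location: plain data blocks live contiguously starting at vi->raw_blkaddr,
 * while a tail-packed inline block sits right after the on-disk inode and
 * its xattrs.
 */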
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_vnode *vi = EROFS_V(inode);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - is_inode_flat_inline(inode);

	if (unlikely(offset >= inode->i_size)) {
		/* leave out-of-bounds access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there are no holes in flat mode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (is_inode_flat_inline(inode)) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data crosses block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

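/*
 * Dispatch to the compressed mapper for compressed inodes (dropping the
 * cached metadata page it may hold in map->mpage), otherwise fall back to
 * flat-mode mapping above.
 */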
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (unlikely(is_inode_layout_compression(inode))) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

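/*
 * Read one uncompressed page, merging physically contiguous pages into the
 * in-flight bio where possible.  Holes are zero-filled and tail-packed
 * inline data is copied out of the inode's metadata block instead of
 * issuing extra I/O.
 */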
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that in the readpage case, bio is also NULL */
	if (bio &&
	    /* not contiguous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (unlikely(err))
			goto err_out;

		/* zero out the page over a hole */
		if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* implies err == 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with the inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* implies err == 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of contiguous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_bio(sb, blknr, nblocks, sb,
				     read_endio, false);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			bio = NULL;
			goto err_out;
		}
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reset to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, contiguous pages have a gap */
	if (bio)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return unlikely(err) ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode locking needs to be
 * held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
	return 0;
}

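/*
 * Readahead entry point: walk the page list supplied by the VM, add each
 * page to the page cache and chain contiguous reads into as few bios as the
 * on-disk layout allows.
 */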
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_V(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (ending in gaps) */
	if (unlikely(bio))
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}

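/*
 * get_block_t callback used by erofs_bmap() below; the image is read-only,
 * so it only needs to fill in bh->b_blocknr for mapped extents.
 */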
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

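/*
 * FIBMAP support: the tail-packed inline part shares the inode's metadata
 * block, so report it as unmapped (0) instead of a bogus block address.
 */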
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (is_inode_flat_inline(inode)) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};