1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
7 #include <linux/spinlock.h>
8 #include <linux/completion.h>
9 #include <linux/buffer_head.h>
10 #include <linux/blkdev.h>
11 #include <linux/gfs2_ondisk.h>
12 #include <linux/crc32.h>
13 #include <linux/iomap.h>
14 #include <linux/ktime.h>
30 #include "trace_gfs2.h"
32 /* This doesn't need to be that large as the maximum number of 64-bit pointers in a 4k
33 * block is 512, so __u16 is fine for that. It saves stack space to keep it small.
37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 __u16 mp_list[GFS2_MAX_META_HEIGHT];
39 int mp_fheight; /* find_metapath height */
40 int mp_aheight; /* actual height (lookup height) */
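/*
 * Worked numbers (an illustrative aside, assuming the default 4096-byte
 * block size): a block can hold at most 4096 / sizeof(__be64) = 512
 * pointers, and after the on-disk headers the real counts are roughly
 * sd_diptrs = (4096 - sizeof(struct gfs2_dinode)) / 8 = 483 for the dinode
 * and sd_inptrs = (4096 - sizeof(struct gfs2_meta_header)) / 8 = 509 for an
 * indirect block, so every index stored in mp_list fits easily in a __u16.
 */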
43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
48 * @dibh: the dinode buffer
49 * @block: the block number that was allocated
50 * @page: The (optional) page. This is looked up if @page is NULL
55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56 u64 block, struct page *page)
58 struct inode *inode = &ip->i_inode;
59 struct buffer_head *bh;
62 if (!page || page->index) {
63 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
69 if (!PageUptodate(page)) {
70 void *kaddr = kmap(page);
71 u64 dsize = i_size_read(inode);
73 if (dsize > gfs2_max_stuffed_size(ip))
74 dsize = gfs2_max_stuffed_size(ip);
76 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
77 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
80 SetPageUptodate(page);
83 if (!page_has_buffers(page))
84 create_empty_buffers(page, BIT(inode->i_blkbits),
87 bh = page_buffers(page);
89 if (!buffer_mapped(bh))
90 map_bh(bh, inode->i_sb, block);
92 set_buffer_uptodate(bh);
93 if (gfs2_is_jdata(ip))
94 gfs2_trans_add_data(ip->i_gl, bh);
96 mark_buffer_dirty(bh);
97 gfs2_ordered_add_inode(ip);
109 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
110 * @ip: The GFS2 inode to unstuff
111 * @page: The (optional) page. This is looked up if @page is NULL
113 * This routine unstuffs a dinode and returns it to a "normal" state such
114 * that the height can be grown in the traditional way.
119 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
121 struct buffer_head *bh, *dibh;
122 struct gfs2_dinode *di;
124 int isdir = gfs2_is_dir(ip);
127 down_write(&ip->i_rw_mutex);
129 error = gfs2_meta_inode_buffer(ip, &dibh);
133 if (i_size_read(&ip->i_inode)) {
134 /* Get a free block, fill it with the stuffed data,
135 and write it out to disk */
138 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
142 gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
143 error = gfs2_dir_get_new_buffer(ip, block, &bh);
146 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
147 dibh, sizeof(struct gfs2_dinode));
150 error = gfs2_unstuffer_page(ip, dibh, block, page);
156 /* Set up the pointer to the new block */
158 gfs2_trans_add_meta(ip->i_gl, dibh);
159 di = (struct gfs2_dinode *)dibh->b_data;
160 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
162 if (i_size_read(&ip->i_inode)) {
163 *(__be64 *)(di + 1) = cpu_to_be64(block);
164 gfs2_add_inode_blocks(&ip->i_inode, 1);
165 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
169 di->di_height = cpu_to_be16(1);
174 up_write(&ip->i_rw_mutex);
180 * find_metapath - Find path through the metadata tree
181 * @sdp: The superblock
182 * @block: The disk block to look up
183 * @mp: The metapath to return the result in
184 * @height: The pre-calculated height of the metadata tree
186 * This routine returns a struct metapath structure that defines a path
187 * through the metadata of inode "ip" to get to block "block".
190 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
191 * filesystem with a blocksize of 4096.
193 * find_metapath() would return a struct metapath structure set to:
194 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
196 * That means that in order to get to the block containing the byte at
197 * offset 101342453, we would load the indirect block pointed to by pointer
198 * 0 in the dinode. We would then load the indirect block pointed to by
199 * pointer 48 in that indirect block. We would then load the data block
200 * pointed to by pointer 165 in that indirect block.
 *            dinode
 *              |  pointer 0
 *              v
 *            indirect block (height 1)
 *              |  pointer 48
 *              v
 *            indirect block (height 2)
 *              |  pointer 165
 *              v
 *            data block containing offset
238 static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
239 struct metapath *mp, unsigned int height)
243 mp->mp_fheight = height;
244 for (i = height; i--;)
245 mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
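/*
 * Worked check of the example in the comment above (illustrative only,
 * using a simplified 512 pointers per indirect block; the code divides by
 * sd_inptrs, which is slightly smaller because of the block header):
 *
 *   block = 101342453 >> 12 = 24741
 *   mp_list[2] = 24741 % 512 = 165,  block becomes 24741 / 512 = 48
 *   mp_list[1] =    48 % 512 =  48,  block becomes    48 / 512 =  0
 *   mp_list[0] =     0
 */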
248 static inline unsigned int metapath_branch_start(const struct metapath *mp)
250 if (mp->mp_list[0] == 0)
256 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
257 * @height: The metadata height (0 = dinode)
260 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
262 struct buffer_head *bh = mp->mp_bh[height];
264 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
265 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
269 * metapointer - Return pointer to start of metadata in a buffer
270 * @height: The metadata height (0 = dinode)
273 * Return a pointer to the block number of the next height of the metadata
274 * tree given a buffer containing the pointer to the current height of the metadata tree.
278 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
280 __be64 *p = metaptr1(height, mp);
281 return p + mp->mp_list[height];
284 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
286 const struct buffer_head *bh = mp->mp_bh[height];
287 return (const __be64 *)(bh->b_data + bh->b_size);
290 static void clone_metapath(struct metapath *clone, struct metapath *mp)
295 for (hgt = 0; hgt < mp->mp_aheight; hgt++)
296 get_bh(clone->mp_bh[hgt]);
299 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
303 for (t = start; t < end; t++) {
304 struct buffer_head *rabh;
309 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
310 if (trylock_buffer(rabh)) {
311 if (!buffer_uptodate(rabh)) {
312 rabh->b_end_io = end_buffer_read_sync;
313 submit_bh(REQ_OP_READ,
314 REQ_RAHEAD | REQ_META | REQ_PRIO,
324 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
325 unsigned int x, unsigned int h)
328 __be64 *ptr = metapointer(x, mp);
329 u64 dblock = be64_to_cpu(*ptr);
334 ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
338 mp->mp_aheight = x + 1;
343 * lookup_metapath - Walk the metadata tree to a specific point
347 * Assumes that the inode's buffer has already been looked up and
348 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
349 * by find_metapath().
351 * If this function encounters part of the tree which has not been
352 * allocated, it returns the current height of the tree at the point
353 * at which it found the unallocated block. Blocks which are found are
354 * added to the mp->mp_bh[] list.
359 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
361 return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
365 * fillup_metapath - fill up buffers for the metadata path to a specific height
368 * @h: The height to which it should be mapped
370 * Similar to lookup_metapath, but does lookups for a range of heights
372 * Returns: error or the number of buffers filled
375 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
381 /* find the first buffer we need to look up. */
382 for (x = h - 1; x > 0; x--) {
387 ret = __fillup_metapath(ip, mp, x, h);
390 return mp->mp_aheight - x - 1;
393 static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
395 sector_t factor = 1, block = 0;
398 for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
399 if (hgt < mp->mp_aheight)
400 block += mp->mp_list[hgt] * factor;
401 factor *= sdp->sd_inptrs;
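/*
 * Illustrative inverse of the find_metapath() example, under the same
 * simplified 512 pointers per block: mp_list = {0, 48, 165} yields
 * 0 * 512 * 512 + 48 * 512 + 165 = 24741, the original logical block.
 */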
406 static void release_metapath(struct metapath *mp)
410 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
411 if (mp->mp_bh[i] == NULL)
413 brelse(mp->mp_bh[i]);
419 * gfs2_extent_length - Returns length of an extent of blocks
420 * @bh: The metadata block
421 * @ptr: Current position in @bh
422 * @limit: Max extent length to return
423 * @eob: Set to 1 if we hit "end of block"
425 * Returns: The length of the extent (minimum of one block)
428 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
430 const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
431 const __be64 *first = ptr;
432 u64 d = be64_to_cpu(*ptr);
440 } while(be64_to_cpu(*ptr) == d);
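/*
 * For example (illustrative): if the pointers starting at @ptr decode to
 * physical blocks 1000, 1001, 1002, 1007, ..., the run of consecutive block
 * numbers ends after three entries, so the extent length returned is 3
 * (subject to @limit and to the end of the buffer, which sets *eob).
 */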
446 enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
449 * gfs2_metadata_walker - walk an indirect block
450 * @mp: Metapath to indirect block
451 * @ptrs: Number of pointers to look at
453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
454 * indirect block to follow.
456 typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
460 * gfs2_walk_metadata - walk a tree of indirect blocks
462 * @mp: Starting point of walk
463 * @max_len: Maximum number of blocks to walk
464 * @walker: Called during the walk
466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
467 * past the end of metadata, and a negative error code otherwise.
470 static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
471 u64 max_len, gfs2_metadata_walker walker)
473 struct gfs2_inode *ip = GFS2_I(inode);
474 struct gfs2_sbd *sdp = GFS2_SB(inode);
480 * The walk starts in the lowest allocated indirect block, which may be
481 * before the position indicated by @mp. Adjust @max_len accordingly
482 * to avoid a short walk.
484 for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
485 max_len += mp->mp_list[hgt] * factor;
486 mp->mp_list[hgt] = 0;
487 factor *= sdp->sd_inptrs;
491 u16 start = mp->mp_list[hgt];
492 enum walker_status status;
496 /* Walk indirect block. */
497 ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
500 ptrs = DIV_ROUND_UP_ULL(max_len, factor);
501 status = walker(mp, ptrs);
506 BUG_ON(mp->mp_aheight == mp->mp_fheight);
507 ptrs = mp->mp_list[hgt] - start;
516 if (status == WALK_FOLLOW)
517 goto fill_up_metapath;
520 /* Decrease height of metapath. */
521 brelse(mp->mp_bh[hgt]);
522 mp->mp_bh[hgt] = NULL;
523 mp->mp_list[hgt] = 0;
527 factor *= sdp->sd_inptrs;
529 /* Advance in metadata tree. */
530 (mp->mp_list[hgt])++;
531 if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
538 /* Increase height of metapath. */
539 ret = fillup_metapath(ip, mp, ip->i_height - 1);
544 do_div(factor, sdp->sd_inptrs);
545 mp->mp_aheight = hgt + 1;
550 static enum walker_status gfs2_hole_walker(struct metapath *mp,
553 const __be64 *start, *ptr, *end;
556 hgt = mp->mp_aheight - 1;
557 start = metapointer(hgt, mp);
560 for (ptr = start; ptr < end; ptr++) {
562 mp->mp_list[hgt] += ptr - start;
563 if (mp->mp_aheight == mp->mp_fheight)
568 return WALK_CONTINUE;
572 * gfs2_hole_size - figure out the size of a hole
574 * @lblock: The logical starting block number
575 * @len: How far to look (in blocks)
576 * @mp: The metapath at lblock
577 * @iomap: The iomap to store the hole size in
579 * This function modifies @mp.
581 * Returns: errno on error
583 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
584 struct metapath *mp, struct iomap *iomap)
586 struct metapath clone;
590 clone_metapath(&clone, mp);
591 ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
596 hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
599 iomap->length = hole_size << inode->i_blkbits;
603 release_metapath(&clone);
607 static inline __be64 *gfs2_indirect_init(struct metapath *mp,
608 struct gfs2_glock *gl, unsigned int i,
609 unsigned offset, u64 bn)
611 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
612 ((i > 1) ? sizeof(struct gfs2_meta_header) :
613 sizeof(struct gfs2_dinode)));
615 BUG_ON(mp->mp_bh[i] != NULL);
616 mp->mp_bh[i] = gfs2_meta_new(gl, bn);
617 gfs2_trans_add_meta(gl, mp->mp_bh[i]);
618 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
619 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
621 *ptr = cpu_to_be64(bn);
627 ALLOC_GROW_DEPTH = 1,
628 ALLOC_GROW_HEIGHT = 2,
629 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */
633 * gfs2_iomap_alloc - Build a metadata tree of the requested height
634 * @inode: The GFS2 inode
635 * @iomap: The iomap structure
636 * @mp: The metapath, with proper height information calculated
638 * In this routine we may have to alloc:
639 * i) Indirect blocks to grow the metadata tree height
640 * ii) Indirect blocks to fill in lower part of the metadata tree
643 * This function is called after gfs2_iomap_get, which works out the
644 * total number of blocks which we need via gfs2_alloc_size.
646 * We then do the actual allocation asking for an extent at a time (if
647 * enough contiguous free blocks are available, there will only be one
648 * allocation request per call) and use the state machine to initialise
649 * the blocks in order.
651 * Right now, this function will allocate at most one indirect block
652 * worth of data -- with a default block size of 4K, that's slightly
653 * less than 2M. If this limitation is ever removed to allow huge
654 * allocations, we would probably still want to limit the iomap size we
655 * return to avoid stalling other tasks during huge writes; the next
656 * iomap iteration would then find the blocks already allocated.
658 * Returns: errno on error
661 static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
664 struct gfs2_inode *ip = GFS2_I(inode);
665 struct gfs2_sbd *sdp = GFS2_SB(inode);
666 struct buffer_head *dibh = mp->mp_bh[0];
668 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
669 size_t dblks = iomap->length >> inode->i_blkbits;
670 const unsigned end_of_metadata = mp->mp_fheight - 1;
672 enum alloc_state state;
676 BUG_ON(mp->mp_aheight < 1);
677 BUG_ON(dibh == NULL);
680 gfs2_trans_add_meta(ip->i_gl, dibh);
682 down_write(&ip->i_rw_mutex);
684 if (mp->mp_fheight == mp->mp_aheight) {
685 /* Bottom indirect block exists */
688 /* Need to allocate indirect blocks */
689 if (mp->mp_fheight == ip->i_height) {
690 /* Writing into existing tree, extend tree down */
691 iblks = mp->mp_fheight - mp->mp_aheight;
692 state = ALLOC_GROW_DEPTH;
694 /* Building up tree height */
695 state = ALLOC_GROW_HEIGHT;
696 iblks = mp->mp_fheight - ip->i_height;
697 branch_start = metapath_branch_start(mp);
698 iblks += (mp->mp_fheight - branch_start);
702 /* start of the second part of the function (state machine) */
704 blks = dblks + iblks;
708 ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
712 if (state != ALLOC_DATA || gfs2_is_jdata(ip))
713 gfs2_trans_remove_revoke(sdp, bn, n);
715 /* Growing height of tree */
716 case ALLOC_GROW_HEIGHT:
718 ptr = (__be64 *)(dibh->b_data +
719 sizeof(struct gfs2_dinode));
722 for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
724 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
725 if (i - 1 == mp->mp_fheight - ip->i_height) {
727 gfs2_buffer_copy_tail(mp->mp_bh[i],
728 sizeof(struct gfs2_meta_header),
729 dibh, sizeof(struct gfs2_dinode));
730 gfs2_buffer_clear_tail(dibh,
731 sizeof(struct gfs2_dinode) +
733 ptr = (__be64 *)(mp->mp_bh[i]->b_data +
734 sizeof(struct gfs2_meta_header));
736 state = ALLOC_GROW_DEPTH;
737 for(i = branch_start; i < mp->mp_fheight; i++) {
738 if (mp->mp_bh[i] == NULL)
740 brelse(mp->mp_bh[i]);
747 /* fall through - To branching from existing tree */
748 case ALLOC_GROW_DEPTH:
749 if (i > 1 && i < mp->mp_fheight)
750 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
751 for (; i < mp->mp_fheight && n > 0; i++, n--)
752 gfs2_indirect_init(mp, ip->i_gl, i,
753 mp->mp_list[i-1], bn++);
754 if (i == mp->mp_fheight)
758 /* fall through - To tree complete, adding data blocks */
761 BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
762 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
764 ptr = metapointer(end_of_metadata, mp);
765 iomap->addr = bn << inode->i_blkbits;
766 iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
768 *ptr++ = cpu_to_be64(bn++);
771 } while (iomap->addr == IOMAP_NULL_ADDR);
773 iomap->type = IOMAP_MAPPED;
774 iomap->length = (u64)dblks << inode->i_blkbits;
775 ip->i_height = mp->mp_fheight;
776 gfs2_add_inode_blocks(&ip->i_inode, alloced);
777 gfs2_dinode_out(ip, dibh->b_data);
779 up_write(&ip->i_rw_mutex);
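/*
 * Illustrative walk through the states above (a sketch, not a trace from
 * the original code): take a height-1 file being extended just past what
 * the dinode's direct pointers can address, so mp_fheight becomes 2. The
 * first pass runs ALLOC_GROW_HEIGHT: a new indirect block is initialised,
 * the dinode's pointer area is copied into it and then cleared, and the
 * dinode is left pointing at that new block. The state advances to
 * ALLOC_GROW_DEPTH, which has nothing to add because the bottom indirect
 * block now exists, and then to ALLOC_DATA, which writes the newly
 * allocated data block numbers into that indirect block and reports the
 * mapping back through @iomap.
 */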
783 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
786 * gfs2_alloc_size - Compute the maximum allocation size
789 * @size: Requested size in blocks
791 * Compute the maximum size of the next allocation at @mp.
793 * Returns: size in blocks
795 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
797 struct gfs2_inode *ip = GFS2_I(inode);
798 struct gfs2_sbd *sdp = GFS2_SB(inode);
799 const __be64 *first, *ptr, *end;
802 * For writes to stuffed files, this function is called twice via
803 * gfs2_iomap_get, before and after unstuffing. The size we return the
804 * first time needs to be large enough to get the reservation and
805 * allocation sizes right. The size we return the second time must
806 * be exact or else gfs2_iomap_alloc won't do the right thing.
809 if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
810 unsigned int maxsize = mp->mp_fheight > 1 ?
811 sdp->sd_inptrs : sdp->sd_diptrs;
812 maxsize -= mp->mp_list[mp->mp_fheight - 1];
818 first = metapointer(ip->i_height - 1, mp);
819 end = metaend(ip->i_height - 1, mp);
820 if (end - first > size)
822 for (ptr = first; ptr < end; ptr++) {
830 * gfs2_iomap_get - Map blocks from an inode to disk blocks
832 * @pos: Starting position in bytes
833 * @length: Length to map, in bytes
834 * @flags: iomap flags
835 * @iomap: The iomap structure
840 static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
841 unsigned flags, struct iomap *iomap,
844 struct gfs2_inode *ip = GFS2_I(inode);
845 struct gfs2_sbd *sdp = GFS2_SB(inode);
846 loff_t size = i_size_read(inode);
849 sector_t lblock_stop;
853 struct buffer_head *dibh = NULL, *bh;
859 down_read(&ip->i_rw_mutex);
861 ret = gfs2_meta_inode_buffer(ip, &dibh);
866 if (gfs2_is_stuffed(ip)) {
867 if (flags & IOMAP_WRITE) {
868 loff_t max_size = gfs2_max_stuffed_size(ip);
870 if (pos + length > max_size)
872 iomap->length = max_size;
875 if (flags & IOMAP_REPORT) {
881 iomap->length = length;
885 iomap->length = size;
887 iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
888 sizeof(struct gfs2_dinode);
889 iomap->type = IOMAP_INLINE;
890 iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
895 lblock = pos >> inode->i_blkbits;
896 iomap->offset = lblock << inode->i_blkbits;
897 lblock_stop = (pos + length - 1) >> inode->i_blkbits;
898 len = lblock_stop - lblock + 1;
899 iomap->length = len << inode->i_blkbits;
901 height = ip->i_height;
902 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
904 find_metapath(sdp, lblock, mp, height);
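/*
 * Rough orientation (illustrative, assuming the default 4K block size):
 * sd_heightsize[1] is about 1.9 MB (483 direct pointers), height 2 reaches
 * roughly 1 GB and height 3 a few hundred GB; each extra level of
 * indirection multiplies the limit by sd_inptrs (509 for 4K blocks).
 */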
905 if (height > ip->i_height || gfs2_is_stuffed(ip))
908 ret = lookup_metapath(ip, mp);
912 if (mp->mp_aheight != ip->i_height)
915 ptr = metapointer(ip->i_height - 1, mp);
919 bh = mp->mp_bh[ip->i_height - 1];
920 len = gfs2_extent_length(bh, ptr, len, &eob);
922 iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
923 iomap->length = len << inode->i_blkbits;
924 iomap->type = IOMAP_MAPPED;
925 iomap->flags |= IOMAP_F_MERGED;
927 iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
930 iomap->bdev = inode->i_sb->s_bdev;
932 up_read(&ip->i_rw_mutex);
936 iomap->addr = IOMAP_NULL_ADDR;
937 iomap->type = IOMAP_HOLE;
938 if (flags & IOMAP_REPORT) {
941 else if (height == ip->i_height)
942 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
944 iomap->length = size - pos;
945 } else if (flags & IOMAP_WRITE) {
948 if (flags & IOMAP_DIRECT)
949 goto out; /* (see gfs2_file_direct_write) */
951 len = gfs2_alloc_size(inode, mp, len);
952 alloc_size = len << inode->i_blkbits;
953 if (alloc_size < iomap->length)
954 iomap->length = alloc_size;
956 if (pos < size && height == ip->i_height)
957 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
963 * gfs2_lblk_to_dblk - convert logical block to disk block
964 * @inode: the inode of the file we're mapping
965 * @lblock: the block relative to the start of the file
966 * @dblock: the returned dblock, if no error
968 * This function maps a single block from a file logical block (relative to
969 * the start of the file) to a file system absolute block using iomap.
971 * Returns: the absolute file system block, or an error
973 int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
975 struct iomap iomap = { };
976 struct metapath mp = { .mp_aheight = 1, };
977 loff_t pos = (loff_t)lblock << inode->i_blkbits;
980 ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
981 release_metapath(&mp);
983 *dblock = iomap.addr >> inode->i_blkbits;
988 static int gfs2_write_lock(struct inode *inode)
990 struct gfs2_inode *ip = GFS2_I(inode);
991 struct gfs2_sbd *sdp = GFS2_SB(inode);
994 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
995 error = gfs2_glock_nq(&ip->i_gh);
998 if (&ip->i_inode == sdp->sd_rindex) {
999 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1001 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
1002 GL_NOCACHE, &m_ip->i_gh);
1009 gfs2_glock_dq(&ip->i_gh);
1011 gfs2_holder_uninit(&ip->i_gh);
1015 static void gfs2_write_unlock(struct inode *inode)
1017 struct gfs2_inode *ip = GFS2_I(inode);
1018 struct gfs2_sbd *sdp = GFS2_SB(inode);
1020 if (&ip->i_inode == sdp->sd_rindex) {
1021 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1023 gfs2_glock_dq_uninit(&m_ip->i_gh);
1025 gfs2_glock_dq_uninit(&ip->i_gh);
1028 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
1029 unsigned len, struct iomap *iomap)
1031 unsigned int blockmask = i_blocksize(inode) - 1;
1032 struct gfs2_sbd *sdp = GFS2_SB(inode);
1033 unsigned int blocks;
1035 blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
1036 return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
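/*
 * Worked example for the block count above (illustrative, 4096-byte
 * blocks): pos = 6000 and len = 3000 gives
 * ((6000 & 4095) + 3000 + 4095) >> 12 = 2, i.e. bytes 6000-8999 touch two
 * blocks, so the transaction reserves RES_DINODE plus two journal blocks.
 */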
1039 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1040 unsigned copied, struct page *page,
1041 struct iomap *iomap)
1043 struct gfs2_trans *tr = current->journal_info;
1044 struct gfs2_inode *ip = GFS2_I(inode);
1045 struct gfs2_sbd *sdp = GFS2_SB(inode);
1047 if (page && !gfs2_is_stuffed(ip))
1048 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1050 if (tr->tr_num_buf_new)
1051 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1053 gfs2_trans_end(sdp);
1056 static const struct iomap_page_ops gfs2_iomap_page_ops = {
1057 .page_prepare = gfs2_iomap_page_prepare,
1058 .page_done = gfs2_iomap_page_done,
1061 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1062 loff_t length, unsigned flags,
1063 struct iomap *iomap,
1064 struct metapath *mp)
1066 struct gfs2_inode *ip = GFS2_I(inode);
1067 struct gfs2_sbd *sdp = GFS2_SB(inode);
1071 unstuff = gfs2_is_stuffed(ip) &&
1072 pos + length > gfs2_max_stuffed_size(ip);
1074 if (unstuff || iomap->type == IOMAP_HOLE) {
1075 unsigned int data_blocks, ind_blocks;
1076 struct gfs2_alloc_parms ap = {};
1077 unsigned int rblocks;
1078 struct gfs2_trans *tr;
1080 gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1082 ap.target = data_blocks + ind_blocks;
1083 ret = gfs2_quota_lock_check(ip, &ap);
1087 ret = gfs2_inplace_reserve(ip, &ap);
1091 rblocks = RES_DINODE + ind_blocks;
1092 if (gfs2_is_jdata(ip))
1093 rblocks += data_blocks;
1094 if (ind_blocks || data_blocks)
1095 rblocks += RES_STATFS + RES_QUOTA;
1096 if (inode == sdp->sd_rindex)
1097 rblocks += 2 * RES_STATFS;
1098 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1100 ret = gfs2_trans_begin(sdp, rblocks,
1101 iomap->length >> inode->i_blkbits);
1103 goto out_trans_fail;
1106 ret = gfs2_unstuff_dinode(ip, NULL);
1109 release_metapath(mp);
1110 ret = gfs2_iomap_get(inode, iomap->offset,
1111 iomap->length, flags, iomap, mp);
1116 if (iomap->type == IOMAP_HOLE) {
1117 ret = gfs2_iomap_alloc(inode, iomap, mp);
1119 gfs2_trans_end(sdp);
1120 gfs2_inplace_release(ip);
1121 punch_hole(ip, iomap->offset, iomap->length);
1126 tr = current->journal_info;
1127 if (tr->tr_num_buf_new)
1128 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1130 gfs2_trans_end(sdp);
1133 if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1134 iomap->page_ops = &gfs2_iomap_page_ops;
1138 gfs2_trans_end(sdp);
1140 gfs2_inplace_release(ip);
1142 gfs2_quota_unlock(ip);
1146 static inline bool gfs2_iomap_need_write_lock(unsigned flags)
1148 return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
1151 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1152 unsigned flags, struct iomap *iomap)
1154 struct gfs2_inode *ip = GFS2_I(inode);
1155 struct metapath mp = { .mp_aheight = 1, };
1158 iomap->flags |= IOMAP_F_BUFFER_HEAD;
1160 trace_gfs2_iomap_start(ip, pos, length, flags);
1161 if (gfs2_iomap_need_write_lock(flags)) {
1162 ret = gfs2_write_lock(inode);
1167 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1171 switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1173 if (flags & IOMAP_DIRECT) {
1175 * Silently fall back to buffered I/O for stuffed files
1176 * or if we've got a hole (see gfs2_file_direct_write).
1178 if (iomap->type != IOMAP_MAPPED)
1184 if (iomap->type == IOMAP_HOLE)
1191 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1194 if (ret && gfs2_iomap_need_write_lock(flags))
1195 gfs2_write_unlock(inode);
1196 release_metapath(&mp);
1198 trace_gfs2_iomap_end(ip, iomap, ret);
1202 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1203 ssize_t written, unsigned flags, struct iomap *iomap)
1205 struct gfs2_inode *ip = GFS2_I(inode);
1206 struct gfs2_sbd *sdp = GFS2_SB(inode);
1208 switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1210 if (flags & IOMAP_DIRECT)
1214 if (iomap->type == IOMAP_HOLE)
1221 if (!gfs2_is_stuffed(ip))
1222 gfs2_ordered_add_inode(ip);
1224 if (inode == sdp->sd_rindex)
1225 adjust_fs_space(inode);
1227 gfs2_inplace_release(ip);
1229 if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1230 /* Deallocate blocks that were just allocated. */
1231 loff_t blockmask = i_blocksize(inode) - 1;
1232 loff_t end = (pos + length) & ~blockmask;
1234 pos = (pos + written + blockmask) & ~blockmask;
1236 truncate_pagecache_range(inode, pos, end - 1);
1237 punch_hole(ip, pos, end - pos);
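/*
 * Illustrative numbers for the short-write case above (4096-byte blocks):
 * with pos = 0, length = 8192 and written = 4096, end rounds down to 8192
 * and pos rounds up to 4096, so the page cache for bytes 4096-8191 is
 * truncated and punch_hole() frees the block that was allocated but never
 * written.
 */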
1241 if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1242 gfs2_quota_unlock(ip);
1244 if (unlikely(!written))
1247 if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1248 mark_inode_dirty(inode);
1249 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1252 if (gfs2_iomap_need_write_lock(flags))
1253 gfs2_write_unlock(inode);
1257 const struct iomap_ops gfs2_iomap_ops = {
1258 .iomap_begin = gfs2_iomap_begin,
1259 .iomap_end = gfs2_iomap_end,
1263 * gfs2_block_map - Map one or more blocks of an inode to a disk block
1265 * @lblock: The logical block number
1266 * @bh_map: The bh to be mapped
1267 * @create: True if it's ok to alloc blocks to satisfy the request
1269 * The size of the requested mapping is defined in bh_map->b_size.
1271 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1272 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and
1273 * bh_map->b_size to indicate the size of the mapping when @lblock and
1274 * successive blocks are mapped, up to the requested size.
1276 * Sets buffer_boundary() if a read of metadata will be required
1277 * before the next block can be mapped. Sets buffer_new() if new
1278 * blocks were allocated.
1283 int gfs2_block_map(struct inode *inode, sector_t lblock,
1284 struct buffer_head *bh_map, int create)
1286 struct gfs2_inode *ip = GFS2_I(inode);
1287 loff_t pos = (loff_t)lblock << inode->i_blkbits;
1288 loff_t length = bh_map->b_size;
1289 struct metapath mp = { .mp_aheight = 1, };
1290 struct iomap iomap = { };
1293 clear_buffer_mapped(bh_map);
1294 clear_buffer_new(bh_map);
1295 clear_buffer_boundary(bh_map);
1296 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1299 ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
1300 if (!ret && iomap.type == IOMAP_HOLE)
1301 ret = gfs2_iomap_alloc(inode, &iomap, &mp);
1302 release_metapath(&mp);
1304 ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
1305 release_metapath(&mp);
1310 if (iomap.length > bh_map->b_size) {
1311 iomap.length = bh_map->b_size;
1312 iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1314 if (iomap.addr != IOMAP_NULL_ADDR)
1315 map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1316 bh_map->b_size = iomap.length;
1317 if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1318 set_buffer_boundary(bh_map);
1319 if (iomap.flags & IOMAP_F_NEW)
1320 set_buffer_new(bh_map);
1323 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1328 * Deprecated: do not use in new code
1330 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
1332 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
1340 bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
1341 ret = gfs2_block_map(inode, lblock, &bh, create);
1342 *extlen = bh.b_size >> inode->i_blkbits;
1343 *dblock = bh.b_blocknr;
1344 if (buffer_new(&bh))
1351 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1352 unsigned int length)
1354 return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
1357 #define GFS2_JTRUNC_REVOKES 8192
1360 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1361 * @inode: The inode being truncated
1362 * @oldsize: The original (larger) size
1363 * @newsize: The new smaller size
1365 * With jdata files, we have to journal a revoke for each block which is
1366 * truncated. As a result, we need to split this into separate transactions
1367 * if the number of pages being truncated gets too large.
1370 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1372 struct gfs2_sbd *sdp = GFS2_SB(inode);
1373 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1377 while (oldsize != newsize) {
1378 struct gfs2_trans *tr;
1381 chunk = oldsize - newsize;
1382 if (chunk > max_chunk)
1385 offs = oldsize & ~PAGE_MASK;
1386 if (offs && chunk > PAGE_SIZE)
1387 chunk = offs + ((chunk - offs) & PAGE_MASK);
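/*
 * Illustrative effect of the adjustment above: if oldsize ends 1000 bytes
 * into a page and chunk was clipped to max_chunk, chunk is trimmed so that
 * oldsize - chunk, the intermediate truncation point, lands on a page
 * boundary; after the first pass every intermediate size stays page
 * aligned until the final truncation down to newsize.
 */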
1389 truncate_pagecache(inode, oldsize - chunk);
1392 tr = current->journal_info;
1393 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1396 gfs2_trans_end(sdp);
1397 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1405 static int trunc_start(struct inode *inode, u64 newsize)
1407 struct gfs2_inode *ip = GFS2_I(inode);
1408 struct gfs2_sbd *sdp = GFS2_SB(inode);
1409 struct buffer_head *dibh = NULL;
1410 int journaled = gfs2_is_jdata(ip);
1411 u64 oldsize = inode->i_size;
1415 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1417 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1421 error = gfs2_meta_inode_buffer(ip, &dibh);
1425 gfs2_trans_add_meta(ip->i_gl, dibh);
1427 if (gfs2_is_stuffed(ip)) {
1428 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1430 unsigned int blocksize = i_blocksize(inode);
1431 unsigned int offs = newsize & (blocksize - 1);
1433 error = gfs2_block_zero_range(inode, newsize,
1438 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1441 i_size_write(inode, newsize);
1442 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1443 gfs2_dinode_out(ip, dibh->b_data);
1446 error = gfs2_journaled_truncate(inode, oldsize, newsize);
1448 truncate_pagecache(inode, newsize);
1452 if (current->journal_info)
1453 gfs2_trans_end(sdp);
1457 int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
1458 struct iomap *iomap)
1460 struct metapath mp = { .mp_aheight = 1, };
1463 ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1464 if (!ret && iomap->type == IOMAP_HOLE)
1465 ret = gfs2_iomap_alloc(inode, iomap, &mp);
1466 release_metapath(&mp);
1471 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1473 * @rg_gh: holder of resource group glock
1474 * @bh: buffer head to sweep
1475 * @start: starting point in bh
1476 * @end: end point in bh
1477 * @meta: true if bh points to metadata (rather than data)
1478 * @btotal: place to keep count of total blocks freed
1480 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1481 * free, and free them all. However, we do it one rgrp at a time. If this
1482 * block has references to multiple rgrps, we break it into individual
1483 * transactions. This allows other processes to use the rgrps while we're
1484 * focused on a single one, for better concurrency / performance.
1485 * At every transaction boundary, we rewrite the inode into the journal.
1486 * That way the bitmaps are kept consistent with the inode and we can recover
1487 * if we're interrupted by power-outages.
1489 * Returns: 0, or return code if an error occurred.
1490 * *btotal has the total number of blocks freed
1492 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1493 struct buffer_head *bh, __be64 *start, __be64 *end,
1494 bool meta, u32 *btotal)
1496 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1497 struct gfs2_rgrpd *rgd;
1498 struct gfs2_trans *tr;
1500 int blks_outside_rgrp;
1501 u64 bn, bstart, isize_blks;
1502 s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1504 bool buf_in_tr = false; /* buffer was added to transaction */
1508 if (gfs2_holder_initialized(rd_gh)) {
1509 rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1510 gfs2_assert_withdraw(sdp,
1511 gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1513 blks_outside_rgrp = 0;
1517 for (p = start; p < end; p++) {
1520 bn = be64_to_cpu(*p);
1523 if (!rgrp_contains_block(rgd, bn)) {
1524 blks_outside_rgrp++;
1528 rgd = gfs2_blk2rgrpd(sdp, bn, true);
1529 if (unlikely(!rgd)) {
1533 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1538 /* Must be done with the rgrp glock held: */
1539 if (gfs2_rs_active(&ip->i_res) &&
1540 rgd == ip->i_res.rs_rbm.rgd)
1541 gfs2_rs_deltree(&ip->i_res);
1544 /* The size of our transactions will be unknown until we
1545 actually process all the metadata blocks that relate to
1546 the rgrp. So we estimate. We know it can't be more than
1547 the dinode's i_blocks and we don't want to exceed the
1548 journal flush threshold, sd_log_thresh2. */
1549 if (current->journal_info == NULL) {
1550 unsigned int jblocks_rqsted, revokes;
1552 jblocks_rqsted = rgd->rd_length + RES_DINODE +
1554 isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1555 if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1557 atomic_read(&sdp->sd_log_thresh2);
1559 jblocks_rqsted += isize_blks;
1560 revokes = jblocks_rqsted;
1562 revokes += end - start;
1563 else if (ip->i_depth)
1564 revokes += sdp->sd_inptrs;
1565 ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1568 down_write(&ip->i_rw_mutex);
1570 /* check if we will exceed the transaction blocks requested */
1571 tr = current->journal_info;
1572 if (tr->tr_num_buf_new + RES_STATFS +
1573 RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1574 /* We set blks_outside_rgrp to ensure the loop will
1575 be repeated for the same rgrp, but with a new transaction. */
1577 blks_outside_rgrp++;
1578 /* This next part is tricky. If the buffer was added
1579 to the transaction, we've already set some block
1580 pointers to 0, so we better follow through and free
1581 them, or we will introduce corruption (so break).
1582 This may be impossible, or at least rare, but I
1583 decided to cover the case regardless.
1585 If the buffer was not added to the transaction
1586 (this call), doing so would exceed our transaction
1587 size, so we need to end the transaction and start a
1588 new one (so goto). */
1595 gfs2_trans_add_meta(ip->i_gl, bh);
1598 if (bstart + blen == bn) {
1603 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1605 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1611 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1613 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1616 if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1617 outside the rgrp we just processed,
1618 do it all over again. */
1619 if (current->journal_info) {
1620 struct buffer_head *dibh;
1622 ret = gfs2_meta_inode_buffer(ip, &dibh);
1626 /* Every transaction boundary, we rewrite the dinode
1627 to keep its di_blocks current in case of failure. */
1628 ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1629 current_time(&ip->i_inode);
1630 gfs2_trans_add_meta(ip->i_gl, dibh);
1631 gfs2_dinode_out(ip, dibh->b_data);
1633 up_write(&ip->i_rw_mutex);
1634 gfs2_trans_end(sdp);
1637 gfs2_glock_dq_uninit(rd_gh);
1645 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1647 if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1653 * find_nonnull_ptr - find a non-null pointer given a metapath and height
1654 * @mp: starting metapath
1655 * @h: desired height to search
1657 * Assumes the metapath is valid (with buffers) out to height h.
1658 * Returns: true if a non-null pointer was found in the metapath buffer
1659 * false if all remaining pointers are NULL in the buffer
1661 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1663 __u16 *end_list, unsigned int end_aligned)
1665 struct buffer_head *bh = mp->mp_bh[h];
1666 __be64 *first, *ptr, *end;
1668 first = metaptr1(h, mp);
1669 ptr = first + mp->mp_list[h];
1670 end = (__be64 *)(bh->b_data + bh->b_size);
1671 if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1672 bool keep_end = h < end_aligned;
1673 end = first + end_list[h] + keep_end;
1677 if (*ptr) { /* if we have a non-null pointer */
1678 mp->mp_list[h] = ptr - first;
1680 if (h < GFS2_MAX_META_HEIGHT)
1689 enum dealloc_states {
1690 DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */
1691 DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */
1692 DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */
1693 DEALLOC_DONE = 3, /* process complete */
1697 metapointer_range(struct metapath *mp, int height,
1698 __u16 *start_list, unsigned int start_aligned,
1699 __u16 *end_list, unsigned int end_aligned,
1700 __be64 **start, __be64 **end)
1702 struct buffer_head *bh = mp->mp_bh[height];
1705 first = metaptr1(height, mp);
1707 if (mp_eq_to_hgt(mp, start_list, height)) {
1708 bool keep_start = height < start_aligned;
1709 *start = first + start_list[height] + keep_start;
1711 *end = (__be64 *)(bh->b_data + bh->b_size);
1712 if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1713 bool keep_end = height < end_aligned;
1714 *end = first + end_list[height] + keep_end;
1718 static inline bool walk_done(struct gfs2_sbd *sdp,
1719 struct metapath *mp, int height,
1720 __u16 *end_list, unsigned int end_aligned)
1725 bool keep_end = height < end_aligned;
1726 if (!mp_eq_to_hgt(mp, end_list, height))
1728 end = end_list[height] + keep_end;
1730 end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1731 return mp->mp_list[height] >= end;
1735 * punch_hole - deallocate blocks in a file
1736 * @ip: inode to truncate
1737 * @offset: the start of the hole
1738 * @length: the size of the hole (or 0 for truncate)
1740 * Punch a hole into a file or truncate a file at a given position. This
1741 * function operates in whole blocks (@offset and @length are rounded
1742 * accordingly); partially filled blocks must be cleared otherwise.
1744 * This function works from the bottom up, and from the right to the left. In
1745 * other words, it strips off the highest layer (data) before stripping any of
1746 * the metadata. Doing it this way is best in case the operation is interrupted
1747 * by power failure, etc. The dinode is rewritten in every transaction to
1748 * guarantee integrity.
1750 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1752 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1753 u64 maxsize = sdp->sd_heightsize[ip->i_height];
1754 struct metapath mp = {};
1755 struct buffer_head *dibh, *bh;
1756 struct gfs2_holder rd_gh;
1757 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1758 u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1759 __u16 start_list[GFS2_MAX_META_HEIGHT];
1760 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1761 unsigned int start_aligned, uninitialized_var(end_aligned);
1762 unsigned int strip_h = ip->i_height - 1;
1765 int mp_h; /* metapath buffers are read in to this height */
1767 __be64 *start, *end;
1769 if (offset >= maxsize) {
1771 * The starting point lies beyond the allocated meta-data;
1772 * there are no blocks to deallocate.
1778 * The start position of the hole is defined by lblock, start_list, and
1779 * start_aligned. The end position of the hole is defined by lend,
1780 * end_list, and end_aligned.
1782 * start_aligned and end_aligned define down to which height the start
1783 * and end positions are aligned to the metadata tree (i.e., the
1784 * position is a multiple of the metadata granularity at the height
1785 * above). This determines at which heights additional meta pointers
1786 * need to be preserved for the remaining data.
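 *
 * For instance (an illustrative sketch, not part of the original comment):
 * truncating a height 2 file at a position whose metapath is {5, 17} gives
 * start_list = {5, 17} and start_aligned = 1, because slots 0-16 of that
 * height-1 indirect block still describe live data; the dinode pointer in
 * slot 5 must therefore be preserved, and the sweep of the indirect block
 * itself begins at slot 17. Had the truncation point fallen exactly on
 * slot 0 of an indirect block (metapath {5, 0}), start_aligned would be 0
 * and the dinode pointer in slot 5 could be freed along with everything
 * below it.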
1790 u64 end_offset = offset + length;
1794 * Clip the end at the maximum file size for the given height:
1795 * that's how far the metadata goes; files bigger than that
1796 * will have additional layers of indirection.
1798 if (end_offset > maxsize)
1799 end_offset = maxsize;
1800 lend = end_offset >> bsize_shift;
1805 find_metapath(sdp, lend, &mp, ip->i_height);
1806 end_list = __end_list;
1807 memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1809 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1816 find_metapath(sdp, lblock, &mp, ip->i_height);
1817 memcpy(start_list, mp.mp_list, sizeof(start_list));
1819 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1820 if (start_list[mp_h])
1823 start_aligned = mp_h;
1825 ret = gfs2_meta_inode_buffer(ip, &dibh);
1830 ret = lookup_metapath(ip, &mp);
1834 /* issue read-ahead on metadata */
1835 for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1836 metapointer_range(&mp, mp_h, start_list, start_aligned,
1837 end_list, end_aligned, &start, &end);
1838 gfs2_metapath_ra(ip->i_gl, start, end);
1841 if (mp.mp_aheight == ip->i_height)
1842 state = DEALLOC_MP_FULL; /* We have a complete metapath */
1844 state = DEALLOC_FILL_MP; /* deal with partial metapath */
1846 ret = gfs2_rindex_update(sdp);
1850 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1853 gfs2_holder_mark_uninitialized(&rd_gh);
1857 while (state != DEALLOC_DONE) {
1859 /* Truncate a full metapath at the given strip height.
1860 * Note that strip_h == mp_h in order to be in this state. */
1861 case DEALLOC_MP_FULL:
1862 bh = mp.mp_bh[mp_h];
1863 gfs2_assert_withdraw(sdp, bh);
1864 if (gfs2_assert_withdraw(sdp,
1865 prev_bnr != bh->b_blocknr)) {
1866 fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
1867 "s_h:%u, mp_h:%u\n",
1868 (unsigned long long)ip->i_no_addr,
1869 prev_bnr, ip->i_height, strip_h, mp_h);
1871 prev_bnr = bh->b_blocknr;
1873 if (gfs2_metatype_check(sdp, bh,
1874 (mp_h ? GFS2_METATYPE_IN :
1875 GFS2_METATYPE_DI))) {
1881 * Below, passing end_aligned as 0 gives us the
1882 * metapointer range excluding the end point: the end
1883 * point is the first metapath we must not deallocate!
1886 metapointer_range(&mp, mp_h, start_list, start_aligned,
1887 end_list, 0 /* end_aligned */,
1889 ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1891 mp_h != ip->i_height - 1,
1894 /* If we hit an error or just swept dinode buffer, just exit. */
1897 state = DEALLOC_DONE;
1900 state = DEALLOC_MP_LOWER;
1903 /* lower the metapath strip height */
1904 case DEALLOC_MP_LOWER:
1905 /* We're done with the current buffer, so release it,
1906 unless it's the dinode buffer. Then back up to the
1907 previous pointer. */
1909 brelse(mp.mp_bh[mp_h]);
1910 mp.mp_bh[mp_h] = NULL;
1912 /* If we can't get any lower in height, we've stripped
1913 off all we can. Next step is to back up and start
1914 stripping the previous level of metadata. */
1917 memcpy(mp.mp_list, start_list, sizeof(start_list));
1919 state = DEALLOC_FILL_MP;
1922 mp.mp_list[mp_h] = 0;
1923 mp_h--; /* search one metadata height down */
1925 if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1927 /* Here we've found a part of the metapath that is not
1928 * allocated. We need to search at that height for the
1929 * next non-null pointer. */
1930 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1931 state = DEALLOC_FILL_MP;
1934 /* No more non-null pointers at this height. Back up
1935 to the previous height and try again. */
1936 break; /* loop around in the same state */
1938 /* Fill the metapath with buffers to the given height. */
1939 case DEALLOC_FILL_MP:
1940 /* Fill the buffers out to the current height. */
1941 ret = fillup_metapath(ip, &mp, mp_h);
1945 /* On the first pass, issue read-ahead on metadata. */
1946 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1947 unsigned int height = mp.mp_aheight - 1;
1949 /* No read-ahead for data blocks. */
1950 if (mp.mp_aheight - 1 == strip_h)
1953 for (; height >= mp.mp_aheight - ret; height--) {
1954 metapointer_range(&mp, height,
1955 start_list, start_aligned,
1956 end_list, end_aligned,
1958 gfs2_metapath_ra(ip->i_gl, start, end);
1962 /* If buffers found for the entire strip height */
1963 if (mp.mp_aheight - 1 == strip_h) {
1964 state = DEALLOC_MP_FULL;
1967 if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1968 mp_h = mp.mp_aheight - 1;
1970 /* If we find a non-null block pointer, crawl a bit
1971 higher up in the metapath and try again, otherwise
1972 we need to look lower for a new starting point. */
1973 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1976 state = DEALLOC_MP_LOWER;
1982 if (current->journal_info == NULL) {
1983 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1987 down_write(&ip->i_rw_mutex);
1989 gfs2_statfs_change(sdp, 0, +btotal, 0);
1990 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1992 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1993 gfs2_trans_add_meta(ip->i_gl, dibh);
1994 gfs2_dinode_out(ip, dibh->b_data);
1995 up_write(&ip->i_rw_mutex);
1996 gfs2_trans_end(sdp);
2000 if (gfs2_holder_initialized(&rd_gh))
2001 gfs2_glock_dq_uninit(&rd_gh);
2002 if (current->journal_info) {
2003 up_write(&ip->i_rw_mutex);
2004 gfs2_trans_end(sdp);
2007 gfs2_quota_unhold(ip);
2009 release_metapath(&mp);
2013 static int trunc_end(struct gfs2_inode *ip)
2015 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2016 struct buffer_head *dibh;
2019 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2023 down_write(&ip->i_rw_mutex);
2025 error = gfs2_meta_inode_buffer(ip, &dibh);
2029 if (!i_size_read(&ip->i_inode)) {
2031 ip->i_goal = ip->i_no_addr;
2032 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2033 gfs2_ordered_del_inode(ip);
2035 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2036 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2038 gfs2_trans_add_meta(ip->i_gl, dibh);
2039 gfs2_dinode_out(ip, dibh->b_data);
2043 up_write(&ip->i_rw_mutex);
2044 gfs2_trans_end(sdp);
2049 * do_shrink - make a file smaller
2051 * @newsize: the size to make the file
2053 * Called with an exclusive lock on @inode. The @newsize must
2054 * be equal to or smaller than the current inode size.
2059 static int do_shrink(struct inode *inode, u64 newsize)
2061 struct gfs2_inode *ip = GFS2_I(inode);
2064 error = trunc_start(inode, newsize);
2067 if (gfs2_is_stuffed(ip))
2070 error = punch_hole(ip, newsize, 0);
2072 error = trunc_end(ip);
2077 void gfs2_trim_blocks(struct inode *inode)
2081 ret = do_shrink(inode, inode->i_size);
2086 * do_grow - Touch and update inode size
2088 * @size: The new size
2090 * This function updates the timestamps on the inode and
2091 * may also increase the size of the inode. This function
2092 * must not be called with @size any smaller than the current inode size.
2095 * Although it is not strictly required to unstuff files here,
2096 * earlier versions of GFS2 had a bug in the stuffed file reading
2097 * code which would result in a buffer overrun if the size is larger
2098 * than the max stuffed file size. In order to prevent this from
2099 * occurring, such files are unstuffed, but in other cases we can
2100 * just update the inode size directly.
2102 * Returns: 0 on success, or -ve on error
2105 static int do_grow(struct inode *inode, u64 size)
2107 struct gfs2_inode *ip = GFS2_I(inode);
2108 struct gfs2_sbd *sdp = GFS2_SB(inode);
2109 struct gfs2_alloc_parms ap = { .target = 1, };
2110 struct buffer_head *dibh;
2114 if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2115 error = gfs2_quota_lock_check(ip, &ap);
2119 error = gfs2_inplace_reserve(ip, &ap);
2121 goto do_grow_qunlock;
2125 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2127 gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2128 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2131 goto do_grow_release;
2134 error = gfs2_unstuff_dinode(ip, NULL);
2139 error = gfs2_meta_inode_buffer(ip, &dibh);
2143 truncate_setsize(inode, size);
2144 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2145 gfs2_trans_add_meta(ip->i_gl, dibh);
2146 gfs2_dinode_out(ip, dibh->b_data);
2150 gfs2_trans_end(sdp);
2153 gfs2_inplace_release(ip);
2155 gfs2_quota_unlock(ip);
2161 * gfs2_setattr_size - make a file a given size
2163 * @newsize: the size to make the file
2165 * The file size can grow, shrink, or stay the same size. This
2166 * is called holding i_rwsem and an exclusive glock on the inode
2172 int gfs2_setattr_size(struct inode *inode, u64 newsize)
2174 struct gfs2_inode *ip = GFS2_I(inode);
2177 BUG_ON(!S_ISREG(inode->i_mode));
2179 ret = inode_newsize_ok(inode, newsize);
2183 inode_dio_wait(inode);
2185 ret = gfs2_rsqa_alloc(ip);
2189 if (newsize >= inode->i_size) {
2190 ret = do_grow(inode, newsize);
2194 ret = do_shrink(inode, newsize);
2196 gfs2_rsqa_delete(ip, NULL);
2200 int gfs2_truncatei_resume(struct gfs2_inode *ip)
2203 error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2205 error = trunc_end(ip);
2209 int gfs2_file_dealloc(struct gfs2_inode *ip)
2211 return punch_hole(ip, 0, 0);
2215 * gfs2_free_journal_extents - Free cached journal bmap info
2220 void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2222 struct gfs2_journal_extent *jext;
2224 while(!list_empty(&jd->extent_list)) {
2225 jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
2226 list_del(&jext->list);
2232 * gfs2_add_jextent - Add or merge a new extent to extent cache
2233 * @jd: The journal descriptor
2234 * @lblock: The logical block at start of new extent
2235 * @dblock: The physical block at start of new extent
2236 * @blocks: Size of extent in fs blocks
2238 * Returns: 0 on success or -ENOMEM
2241 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2243 struct gfs2_journal_extent *jext;
2245 if (!list_empty(&jd->extent_list)) {
2246 jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
2247 if ((jext->dblock + jext->blocks) == dblock) {
2248 jext->blocks += blocks;
2253 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2256 jext->dblock = dblock;
2257 jext->lblock = lblock;
2258 jext->blocks = blocks;
2259 list_add_tail(&jext->list, &jd->extent_list);
2265 * gfs2_map_journal_extents - Cache journal bmap info
2266 * @sdp: The super block
2267 * @jd: The journal to map
2269 * Create a reusable "extent" mapping from all logical
2270 * blocks to all physical blocks for the given journal. This will save
2271 * us time when writing journal blocks. Most journals will have only one
2272 * extent that maps all their logical blocks. That's because mkfs.gfs2
2273 * arranges the journal blocks sequentially to maximize performance.
2274 * So the extent would map the first block for the entire file length.
2275 * However, gfs2_jadd can happen while file activity is happening, so
2276 * those journals may not be sequential. Less likely is the case where
2277 * the users created their own journals by mounting the metafs and
2278 * laying it out. But it's still possible. These journals might have several discontiguous blocks.
2281 * Returns: 0 on success, or error on failure
2284 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2288 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2289 struct buffer_head bh;
2290 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2295 start = ktime_get();
2296 lblock_stop = i_size_read(jd->jd_inode) >> shift;
2297 size = (lblock_stop - lblock) << shift;
2299 WARN_ON(!list_empty(&jd->extent_list));
2305 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2306 if (rc || !buffer_mapped(&bh))
2308 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2312 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2316 fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2317 jd->nr_extents, ktime_ms_delta(end, start));
2321 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2323 (unsigned long long)(i_size_read(jd->jd_inode) - size),
2325 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2326 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2327 bh.b_state, (unsigned long long)bh.b_size);
2328 gfs2_free_journal_extents(jd);
2333 * gfs2_write_alloc_required - figure out if a write will require an allocation
2334 * @ip: the file being written to
2335 * @offset: the offset to write to
2336 * @len: the number of bytes being written
2338 * Returns: 1 if an alloc is required, 0 otherwise
2341 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2344 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2345 struct buffer_head bh;
2347 u64 lblock, lblock_stop, size;
2353 if (gfs2_is_stuffed(ip)) {
2354 if (offset + len > gfs2_max_stuffed_size(ip))
2359 shift = sdp->sd_sb.sb_bsize_shift;
2360 BUG_ON(gfs2_is_dir(ip));
2361 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2362 lblock = offset >> shift;
2363 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2364 if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2367 size = (lblock_stop - lblock) << shift;
2371 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2372 if (!buffer_mapped(&bh))
2375 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2381 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2383 struct gfs2_inode *ip = GFS2_I(inode);
2384 struct buffer_head *dibh;
2387 if (offset >= inode->i_size)
2389 if (offset + length > inode->i_size)
2390 length = inode->i_size - offset;
2392 error = gfs2_meta_inode_buffer(ip, &dibh);
2395 gfs2_trans_add_meta(ip->i_gl, dibh);
2396 memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2402 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2405 struct gfs2_sbd *sdp = GFS2_SB(inode);
2406 loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2410 struct gfs2_trans *tr;
2415 if (chunk > max_chunk)
2418 offs = offset & ~PAGE_MASK;
2419 if (offs && chunk > PAGE_SIZE)
2420 chunk = offs + ((chunk - offs) & PAGE_MASK);
2422 truncate_pagecache_range(inode, offset, chunk);
2426 tr = current->journal_info;
2427 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2430 gfs2_trans_end(sdp);
2431 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2438 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2440 struct inode *inode = file_inode(file);
2441 struct gfs2_inode *ip = GFS2_I(inode);
2442 struct gfs2_sbd *sdp = GFS2_SB(inode);
2445 if (gfs2_is_jdata(ip))
2446 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2447 GFS2_JTRUNC_REVOKES);
2449 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2453 if (gfs2_is_stuffed(ip)) {
2454 error = stuffed_zero_range(inode, offset, length);
2458 unsigned int start_off, end_len, blocksize;
2460 blocksize = i_blocksize(inode);
2461 start_off = offset & (blocksize - 1);
2462 end_len = (offset + length) & (blocksize - 1);
2464 unsigned int len = length;
2465 if (length > blocksize - start_off)
2466 len = blocksize - start_off;
2467 error = gfs2_block_zero_range(inode, offset, len);
2470 if (start_off + length < blocksize)
2474 error = gfs2_block_zero_range(inode,
2475 offset + length - end_len, end_len);
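/*
 * Worked example for the edge zeroing above (illustrative, 4096-byte
 * blocks): punching offset = 5000, length = 10000 gives start_off = 904
 * and end_len = 2712, so bytes 5000-8191 (the tail of block 1) and bytes
 * 12288-14999 (the head of block 3) are zeroed in place, while block 2,
 * which lies entirely inside the hole, is left for punch_hole() below to
 * deallocate.
 */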
2481 if (gfs2_is_jdata(ip)) {
2482 BUG_ON(!current->journal_info);
2483 gfs2_journaled_truncate_range(inode, offset, length);
2485 truncate_pagecache_range(inode, offset, offset + length - 1);
2487 file_update_time(file);
2488 mark_inode_dirty(inode);
2490 if (current->journal_info)
2491 gfs2_trans_end(sdp);
2493 if (!gfs2_is_stuffed(ip))
2494 error = punch_hole(ip, offset, length);
2497 if (current->journal_info)
2498 gfs2_trans_end(sdp);