2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/prefetch.h>
17 #include <linux/blkdev.h>
18 #include <linux/rbtree.h>
19 #include <linux/random.h>
34 #include "trace_gfs2.h"
36 #define BFITNOENT ((u32)~0)
37 #define NO_BLOCK ((u64)~0)
39 #if BITS_PER_LONG == 32
40 #define LBITMASK (0x55555555UL)
41 #define LBITSKIP55 (0x55555555UL)
42 #define LBITSKIP00 (0x00000000UL)
44 #define LBITMASK (0x5555555555555555UL)
45 #define LBITSKIP55 (0x5555555555555555UL)
46 #define LBITSKIP00 (0x0000000000000000UL)
50 * These routines are used by the resource group routines (rgrp.c)
51 * to keep track of block allocation. Each block is represented by two
52 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
55 * 1 = Used (not metadata)
56 * 2 = Unlinked (still in use) inode
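 *
 * For example, a bitmap byte of 0x64 (bit pairs 00, 01, 10, 01 from bit 0
 * upward) describes, from its first block onward, a free block, a used
 * block, an unlinked inode and another used block; block i within a byte
 * is recovered with (byte >> (i * GFS2_BIT_SIZE)) & GFS2_BIT_MASK.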
60 static const char valid_change[16] = {
68 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
69 const struct gfs2_inode *ip, bool nowrap);
73 * gfs2_setbit - Set a bit in the bitmaps
74 * @rbm: The position of the bit to set
75 * @do_clone: Also set the clone bitmap, if it exists
76 * @new_state: the new state of the block
80 static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
81 unsigned char new_state)
83 unsigned char *byte1, *byte2, *end, cur_state;
84 struct gfs2_bitmap *bi = rbm_bi(rbm);
85 unsigned int buflen = bi->bi_len;
86 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
88 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
89 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
93 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
95 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
96 printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
97 "new_state=%d\n", rbm->offset, cur_state, new_state);
98 printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
99 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
100 printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
101 bi->bi_offset, bi->bi_len);
103 gfs2_consist_rgrpd(rbm->rgd);
106 *byte1 ^= (cur_state ^ new_state) << bit;
108 if (do_clone && bi->bi_clone) {
109 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
110 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
111 *byte2 ^= (cur_state ^ new_state) << bit;
116 * gfs2_testbit - test a bit in the bitmaps
117 * @rbm: The bit to test
119 * Returns: The two bit block state of the requested bit
122 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
124 struct gfs2_bitmap *bi = rbm_bi(rbm);
125 const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
129 byte = buffer + (rbm->offset / GFS2_NBBY);
130 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
132 return (*byte >> bit) & GFS2_BIT_MASK;
137 * @ptr: Pointer to bitmap data
138 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
139 * @state: The state we are searching for
141 * We xor the bitmap data with a pattern which is the bitwise opposite
142 * of what we are looking for. This gives rise to a pattern of ones
143 * wherever there is a match. Since we have two bits per entry, we
144 * take this pattern, shift it down by one place and then AND it with
145 * the original. All the even bit positions (0,2,4, etc) then represent
146 * successful matches, so we mask with 0x55555..... to remove the unwanted
149 * This allows searching of a whole u64 at once (32 blocks) with a
150 * single test (on 64 bit arches).
153 static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
156 static const u64 search[] = {
157 [0] = 0xffffffffffffffffULL,
158 [1] = 0xaaaaaaaaaaaaaaaaULL,
159 [2] = 0x5555555555555555ULL,
160 [3] = 0x0000000000000000ULL,
162 tmp = le64_to_cpu(*ptr) ^ search[state];
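	/*
	 * At this point every two-bit entry of tmp that matches @state holds
	 * the value 3 (both bits set), since xor with the bitwise opposite of
	 * the state turns an exact match into all ones.  ANDing tmp with
	 * tmp >> 1 and then with @mask leaves a 1 in the even bit position of
	 * each matching entry, so the result is non-zero iff one of the
	 * (unmasked) entries is in the requested state.
	 */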
169 * rs_cmp - multi-block reservation range compare
170 * @blk: absolute file system block number of the new reservation
171 * @len: number of blocks in the new reservation
172 * @rs: existing reservation to compare against
174 * returns: 1 if the block range is beyond the reach of the reservation
175 * -1 if the block range is before the start of the reservation
176 * 0 if the block range overlaps with the reservation
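 *
 * For example, a reservation starting at block 100 with rs_free == 50
 * covers blocks 100..149, so rs_cmp(150, 8, rs) returns 1,
 * rs_cmp(90, 8, rs) returns -1 and rs_cmp(140, 20, rs) returns 0.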
178 static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
180 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
182 if (blk >= startblk + rs->rs_free)
184 if (blk + len - 1 < startblk)
190 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
191 * a block in a given allocation state.
192 * @buf: the buffer that holds the bitmaps
193 * @len: the length (in bytes) of the buffer
194 * @goal: start search at this block's bit-pair (within @buffer)
195 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
197 * Scope of @goal and returned block number is only within this bitmap buffer,
198 * not entire rgrp or filesystem. @buf will be offset from the actual
199 * beginning of a bitmap block buffer, skipping any header structures, but
200 * headers are always a multiple of 64 bits long so that the buffer is
201 * always aligned to a 64 bit boundary.
203 * The size of the buffer is in bytes, but it is assumed that it is
204 * always ok to read a complete multiple of 64 bits at the end
205 * of the block in case the end is not aligned to a natural boundary.
207 * Return: the block number (bitmap buffer scope) that was found
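 *
 * For example, if the first matching bit-pair lies in the third u64 of the
 * buffer (16 bytes in) at bit position 10 of that word, then bit /= 2
 * yields entry 5 and the function returns 16 * GFS2_NBBY + 5 == 69.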
210 static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
213 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
214 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
215 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
217 u64 mask = 0x5555555555555555ULL;
220 /* Mask off bits we don't care about at the start of the search */
222 tmp = gfs2_bit_search(ptr, mask, state);
224 while(tmp == 0 && ptr < end) {
225 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
228 /* Mask off any bits which are more than len bytes from the start */
229 if (ptr == end && (len & (sizeof(u64) - 1)))
230 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
231 /* Didn't find anything, so return */
236 bit /= 2; /* two bits per entry in the bitmap */
237 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
241 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
242 * @rbm: The rbm with rgd already set correctly
243 * @block: The block number (filesystem relative)
245 * This sets the bi and offset members of an rbm based on a
246 * resource group and a filesystem relative block number. The
247 * resource group must be set in the rbm on entry, the bi and
248 * offset members will be set by this function.
250 * Returns: 0 on success, or an error code
253 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
255 u64 rblock = block - rbm->rgd->rd_data0;
257 if (WARN_ON_ONCE(rblock > UINT_MAX))
259 if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
263 rbm->offset = (u32)(rblock);
264 /* Check if the block is within the first bitmap block */
265 if (rbm->offset < rbm_bi(rbm)->bi_blocks)
268 /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
269 rbm->offset += (sizeof(struct gfs2_rgrp) -
270 sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
271 rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
272 rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
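	/*
	 * Worked example with made-up numbers: if the rgrp header is 104
	 * bytes larger than a plain metadata header, the adjustment above
	 * adds 104 * GFS2_NBBY = 416 positions, so a block 5000 blocks into
	 * the rgrp with sd_blocks_per_bitmap == 4096 lands at position 5416,
	 * giving bii = 1 and offset = 5416 - 4096 = 1320.
	 */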
277 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
278 * @rbm: Position to search (value/result)
279 * @n_unaligned: Number of unaligned blocks to check
280 * @len: Decremented for each block found (terminate on zero)
282 * Returns: true if a non-free block is encountered
285 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
291 for (n = 0; n < n_unaligned; n++) {
292 res = gfs2_testbit(rbm);
293 if (res != GFS2_BLKST_FREE)
298 block = gfs2_rbm_to_block(rbm);
299 if (gfs2_rbm_from_block(rbm, block + 1))
307 * gfs2_free_extlen - Return extent length of free blocks
308 * @rrbm: Starting position
309 * @len: Max length to check
311 * Starting at the block specified by the rbm, see how many free blocks
312 * there are, not reading more than len blocks ahead. This can be done
313 * using memchr_inv when the blocks are byte aligned, but has to be done
314 * on a block by block basis in case of unaligned blocks. Also this
315 * function can cope with bitmap boundaries (although it must stop on
316 * a resource group boundary)
318 * Returns: Number of free blocks in the extent
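 *
 * Since GFS2_BLKST_FREE is zero, a bitmap byte of 0x00 means four
 * consecutive free blocks, so memchr_inv(start, 0, bytes) locates the
 * first byte containing any non-free block.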
321 static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
323 struct gfs2_rbm rbm = *rrbm;
324 u32 n_unaligned = rbm.offset & 3;
328 u8 *ptr, *start, *end;
330 struct gfs2_bitmap *bi;
333 gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
336 n_unaligned = len & 3;
337 /* Start is now byte aligned */
340 start = bi->bi_bh->b_data;
342 start = bi->bi_clone;
343 end = start + bi->bi_bh->b_size;
344 start += bi->bi_offset;
345 BUG_ON(rbm.offset & 3);
346 start += (rbm.offset / GFS2_NBBY);
347 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
348 ptr = memchr_inv(start, 0, bytes);
349 chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
350 chunk_size *= GFS2_NBBY;
351 BUG_ON(len < chunk_size);
353 block = gfs2_rbm_to_block(&rbm);
354 if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
362 n_unaligned = len & 3;
365 /* Deal with any bits left over at the end */
367 gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
373 * gfs2_bitcount - count the number of bits in a certain state
374 * @rgd: the resource group descriptor
375 * @buffer: the buffer that holds the bitmaps
376 * @buflen: the length (in bytes) of the buffer
377 * @state: the state of the block we're looking for
379 * Returns: The number of bits
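 *
 * For example, when counting GFS2_BLKST_USED (state == 1) in a byte of
 * 0x64 (bit pairs 00, 01, 10, 01 from bit 0 upward): byte & 0x03 is 0x00
 * (no match), byte & 0x0C is 0x04 == state << 2 (match), byte & 0x30 is
 * 0x20 != state << 4 (no match) and byte & 0xC0 is 0x40 == state << 6
 * (match), so the byte contributes two used blocks to the count.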
382 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
383 unsigned int buflen, u8 state)
385 const u8 *byte = buffer;
386 const u8 *end = buffer + buflen;
387 const u8 state1 = state << 2;
388 const u8 state2 = state << 4;
389 const u8 state3 = state << 6;
392 for (; byte < end; byte++) {
393 if (((*byte) & 0x03) == state)
395 if (((*byte) & 0x0C) == state1)
397 if (((*byte) & 0x30) == state2)
399 if (((*byte) & 0xC0) == state3)
407 * gfs2_rgrp_verify - Verify that a resource group is consistent
412 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
414 struct gfs2_sbd *sdp = rgd->rd_sbd;
415 struct gfs2_bitmap *bi = NULL;
416 u32 length = rgd->rd_length;
420 memset(count, 0, 4 * sizeof(u32));
422 /* Count # blocks in each of 4 possible allocation states */
423 for (buf = 0; buf < length; buf++) {
424 bi = rgd->rd_bits + buf;
425 for (x = 0; x < 4; x++)
426 count[x] += gfs2_bitcount(rgd,
432 if (count[0] != rgd->rd_free) {
433 if (gfs2_consist_rgrpd(rgd))
434 fs_err(sdp, "free data mismatch: %u != %u\n",
435 count[0], rgd->rd_free);
439 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
440 if (count[1] != tmp) {
441 if (gfs2_consist_rgrpd(rgd))
442 fs_err(sdp, "used data mismatch: %u != %u\n",
447 if (count[2] + count[3] != rgd->rd_dinodes) {
448 if (gfs2_consist_rgrpd(rgd))
449 fs_err(sdp, "used metadata mismatch: %u != %u\n",
450 count[2] + count[3], rgd->rd_dinodes);
455 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
457 u64 first = rgd->rd_data0;
458 u64 last = first + rgd->rd_data;
459 return first <= block && block < last;
463 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
464 * @sdp: The GFS2 superblock
465 * @blk: The data block number
466 * @exact: True if this needs to be an exact match
468 * Returns: The resource group, or NULL if not found
471 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
473 struct rb_node *n, *next;
474 struct gfs2_rgrpd *cur;
476 spin_lock(&sdp->sd_rindex_spin);
477 n = sdp->sd_rindex_tree.rb_node;
479 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
481 if (blk < cur->rd_addr)
483 else if (blk >= cur->rd_data0 + cur->rd_data)
486 spin_unlock(&sdp->sd_rindex_spin);
488 if (blk < cur->rd_addr)
490 if (blk >= cur->rd_data0 + cur->rd_data)
497 spin_unlock(&sdp->sd_rindex_spin);
503 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
504 * @sdp: The GFS2 superblock
506 * Returns: The first rgrp in the filesystem
509 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
511 const struct rb_node *n;
512 struct gfs2_rgrpd *rgd;
514 spin_lock(&sdp->sd_rindex_spin);
515 n = rb_first(&sdp->sd_rindex_tree);
516 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
517 spin_unlock(&sdp->sd_rindex_spin);
523 * gfs2_rgrpd_get_next - get the next RG
524 * @rgd: the resource group descriptor
526 * Returns: The next rgrp
529 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
531 struct gfs2_sbd *sdp = rgd->rd_sbd;
532 const struct rb_node *n;
534 spin_lock(&sdp->sd_rindex_spin);
535 n = rb_next(&rgd->rd_node);
537 n = rb_first(&sdp->sd_rindex_tree);
539 if (unlikely(&rgd->rd_node == n)) {
540 spin_unlock(&sdp->sd_rindex_spin);
543 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
544 spin_unlock(&sdp->sd_rindex_spin);
548 void gfs2_free_clones(struct gfs2_rgrpd *rgd)
552 for (x = 0; x < rgd->rd_length; x++) {
553 struct gfs2_bitmap *bi = rgd->rd_bits + x;
560 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
561 * @ip: the inode for this reservation
563 int gfs2_rs_alloc(struct gfs2_inode *ip)
567 down_write(&ip->i_rw_mutex);
571 ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
577 RB_CLEAR_NODE(&ip->i_res->rs_node);
579 up_write(&ip->i_rw_mutex);
583 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
585 gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
586 (unsigned long long)rs->rs_inum,
587 (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
588 rs->rs_rbm.offset, rs->rs_free);
592 * __rs_deltree - remove a multi-block reservation from the rgd tree
593 * @rs: The reservation to remove
596 static void __rs_deltree(struct gfs2_blkreserv *rs)
598 struct gfs2_rgrpd *rgd;
600 if (!gfs2_rs_active(rs))
603 rgd = rs->rs_rbm.rgd;
604 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
605 rb_erase(&rs->rs_node, &rgd->rd_rstree);
606 RB_CLEAR_NODE(&rs->rs_node);
609 struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
611 /* return reserved blocks to the rgrp */
612 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
613 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
615 clear_bit(GBF_FULL, &bi->bi_flags);
616 smp_mb__after_clear_bit();
621 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
622 * @rs: The reservation to remove
625 void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
627 struct gfs2_rgrpd *rgd;
629 rgd = rs->rs_rbm.rgd;
631 spin_lock(&rgd->rd_rsspin);
633 spin_unlock(&rgd->rd_rsspin);
638 * gfs2_rs_delete - delete a multi-block reservation
639 * @ip: The inode for this reservation
642 void gfs2_rs_delete(struct gfs2_inode *ip)
644 struct inode *inode = &ip->i_inode;
646 down_write(&ip->i_rw_mutex);
647 if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
648 gfs2_rs_deltree(ip->i_res);
649 BUG_ON(ip->i_res->rs_free);
650 kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
653 up_write(&ip->i_rw_mutex);
657 * return_all_reservations - return all reserved blocks back to the rgrp.
658 * @rgd: the rgrp that needs its space back
660 * We previously reserved a bunch of blocks for allocation. Now we need to
661 * give them back. This leaves the reservation structures intact, but removes
662 * all of their corresponding "no-fly zones".
664 static void return_all_reservations(struct gfs2_rgrpd *rgd)
667 struct gfs2_blkreserv *rs;
669 spin_lock(&rgd->rd_rsspin);
670 while ((n = rb_first(&rgd->rd_rstree))) {
671 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
674 spin_unlock(&rgd->rd_rsspin);
677 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
680 struct gfs2_rgrpd *rgd;
681 struct gfs2_glock *gl;
683 while ((n = rb_first(&sdp->sd_rindex_tree))) {
684 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
687 rb_erase(n, &sdp->sd_rindex_tree);
690 spin_lock(&gl->gl_spin);
691 gl->gl_object = NULL;
692 spin_unlock(&gl->gl_spin);
693 gfs2_glock_add_to_lru(gl);
697 gfs2_free_clones(rgd);
699 return_all_reservations(rgd);
700 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
704 static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
706 printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
707 printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
708 printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
709 printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
710 printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
714 * compute_bitstructs - Compute the bitmap sizes
715 * @rgd: The resource group descriptor
717 * Calculates bitmap descriptors, one for each block that contains bitmap data
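 *
 * Each struct gfs2_bitmap records where its data starts within its block
 * (bi_offset, which skips a struct gfs2_rgrp for the first block and a
 * struct gfs2_meta_header for the rest), where it starts within the
 * rgrp's bitmap as a whole (bi_start), its length in bytes (bi_len) and
 * the number of blocks it covers (bi_blocks).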
722 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
724 struct gfs2_sbd *sdp = rgd->rd_sbd;
725 struct gfs2_bitmap *bi;
726 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
727 u32 bytes_left, bytes;
733 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
737 bytes_left = rgd->rd_bitbytes;
739 for (x = 0; x < length; x++) {
740 bi = rgd->rd_bits + x;
743 /* small rgrp; bitmap stored completely in header block */
746 bi->bi_offset = sizeof(struct gfs2_rgrp);
749 bi->bi_blocks = bytes * GFS2_NBBY;
752 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
753 bi->bi_offset = sizeof(struct gfs2_rgrp);
756 bi->bi_blocks = bytes * GFS2_NBBY;
758 } else if (x + 1 == length) {
760 bi->bi_offset = sizeof(struct gfs2_meta_header);
761 bi->bi_start = rgd->rd_bitbytes - bytes_left;
763 bi->bi_blocks = bytes * GFS2_NBBY;
766 bytes = sdp->sd_sb.sb_bsize -
767 sizeof(struct gfs2_meta_header);
768 bi->bi_offset = sizeof(struct gfs2_meta_header);
769 bi->bi_start = rgd->rd_bitbytes - bytes_left;
771 bi->bi_blocks = bytes * GFS2_NBBY;
778 gfs2_consist_rgrpd(rgd);
781 bi = rgd->rd_bits + (length - 1);
782 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
783 if (gfs2_consist_rgrpd(rgd)) {
784 gfs2_rindex_print(rgd);
785 fs_err(sdp, "start=%u len=%u offset=%u\n",
786 bi->bi_start, bi->bi_len, bi->bi_offset);
795 * gfs2_ri_total - Total up the file system space, according to the rindex.
796 * @sdp: the filesystem
799 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
802 struct inode *inode = sdp->sd_rindex;
803 struct gfs2_inode *ip = GFS2_I(inode);
804 char buf[sizeof(struct gfs2_rindex)];
807 for (rgrps = 0;; rgrps++) {
808 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
810 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
812 error = gfs2_internal_read(ip, buf, &pos,
813 sizeof(struct gfs2_rindex));
814 if (error != sizeof(struct gfs2_rindex))
816 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
821 static int rgd_insert(struct gfs2_rgrpd *rgd)
823 struct gfs2_sbd *sdp = rgd->rd_sbd;
824 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
826 /* Figure out where to put new node */
828 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
832 if (rgd->rd_addr < cur->rd_addr)
833 newn = &((*newn)->rb_left);
834 else if (rgd->rd_addr > cur->rd_addr)
835 newn = &((*newn)->rb_right);
840 rb_link_node(&rgd->rd_node, parent, newn);
841 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
847 * read_rindex_entry - Pull in a new resource index entry from the disk
848 * @ip: Pointer to the rindex inode
850 * Returns: 0 on success, > 0 on EOF, error code otherwise
853 static int read_rindex_entry(struct gfs2_inode *ip)
855 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
856 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
857 struct gfs2_rindex buf;
859 struct gfs2_rgrpd *rgd;
861 if (pos >= i_size_read(&ip->i_inode))
864 error = gfs2_internal_read(ip, (char *)&buf, &pos,
865 sizeof(struct gfs2_rindex));
867 if (error != sizeof(struct gfs2_rindex))
868 return (error == 0) ? 1 : error;
870 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
876 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
877 rgd->rd_length = be32_to_cpu(buf.ri_length);
878 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
879 rgd->rd_data = be32_to_cpu(buf.ri_data);
880 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
881 spin_lock_init(&rgd->rd_rsspin);
883 error = compute_bitstructs(rgd);
887 error = gfs2_glock_get(sdp, rgd->rd_addr,
888 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
892 rgd->rd_gl->gl_object = rgd;
893 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
894 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
895 if (rgd->rd_data > sdp->sd_max_rg_data)
896 sdp->sd_max_rg_data = rgd->rd_data;
897 spin_lock(&sdp->sd_rindex_spin);
898 error = rgd_insert(rgd);
899 spin_unlock(&sdp->sd_rindex_spin);
903 error = 0; /* someone else read in the rgrp; free it and ignore it */
904 gfs2_glock_put(rgd->rd_gl);
908 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
913 * gfs2_ri_update - Pull in a new resource index from the disk
914 * @ip: pointer to the rindex inode
916 * Returns: 0 on successful update, error code otherwise
919 static int gfs2_ri_update(struct gfs2_inode *ip)
921 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
925 error = read_rindex_entry(ip);
926 } while (error == 0);
931 sdp->sd_rindex_uptodate = 1;
936 * gfs2_rindex_update - Update the rindex if required
937 * @sdp: The GFS2 superblock
939 * We grab a lock on the rindex inode to make sure that it doesn't
940 * change whilst we are performing an operation. We keep this lock
941 * for quite long periods of time compared to other locks. This
942 * doesn't matter, since it is shared and it is very, very rarely
943 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
945 * This makes sure that we're using the latest copy of the resource index
946 * special file, which might have been updated if someone expanded the
947 * filesystem (via gfs2_grow utility), which adds new resource groups.
949 * Returns: 0 on success, error code otherwise
952 int gfs2_rindex_update(struct gfs2_sbd *sdp)
954 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
955 struct gfs2_glock *gl = ip->i_gl;
956 struct gfs2_holder ri_gh;
958 int unlock_required = 0;
960 /* Read new copy from disk if we don't have the latest */
961 if (!sdp->sd_rindex_uptodate) {
962 if (!gfs2_glock_is_locked_by_me(gl)) {
963 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
968 if (!sdp->sd_rindex_uptodate)
969 error = gfs2_ri_update(ip);
971 gfs2_glock_dq_uninit(&ri_gh);
977 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
979 const struct gfs2_rgrp *str = buf;
982 rg_flags = be32_to_cpu(str->rg_flags);
983 rg_flags &= ~GFS2_RDF_MASK;
984 rgd->rd_flags &= GFS2_RDF_MASK;
985 rgd->rd_flags |= rg_flags;
986 rgd->rd_free = be32_to_cpu(str->rg_free);
987 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
988 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
991 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
993 struct gfs2_rgrp *str = buf;
995 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
996 str->rg_free = cpu_to_be32(rgd->rd_free);
997 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
998 str->__pad = cpu_to_be32(0);
999 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1000 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1003 static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1005 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1006 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1008 if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
1009 rgl->rl_dinodes != str->rg_dinodes ||
1010 rgl->rl_igeneration != str->rg_igeneration)
1015 static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1017 const struct gfs2_rgrp *str = buf;
1019 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1020 rgl->rl_flags = str->rg_flags;
1021 rgl->rl_free = str->rg_free;
1022 rgl->rl_dinodes = str->rg_dinodes;
1023 rgl->rl_igeneration = str->rg_igeneration;
1027 static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
1029 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1030 u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
1031 rgl->rl_unlinked = cpu_to_be32(unlinked);
1034 static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1036 struct gfs2_bitmap *bi;
1037 const u32 length = rgd->rd_length;
1038 const u8 *buffer = NULL;
1039 u32 i, goal, count = 0;
1041 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1043 buffer = bi->bi_bh->b_data + bi->bi_offset;
1044 WARN_ON(!buffer_uptodate(bi->bi_bh));
1045 while (goal < bi->bi_len * GFS2_NBBY) {
1046 goal = gfs2_bitfit(buffer, bi->bi_len, goal,
1047 GFS2_BLKST_UNLINKED);
1048 if (goal == BFITNOENT)
1060 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
1061 * @rgd: the struct gfs2_rgrpd describing the RG to read in
1063 * Read in all of a Resource Group's header and bitmap blocks.
1064 * Caller must eventually call gfs2_rgrp_go_unlock() to release the bitmaps.
1069 int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
1071 struct gfs2_sbd *sdp = rgd->rd_sbd;
1072 struct gfs2_glock *gl = rgd->rd_gl;
1073 unsigned int length = rgd->rd_length;
1074 struct gfs2_bitmap *bi;
1078 if (rgd->rd_bits[0].bi_bh != NULL)
1081 for (x = 0; x < length; x++) {
1082 bi = rgd->rd_bits + x;
1083 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
1088 for (y = length; y--;) {
1089 bi = rgd->rd_bits + y;
1090 error = gfs2_meta_wait(sdp, bi->bi_bh);
1093 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
1094 GFS2_METATYPE_RG)) {
1100 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
1101 for (x = 0; x < length; x++)
1102 clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
1103 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1104 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1105 rgd->rd_free_clone = rgd->rd_free;
1107 if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1108 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1109 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1110 rgd->rd_bits[0].bi_bh->b_data);
1112 else if (sdp->sd_args.ar_rgrplvb) {
1113 if (!gfs2_rgrp_lvb_valid(rgd)){
1114 gfs2_consist_rgrpd(rgd);
1118 if (rgd->rd_rgl->rl_unlinked == 0)
1119 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1125 bi = rgd->rd_bits + x;
1128 gfs2_assert_warn(sdp, !bi->bi_clone);
1134 int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1138 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1141 if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1142 return gfs2_rgrp_bh_get(rgd);
1144 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1145 rl_flags &= ~GFS2_RDF_MASK;
1146 rgd->rd_flags &= GFS2_RDF_MASK;
1147 rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1148 if (rgd->rd_rgl->rl_unlinked == 0)
1149 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1150 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1151 rgd->rd_free_clone = rgd->rd_free;
1152 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1153 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1157 int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1159 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1160 struct gfs2_sbd *sdp = rgd->rd_sbd;
1162 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1164 return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
1168 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1169 * @gh: The glock holder for the resource group
1173 void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1175 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1176 int x, length = rgd->rd_length;
1178 for (x = 0; x < length; x++) {
1179 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1188 int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
1189 struct buffer_head *bh,
1190 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
1192 struct super_block *sb = sdp->sd_vfs;
1195 sector_t nr_blks = 0;
1201 for (x = 0; x < bi->bi_len; x++) {
1202 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1203 clone += bi->bi_offset;
1206 const u8 *orig = bh->b_data + bi->bi_offset + x;
1207 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1209 diff = ~(*clone | (*clone >> 1));
1214 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1218 goto start_new_extent;
1219 if ((start + nr_blks) != blk) {
1220 if (nr_blks >= minlen) {
1221 rv = sb_issue_discard(sb,
1238 if (nr_blks >= minlen) {
1239 rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
1245 *ptrimmed = trimmed;
1249 if (sdp->sd_args.ar_discard)
1250 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
1251 sdp->sd_args.ar_discard = 0;
1256 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1257 * @filp: Any file on the filesystem
1258 * @argp: Pointer to the arguments (also used to pass result)
1260 * Returns: 0 on success, otherwise error code
1263 int gfs2_fitrim(struct file *filp, void __user *argp)
1265 struct inode *inode = file_inode(filp);
1266 struct gfs2_sbd *sdp = GFS2_SB(inode);
1267 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1268 struct buffer_head *bh;
1269 struct gfs2_rgrpd *rgd;
1270 struct gfs2_rgrpd *rgd_end;
1271 struct gfs2_holder gh;
1272 struct fstrim_range r;
1276 u64 start, end, minlen;
1278 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
1280 if (!capable(CAP_SYS_ADMIN))
1283 if (!blk_queue_discard(q))
1286 if (copy_from_user(&r, argp, sizeof(r)))
1289 ret = gfs2_rindex_update(sdp);
1293 start = r.start >> bs_shift;
1294 end = start + (r.len >> bs_shift);
1295 minlen = max_t(u64, r.minlen,
1296 q->limits.discard_granularity) >> bs_shift;
1298 if (end <= start || minlen > sdp->sd_max_rg_data)
1301 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1302 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1304 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1305 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1306 return -EINVAL; /* start is beyond the end of the fs */
1310 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1314 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1315 /* Trim each bitmap in the rgrp */
1316 for (x = 0; x < rgd->rd_length; x++) {
1317 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1318 ret = gfs2_rgrp_send_discards(sdp,
1319 rgd->rd_data0, NULL, bi, minlen,
1322 gfs2_glock_dq_uninit(&gh);
1328 /* Mark rgrp as having been trimmed */
1329 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1331 bh = rgd->rd_bits[0].bi_bh;
1332 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1333 gfs2_trans_add_meta(rgd->rd_gl, bh);
1334 gfs2_rgrp_out(rgd, bh->b_data);
1335 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
1336 gfs2_trans_end(sdp);
1339 gfs2_glock_dq_uninit(&gh);
1344 rgd = gfs2_rgrpd_get_next(rgd);
1348 r.len = trimmed << bs_shift;
1349 if (copy_to_user(argp, &r, sizeof(r)))
1356 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1357 * @ip: the inode structure
1360 static void rs_insert(struct gfs2_inode *ip)
1362 struct rb_node **newn, *parent = NULL;
1364 struct gfs2_blkreserv *rs = ip->i_res;
1365 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
1366 u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
1368 BUG_ON(gfs2_rs_active(rs));
1370 spin_lock(&rgd->rd_rsspin);
1371 newn = &rgd->rd_rstree.rb_node;
1373 struct gfs2_blkreserv *cur =
1374 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1377 rc = rs_cmp(fsblock, rs->rs_free, cur);
1379 newn = &((*newn)->rb_right);
1381 newn = &((*newn)->rb_left);
1383 spin_unlock(&rgd->rd_rsspin);
1389 rb_link_node(&rs->rs_node, parent, newn);
1390 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1392 /* Do our rgrp accounting for the reservation */
1393 rgd->rd_reserved += rs->rs_free; /* blocks reserved */
1394 spin_unlock(&rgd->rd_rsspin);
1395 trace_gfs2_rs(rs, TRACE_RS_INSERT);
1399 * rg_mblk_search - find a group of multiple free blocks to form a reservation
1400 * @rgd: the resource group descriptor
1401 * @ip: pointer to the inode for which we're reserving blocks
1402 * @requested: number of blocks required for this allocation
1406 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1409 struct gfs2_rbm rbm = { .rgd = rgd, };
1411 struct gfs2_blkreserv *rs = ip->i_res;
1413 u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
1415 struct inode *inode = &ip->i_inode;
1417 if (S_ISDIR(inode->i_mode))
1420 extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
1421 extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
1423 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
1426 /* Find bitmap block that contains bits for goal block */
1427 if (rgrp_contains_block(rgd, ip->i_goal))
1430 goal = rgd->rd_last_alloc + rgd->rd_data0;
1432 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1435 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
1438 rs->rs_free = extlen;
1439 rs->rs_inum = ip->i_no_addr;
1442 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1443 rgd->rd_last_alloc = 0;
1448 * gfs2_next_unreserved_block - Return next block that is not reserved
1449 * @rgd: The resource group
1450 * @block: The starting block
1451 * @length: The required length
1452 * @ip: Ignore any reservations for this inode
1454 * If the block does not appear in any reservation, then return the
1455 * block number unchanged. If it does appear in the reservation, then
1456 * keep looking through the tree of reservations in order to find the
1457 * first block number which is not reserved.
1460 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1462 const struct gfs2_inode *ip)
1464 struct gfs2_blkreserv *rs;
1468 spin_lock(&rgd->rd_rsspin);
1469 n = rgd->rd_rstree.rb_node;
1471 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1472 rc = rs_cmp(block, length, rs);
1482 while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
1483 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1487 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1491 spin_unlock(&rgd->rd_rsspin);
1496 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1497 * @rbm: The current position in the resource group
1498 * @ip: The inode for which we are searching for blocks
1499 * @minext: The minimum extent length
1501 * This checks the current position in the rgrp to see whether there is
1502 * a reservation covering this block. If not then this function is a
1503 * no-op. If there is, then the position is moved to the end of the
1504 * contiguous reservation(s) so that we are pointing at the first
1505 * non-reserved block.
1507 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1510 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1511 const struct gfs2_inode *ip,
1514 u64 block = gfs2_rbm_to_block(rbm);
1520 * If we have a minimum extent length, then skip over any extent
1521 * which is less than the min extent length in size.
1524 extlen = gfs2_free_extlen(rbm, minext);
1525 nblock = block + extlen;
1526 if (extlen < minext)
1531 * Check the extent which has been found against the reservations
1532 * and skip if parts of it are already reserved
1534 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
1535 if (nblock == block)
1538 ret = gfs2_rbm_from_block(rbm, nblock);
1545 * gfs2_rbm_find - Look for blocks of a particular state
1546 * @rbm: Value/result starting position and final position
1547 * @state: The state which we want to find
1548 * @minext: The requested extent length (0 for a single block)
1549 * @ip: If set, check for reservations
1550 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1551 * around until we've reached the starting point.
1554 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1555 * has no free blocks in it.
1557 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1560 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
1561 const struct gfs2_inode *ip, bool nowrap)
1563 struct buffer_head *bh;
1569 int iters = rbm->rgd->rd_length;
1571 struct gfs2_bitmap *bi;
1573 /* If we are not starting at the beginning of a bitmap, then we
1574 * need to add one to the bitmap count to ensure that we search
1575 * the starting bitmap twice.
1577 if (rbm->offset != 0)
1582 if (test_bit(GBF_FULL, &bi->bi_flags) &&
1583 (state == GFS2_BLKST_FREE))
1587 buffer = bh->b_data + bi->bi_offset;
1588 WARN_ON(!buffer_uptodate(bh));
1589 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1590 buffer = bi->bi_clone + bi->bi_offset;
1591 initial_offset = rbm->offset;
1592 offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
1593 if (offset == BFITNOENT)
1595 rbm->offset = offset;
1599 initial_bii = rbm->bii;
1600 ret = gfs2_reservation_check_and_update(rbm, ip, minext);
1604 n += (rbm->bii - initial_bii);
1607 if (ret == -E2BIG) {
1610 n += (rbm->bii - initial_bii);
1611 goto res_covered_end_of_rgrp;
1615 bitmap_full: /* Mark bitmap as full and fall through */
1616 if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
1617 struct gfs2_bitmap *bi = rbm_bi(rbm);
1618 set_bit(GBF_FULL, &bi->bi_flags);
1621 next_bitmap: /* Find next bitmap in the rgrp */
1624 if (rbm->bii == rbm->rgd->rd_length)
1626 res_covered_end_of_rgrp:
1627 if ((rbm->bii == 0) && nowrap)
1639 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1641 * @last_unlinked: block address of the last dinode we unlinked
1642 * @skip: block address we should explicitly not unlink
1644 * Any unlinked dinodes found are queued on the delete workqueue so that
1645 * their blocks can be reclaimed; the function itself returns nothing.
1648 static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1651 struct gfs2_sbd *sdp = rgd->rd_sbd;
1652 struct gfs2_glock *gl;
1653 struct gfs2_inode *ip;
1656 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1659 down_write(&sdp->sd_log_flush_lock);
1660 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
1661 up_write(&sdp->sd_log_flush_lock);
1662 if (error == -ENOSPC)
1664 if (WARN_ON_ONCE(error))
1667 block = gfs2_rbm_to_block(&rbm);
1668 if (gfs2_rbm_from_block(&rbm, block + 1))
1670 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1674 *last_unlinked = block;
1676 error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
1680 /* If the inode is already in cache, we can ignore it here
1681 * because the existing inode disposal code will deal with
1682 * it when all refs have gone away. Accessing gl_object like
1683 * this is not safe in general. Here it is ok because we do
1684 * not dereference the pointer, and we only need an approx
1685 * answer to whether it is NULL or not.
1689 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1694 /* Limit reclaim to sensible number of tasks */
1695 if (found > NR_CPUS)
1699 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1704 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1705 * @rgd: The rgrp in question
1706 * @loops: An indication of how picky we can be (0=very, 1=less so)
1708 * This function uses the recently added glock statistics in order to
1709 * figure out whether a particular resource group is suffering from
1710 * contention from multiple nodes. This is done purely on the basis
1711 * of timings, since this is the only data we have to work with and
1712 * our aim here is to reject a resource group which is highly contended
1713 * but (very important) not to do this too often in order to ensure that
1714 * we do not end up introducing fragmentation by changing resource
1715 * groups when not actually required.
1717 * The calculation is fairly simple: we want to know whether the SRTTB
1718 * (i.e. smoothed round trip time for blocking operations) to acquire
1719 * the lock for this rgrp's glock is significantly greater than the
1720 * time taken for resource groups on average. We introduce a margin in
1721 * the form of the variable @var which is computed as the sum of the two
1722 * respective variances, and multiplied by a factor depending on @loops
1723 * and whether we have a lot of data to base the decision on. This is
1724 * then tested against the square difference of the means in order to
1725 * decide whether the result is statistically significant or not.
1727 * Returns: A boolean verdict on the congestion status
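 *
 * In other words, with l_* taken from this glock's stats and r_* from the
 * per-cpu rgrp averages, the rgrp is reported congested when
 *
 *	l_srttb > r_srttb  and  (r_srttb - l_srttb)^2 > var
 *
 * where var is the combined variance scaled by the factor described above.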
1730 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1732 const struct gfs2_glock *gl = rgd->rd_gl;
1733 const struct gfs2_sbd *sdp = gl->gl_sbd;
1734 struct gfs2_lkstats *st;
1735 s64 r_dcount, l_dcount;
1736 s64 r_srttb, l_srttb;
1742 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
1743 r_srttb = st->stats[GFS2_LKS_SRTTB];
1744 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1745 var = st->stats[GFS2_LKS_SRTTVARB] +
1746 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1749 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1750 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1752 if ((l_dcount < 1) || (r_dcount < 1) || (r_srttb == 0))
1755 srttb_diff = r_srttb - l_srttb;
1756 sqr_diff = srttb_diff * srttb_diff;
1759 if (l_dcount < 8 || r_dcount < 8)
1764 return ((srttb_diff < 0) && (sqr_diff > var));
1768 * gfs2_rgrp_used_recently - check when the rgrp's glock was last used
1769 * @rs: The block reservation with the rgrp to test
1770 * @msecs: The time limit in milliseconds
1772 * Returns: True if the rgrp glock has been used within the time limit
1774 static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1779 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1780 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1782 return tdiff > (msecs * 1000 * 1000);
1785 static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1787 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1790 get_random_bytes(&skip, sizeof(skip));
1791 return skip % sdp->sd_rgrps;
1794 static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1796 struct gfs2_rgrpd *rgd = *pos;
1797 struct gfs2_sbd *sdp = rgd->rd_sbd;
1799 rgd = gfs2_rgrpd_get_next(rgd);
1801 rgd = gfs2_rgrpd_get_first(sdp);
1803 if (rgd != begin) /* If we didn't wrap */
1809 * gfs2_inplace_reserve - Reserve space in the filesystem
1810 * @ip: the inode to reserve space for
1811 * @requested: the number of blocks to be reserved
1816 int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
1818 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1819 struct gfs2_rgrpd *begin = NULL;
1820 struct gfs2_blkreserv *rs = ip->i_res;
1821 int error = 0, rg_locked, flags = 0;
1822 u64 last_unlinked = NO_BLOCK;
1826 if (sdp->sd_args.ar_rgrplvb)
1828 if (gfs2_assert_warn(sdp, requested))
1830 if (gfs2_rs_active(rs)) {
1831 begin = rs->rs_rbm.rgd;
1832 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
1833 rs->rs_rbm.rgd = begin = ip->i_rgd;
1835 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
1837 if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
1838 skip = gfs2_orlov_skip(ip);
1839 if (rs->rs_rbm.rgd == NULL)
1845 if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
1849 if (!gfs2_rs_active(rs) && (loops < 2) &&
1850 gfs2_rgrp_used_recently(rs, 1000) &&
1851 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
1853 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
1854 LM_ST_EXCLUSIVE, flags,
1856 if (unlikely(error))
1858 if (!gfs2_rs_active(rs) && (loops < 2) &&
1859 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
1861 if (sdp->sd_args.ar_rgrplvb) {
1862 error = update_rgrp_lvb(rs->rs_rbm.rgd);
1863 if (unlikely(error)) {
1864 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
1870 /* Skip unusable resource groups */
1871 if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
1874 if (sdp->sd_args.ar_rgrplvb)
1875 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
1877 /* Get a reservation if we don't already have one */
1878 if (!gfs2_rs_active(rs))
1879 rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
1881 /* Skip rgrps when we can't get a reservation on first pass */
1882 if (!gfs2_rs_active(rs) && (loops < 1))
1885 /* If rgrp has enough free space, use it */
1886 if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
1887 ip->i_rgd = rs->rs_rbm.rgd;
1891 /* Drop reservation, if we couldn't use reserved rgrp */
1892 if (gfs2_rs_active(rs))
1893 gfs2_rs_deltree(rs);
1895 /* Check for unlinked inodes which can be reclaimed */
1896 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
1897 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
1900 /* Unlock rgrp if required */
1902 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
1904 /* Find the next rgrp, and continue looking */
1905 if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
1910 /* If we've scanned all the rgrps, but found no free blocks
1911 * then this checks for some less likely conditions before
1915 /* Check that fs hasn't grown if writing to rindex */
1916 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
1917 error = gfs2_ri_update(ip);
1921 /* Flushing the log may release space */
1923 gfs2_log_flush(sdp, NULL);
1930 * gfs2_inplace_release - release an inplace reservation
1931 * @ip: the inode the reservation was taken out on
1933 * Release a reservation made by gfs2_inplace_reserve().
1936 void gfs2_inplace_release(struct gfs2_inode *ip)
1938 struct gfs2_blkreserv *rs = ip->i_res;
1940 if (rs->rs_rgd_gh.gh_gl)
1941 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
1945 * gfs2_get_block_type - Check a block in a RG is of given type
1946 * @rgd: the resource group holding the block
1947 * @block: the block number
1949 * Returns: The block type (GFS2_BLKST_*)
1952 static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
1954 struct gfs2_rbm rbm = { .rgd = rgd, };
1957 ret = gfs2_rbm_from_block(&rbm, block);
1958 WARN_ON_ONCE(ret != 0);
1960 return gfs2_testbit(&rbm);
1965 * gfs2_alloc_extent - allocate an extent from a given bitmap
1966 * @rbm: the resource group information
1967 * @dinode: TRUE if the first block we allocate is for a dinode
1968 * @n: The extent length (value/result)
1970 * Add the bitmap buffer to the transaction.
1971 * Set the found bits to GFS2_BLKST_DINODE or GFS2_BLKST_USED as appropriate.
1973 static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
1976 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
1977 const unsigned int elen = *n;
1982 block = gfs2_rbm_to_block(rbm);
1983 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
1984 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
1987 ret = gfs2_rbm_from_block(&pos, block);
1988 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
1990 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
1991 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
1998 * rgblk_free - Change alloc state of given block(s)
1999 * @sdp: the filesystem
2000 * @bstart: the start of a run of blocks to free
2001 * @blen: the length of the block run (all must lie within ONE RG!)
2002 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2004 * Returns: Resource group containing the block(s)
2007 static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2008 u32 blen, unsigned char new_state)
2010 struct gfs2_rbm rbm;
2011 struct gfs2_bitmap *bi;
2013 rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
2015 if (gfs2_consist(sdp))
2016 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
2021 gfs2_rbm_from_block(&rbm, bstart);
2024 if (!bi->bi_clone) {
2025 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2026 GFP_NOFS | __GFP_NOFAIL);
2027 memcpy(bi->bi_clone + bi->bi_offset,
2028 bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
2030 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2031 gfs2_setbit(&rbm, false, new_state);
2038 * gfs2_rgrp_dump - print out an rgrp
2039 * @seq: The iterator
2040 * @gl: The glock in question
2044 int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
2046 struct gfs2_rgrpd *rgd = gl->gl_object;
2047 struct gfs2_blkreserv *trs;
2048 const struct rb_node *n;
2052 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
2053 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2054 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2056 spin_lock(&rgd->rd_rsspin);
2057 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2058 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2061 spin_unlock(&rgd->rd_rsspin);
2065 static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2067 struct gfs2_sbd *sdp = rgd->rd_sbd;
2068 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2069 (unsigned long long)rgd->rd_addr);
2070 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2071 gfs2_rgrp_dump(NULL, rgd->rd_gl);
2072 rgd->rd_flags |= GFS2_RDF_ERROR;
2076 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2077 * @ip: The inode we have just allocated blocks for
2078 * @rbm: The start of the allocated blocks
2079 * @len: The extent length
2081 * Adjusts a reservation after an allocation has taken place. If the
2082 * reservation does not match the allocation, or if it is now empty
2083 * then it is removed.
2086 static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2087 const struct gfs2_rbm *rbm, unsigned len)
2089 struct gfs2_blkreserv *rs = ip->i_res;
2090 struct gfs2_rgrpd *rgd = rbm->rgd;
2095 spin_lock(&rgd->rd_rsspin);
2096 if (gfs2_rs_active(rs)) {
2097 if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
2098 block = gfs2_rbm_to_block(rbm);
2099 ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
2100 rlen = min(rs->rs_free, len);
2101 rs->rs_free -= rlen;
2102 rgd->rd_reserved -= rlen;
2103 trace_gfs2_rs(rs, TRACE_RS_CLAIM);
2104 if (rs->rs_free && !ret)
2110 spin_unlock(&rgd->rd_rsspin);
2114 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
2115 * @ip: the inode to allocate the block for
2116 * @bn: Used to return the starting block number
2117 * @nblocks: requested number of blocks/extent length (value/result)
2118 * @dinode: 1 if we're allocating a dinode block, else 0
2119 * @generation: the generation number of the inode
2121 * Returns: 0 or error
2124 int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2125 bool dinode, u64 *generation)
2127 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2128 struct buffer_head *dibh;
2129 struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
2132 u64 block; /* block, within the file system scope */
2135 if (gfs2_rs_active(ip->i_res))
2136 goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
2137 else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
2140 goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
2142 gfs2_rbm_from_block(&rbm, goal);
2143 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
2145 if (error == -ENOSPC) {
2146 gfs2_rbm_from_block(&rbm, goal);
2147 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
2150 /* Since all blocks are reserved in advance, this shouldn't happen */
2152 fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n",
2153 (unsigned long long)ip->i_no_addr, error, *nblocks,
2154 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
2158 gfs2_alloc_extent(&rbm, dinode, nblocks);
2159 block = gfs2_rbm_to_block(&rbm);
2160 rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
2161 if (gfs2_rs_active(ip->i_res))
2162 gfs2_adjust_reservation(ip, &rbm, *nblocks);
2168 ip->i_goal = block + ndata - 1;
2169 error = gfs2_meta_inode_buffer(ip, &dibh);
2171 struct gfs2_dinode *di =
2172 (struct gfs2_dinode *)dibh->b_data;
2173 gfs2_trans_add_meta(ip->i_gl, dibh);
2174 di->di_goal_meta = di->di_goal_data =
2175 cpu_to_be64(ip->i_goal);
2179 if (rbm.rgd->rd_free < *nblocks) {
2180 printk(KERN_WARNING "nblocks=%u\n", *nblocks);
2184 rbm.rgd->rd_free -= *nblocks;
2186 rbm.rgd->rd_dinodes++;
2187 *generation = rbm.rgd->rd_igeneration++;
2188 if (*generation == 0)
2189 *generation = rbm.rgd->rd_igeneration++;
2192 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
2193 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2194 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
2196 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
2198 gfs2_trans_add_unrevoke(sdp, block, 1);
2200 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
2202 rbm.rgd->rd_free_clone -= *nblocks;
2203 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
2204 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2209 gfs2_rgrp_error(rbm.rgd);
2214 * __gfs2_free_blocks - free a contiguous run of block(s)
2215 * @ip: the inode these blocks are being freed from
2216 * @bstart: first block of a run of contiguous blocks
2217 * @blen: the length of the block run
2218 * @meta: 1 if the blocks represent metadata
2222 void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
2224 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2225 struct gfs2_rgrpd *rgd;
2227 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2230 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
2231 rgd->rd_free += blen;
2232 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
2233 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2234 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2235 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2237 /* Directories keep their data in the metadata address space */
2238 if (meta || ip->i_depth)
2239 gfs2_meta_wipe(ip, bstart, blen);
2243 * gfs2_free_meta - free a contiguous run of metadata block(s)
2244 * @ip: the inode these blocks are being freed from
2245 * @bstart: first block of a run of contiguous blocks
2246 * @blen: the length of the block run
2250 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
2252 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2254 __gfs2_free_blocks(ip, bstart, blen, 1);
2255 gfs2_statfs_change(sdp, 0, +blen, 0);
2256 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
2259 void gfs2_unlink_di(struct inode *inode)
2261 struct gfs2_inode *ip = GFS2_I(inode);
2262 struct gfs2_sbd *sdp = GFS2_SB(inode);
2263 struct gfs2_rgrpd *rgd;
2264 u64 blkno = ip->i_no_addr;
2266 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2269 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2270 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2271 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2272 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2273 update_rgrp_lvb_unlinked(rgd, 1);
2276 static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
2278 struct gfs2_sbd *sdp = rgd->rd_sbd;
2279 struct gfs2_rgrpd *tmp_rgd;
2281 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
2284 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2286 if (!rgd->rd_dinodes)
2287 gfs2_consist_rgrpd(rgd);
2291 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2292 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2293 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2294 update_rgrp_lvb_unlinked(rgd, -1);
2296 gfs2_statfs_change(sdp, 0, +1, -1);
2300 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2302 gfs2_free_uninit_di(rgd, ip->i_no_addr);
2303 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2304 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
2305 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
2309 * gfs2_check_blk_type - Check the type of a block
2310 * @sdp: The superblock
2311 * @no_addr: The block number to check
2312 * @type: The block type we are looking for
2314 * Returns: 0 if the block type matches the expected type
2315 * -ESTALE if it doesn't match
2316 * or -ve errno if something went wrong while checking
2319 int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2321 struct gfs2_rgrpd *rgd;
2322 struct gfs2_holder rgd_gh;
2323 int error = -EINVAL;
2325 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
2329 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2333 if (gfs2_get_block_type(rgd, no_addr) != type)
2336 gfs2_glock_dq_uninit(&rgd_gh);
2342 * gfs2_rlist_add - add a RG to a list of RGs
2344 * @rlist: the list of resource groups
2347 * Figure out what RG a block belongs to and add that RG to the list
2349 * FIXME: Don't use NOFAIL
2353 void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
2356 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2357 struct gfs2_rgrpd *rgd;
2358 struct gfs2_rgrpd **tmp;
2359 unsigned int new_space;
2362 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2365 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2368 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2370 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
2375 for (x = 0; x < rlist->rl_rgrps; x++)
2376 if (rlist->rl_rgd[x] == rgd)
2379 if (rlist->rl_rgrps == rlist->rl_space) {
2380 new_space = rlist->rl_space + 10;
2382 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
2383 GFP_NOFS | __GFP_NOFAIL);
2385 if (rlist->rl_rgd) {
2386 memcpy(tmp, rlist->rl_rgd,
2387 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2388 kfree(rlist->rl_rgd);
2391 rlist->rl_space = new_space;
2392 rlist->rl_rgd = tmp;
2395 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2399 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
2400 * and initialize an array of glock holders for them
2401 * @rlist: the list of resource groups
2402 * @state: the lock state to acquire the RG lock in
2404 * FIXME: Don't use NOFAIL
2408 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2412 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
2413 GFP_NOFS | __GFP_NOFAIL);
2414 for (x = 0; x < rlist->rl_rgrps; x++)
2415 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
2421 * gfs2_rlist_free - free a resource group list
2422 * @list: the list of resource groups
2426 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2430 kfree(rlist->rl_rgd);
2432 if (rlist->rl_ghs) {
2433 for (x = 0; x < rlist->rl_rgrps; x++)
2434 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2435 kfree(rlist->rl_ghs);
2436 rlist->rl_ghs = NULL;