2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/prefetch.h>
17 #include <linux/blkdev.h>
32 #include "ops_address.h"
/* Sentinel returned by the bitmap search when no matching bit-pair exists. */
34 #define BFITNOENT ((u32)~0)
/* Sentinel "no block yet" value for 64-bit block numbers. */
35 #define NO_BLOCK ((u64)~0)
/*
 * Per-word scan constants for gfs2_bitfit(): a word of 0x55..55 means every
 * 2-bit pair has its low bit set.  NOTE(review): the #else and #endif lines
 * of this conditional are elided in this extract; only the two definition
 * groups (32-bit and 64-bit) are visible.
 */
37 #if BITS_PER_LONG == 32
38 #define LBITMASK (0x55555555UL)
39 #define LBITSKIP55 (0x55555555UL)
40 #define LBITSKIP00 (0x00000000UL)
42 #define LBITMASK (0x5555555555555555UL)
43 #define LBITSKIP55 (0x5555555555555555UL)
44 #define LBITSKIP00 (0x0000000000000000UL)
48 * These routines are used by the resource group routines (rgrp.c)
49 * to keep track of block allocation. Each block is represented by two
50 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
53 * 1 = Used (not metadata)
54 * 2 = Unlinked (still in use) inode
/*
 * Table of legal 2-bit block-state transitions, indexed by
 * (new_state * 4 + cur_state) in gfs2_setbit().  A zero entry marks an
 * illegal transition and triggers gfs2_consist_rgrpd().
 * NOTE(review): the initializer rows are elided in this extract.
 */
58 static const char valid_change[16] = {
/*
 * Forward declaration of rgblk_search() so try_rgrp_unlink() (defined above
 * the full definition) can call it.  NOTE(review): the final parameter line
 * of this prototype is elided in this extract.
 */
66 static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
67 unsigned char old_state, unsigned char new_state,
71 * gfs2_setbit - Set a bit in the bitmaps
72 * @buffer: the buffer that holds the bitmaps
73 * @buflen: the length (in bytes) of the buffer
74 * @block: the block to set
75 * @new_state: the new state of the block
79 static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1,
80 unsigned char *buf2, unsigned int offset,
81 unsigned int buflen, u32 block,
82 unsigned char new_state)
84 unsigned char *byte1, *byte2, *end, cur_state;
/* Each block is GFS2_BIT_SIZE (2) bits; bit is the shift within the byte. */
85 const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
87 byte1 = buf1 + offset + (block / GFS2_NBBY);
88 end = buf1 + offset + buflen;
/* NOTE(review): a bounds assertion on byte1 < end appears to be elided here. */
92 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
/* Refuse state transitions not permitted by valid_change[] and flag the
 * resource group as inconsistent. */
94 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
95 gfs2_consist_rgrpd(rgd);
/* XOR with (cur ^ new) rewrites just this block's 2-bit field in place. */
98 *byte1 ^= (cur_state ^ new_state) << bit;
/* Mirror the change into the clone bitmap (buf2) — presumably guarded by a
 * NULL check on an elided line; confirm against the full source. */
101 byte2 = buf2 + offset + (block / GFS2_NBBY);
102 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
103 *byte2 ^= (cur_state ^ new_state) << bit;
108 * gfs2_testbit - test a bit in the bitmaps
109 * @buffer: the buffer that holds the bitmaps
110 * @buflen: the length (in bytes) of the buffer
111 * @block: the block to read
115 static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
116 const unsigned char *buffer,
117 unsigned int buflen, u32 block)
119 const unsigned char *byte, *end;
120 unsigned char cur_state;
/* Locate the byte holding this block's 2-bit pair and its shift. */
123 byte = buffer + (block / GFS2_NBBY);
124 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
125 end = buffer + buflen;
127 gfs2_assert(rgd->rd_sbd, byte < end);
/* Extract the 2-bit GFS2_BLKST_* state; the return statement is elided in
 * this extract but cur_state is presumably returned. */
129 cur_state = (*byte >> bit) & GFS2_BIT_MASK;
135 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
136 * a block in a given allocation state.
137 * @buffer: the buffer that holds the bitmaps
138 * @buflen: the length (in bytes) of the buffer
139 * @goal: start search at this block's bit-pair (within @buffer)
140 * @old_state: GFS2_BLKST_XXX the state of the block we're looking for.
142 * Scope of @goal and returned block number is only within this bitmap buffer,
143 * not entire rgrp or filesystem. @buffer will be offset from the actual
144 * beginning of a bitmap block buffer, skipping any header structures.
146 * Return: the block number (bitmap buffer scope) that was found
149 static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
152 const u8 *byte, *start, *end;
154 u32 g1, g2, misaligned;
155 unsigned long *plong;
156 unsigned long lskipval;
/* Choose the word-skip pattern: 0x55..55 skips all-free words when hunting
 * used blocks, and vice versa (see LBITSKIP* above). */
158 lskipval = (old_state & GFS2_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
159 g1 = (goal / GFS2_NBBY);
162 end = buffer + buflen;
/* Round the starting byte up to a word boundary; the byte-at-a-time loop
 * below covers the misaligned prefix. */
163 g2 = ALIGN(g1, sizeof(unsigned long));
164 plong = (unsigned long *)(buffer + g2);
165 startbit = bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
166 misaligned = g2 - g1;
169 /* parse the bitmap a byte at a time */
172 if (((*byte >> bit) & GFS2_BIT_MASK) == old_state) {
174 (((byte - start) * GFS2_NBBY) +
175 ((bit - startbit) >> 1))
177 bit += GFS2_BIT_SIZE;
178 if (bit >= GFS2_NBBY * GFS2_BIT_SIZE) {
183 plong = (unsigned long *)byte;
190 /* parse the bitmap a unsigned long at a time */
192 /* Stop at "end - 1" or else prefetch can go past the end and segfault.
193 We could "if" it but we'd lose some of the performance gained.
194 This way will only slow down searching the very last 4/8 bytes
195 depending on architecture. I've experimented with several ways
196 of writing this section such as using an else before the goto
197 but this one seems to be the fastest. */
198 while ((unsigned char *)plong < end - sizeof(unsigned long)) {
200 if (((*plong) & LBITMASK) != lskipval)
/* Word loop found a candidate word (or ran out); fall back to the
 * byte-at-a-time scan for the remainder. */
204 if ((unsigned char *)plong < end) {
205 byte = (const u8 *)plong;
206 misaligned += sizeof(unsigned long) - 1;
213 * gfs2_bitcount - count the number of bits in a certain state
214 * @buffer: the buffer that holds the bitmaps
215 * @buflen: the length (in bytes) of the buffer
216 * @state: the state of the block we're looking for
218 * Returns: The number of bits
221 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
222 unsigned int buflen, u8 state)
224 const u8 *byte = buffer;
225 const u8 *end = buffer + buflen;
/* Pre-shift the target state to each of the four 2-bit positions so the
 * loop can compare with a plain mask instead of shifting per pair. */
226 const u8 state1 = state << 2;
227 const u8 state2 = state << 4;
228 const u8 state3 = state << 6;
/* Check all four bit-pairs in every byte; increments of the counter are on
 * elided lines following each comparison. */
231 for (; byte < end; byte++) {
232 if (((*byte) & 0x03) == state)
234 if (((*byte) & 0x0C) == state1)
236 if (((*byte) & 0x30) == state2)
238 if (((*byte) & 0xC0) == state3)
246 * gfs2_rgrp_verify - Verify that a resource group is consistent
247 * @sdp: the filesystem
252 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
254 struct gfs2_sbd *sdp = rgd->rd_sbd;
255 struct gfs2_bitmap *bi = NULL;
256 u32 length = rgd->rd_length;
260 memset(count, 0, 4 * sizeof(u32));
262 /* Count # blocks in each of 4 possible allocation states */
263 for (buf = 0; buf < length; buf++) {
264 bi = rgd->rd_bits + buf;
265 for (x = 0; x < 4; x++)
266 count[x] += gfs2_bitcount(rgd,
/* Cross-check the bitmap tallies against the cached rgrp header fields;
 * any mismatch marks the rgrp inconsistent and logs the discrepancy. */
272 if (count[0] != rgd->rd_free) {
273 if (gfs2_consist_rgrpd(rgd))
274 fs_err(sdp, "free data mismatch: %u != %u\n",
275 count[0], rgd->rd_free);
/* Used (non-inode) + unlinked blocks must equal total minus free minus
 * dinodes. */
279 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
280 if (count[1] + count[2] != tmp) {
281 if (gfs2_consist_rgrpd(rgd))
282 fs_err(sdp, "used data mismatch: %u != %u\n",
287 if (count[3] != rgd->rd_dinodes) {
288 if (gfs2_consist_rgrpd(rgd))
289 fs_err(sdp, "used metadata mismatch: %u != %u\n",
290 count[3], rgd->rd_dinodes);
/* Unlinked-inode blocks can never outnumber dinode blocks. */
294 if (count[2] > count[3]) {
295 if (gfs2_consist_rgrpd(rgd))
296 fs_err(sdp, "unlinked inodes > inodes: %u\n",
/* Return non-zero iff @block falls inside @rgd's data range
 * [rd_data0, rd_data0 + rd_data). */
303 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
305 u64 first = rgd->rd_data0;
306 u64 last = first + rgd->rd_data;
307 return first <= block && block < last;
311 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
312 * @sdp: The GFS2 superblock
313 * @n: The data block number
315 * Returns: The resource group, or NULL if not found
318 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk)
320 struct gfs2_rgrpd *rgd;
/* Walk the MRU list under the rindex spinlock; a hit is promoted to the
 * front so hot rgrps are found fastest next time. */
322 spin_lock(&sdp->sd_rindex_spin);
324 list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
325 if (rgrp_contains_block(rgd, blk)) {
326 list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
327 spin_unlock(&sdp->sd_rindex_spin);
/* No rgrp contains the block; the NULL return line is elided. */
332 spin_unlock(&sdp->sd_rindex_spin);
338 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
339 * @sdp: The GFS2 superblock
341 * Returns: The first rgrp in the filesystem
344 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
/* The rindex list must be non-empty by the time anyone asks for an rgrp. */
346 gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
347 return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
351 * gfs2_rgrpd_get_next - get the next RG
354 * Returns: The next rgrp
357 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
/* Reaching the list head means @rgd was the last rgrp; the (elided) branch
 * presumably returns NULL there. */
359 if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
361 return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
/* Tear down every in-memory rgrp descriptor: detach from both rindex lists,
 * break the glock back-pointer, and free the descriptor. */
364 static void clear_rgrpdi(struct gfs2_sbd *sdp)
366 struct list_head *head;
367 struct gfs2_rgrpd *rgd;
368 struct gfs2_glock *gl;
/* Clear the forward pointer first so allocators can't pick up an rgrp that
 * is about to be freed. */
370 spin_lock(&sdp->sd_rindex_spin);
371 sdp->sd_rindex_forward = NULL;
372 spin_unlock(&sdp->sd_rindex_spin);
374 head = &sdp->sd_rindex_list;
375 while (!list_empty(head)) {
376 rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
379 list_del(&rgd->rd_list);
380 list_del(&rgd->rd_list_mru);
/* gl is presumably rgd->rd_gl, fetched on an elided line; the glock put
 * that pairs with this is also elided. */
383 gl->gl_object = NULL;
388 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
/* Public wrapper: serialize rgrp teardown against rindex updates with the
 * rindex mutex (the clear_rgrpdi() call between lock/unlock is elided). */
392 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
394 mutex_lock(&sdp->sd_rindex_mutex);
396 mutex_unlock(&sdp->sd_rindex_mutex);
/* Dump an rgrp's on-disk rindex fields to the kernel log; used from the
 * consistency-check failure path in compute_bitstructs(). */
399 static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
401 printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
402 printk(KERN_INFO "  ri_length = %u\n", rgd->rd_length);
403 printk(KERN_INFO "  ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
404 printk(KERN_INFO "  ri_data = %u\n", rgd->rd_data);
405 printk(KERN_INFO "  ri_bitbytes = %u\n", rgd->rd_bitbytes);
409 * gfs2_compute_bitstructs - Compute the bitmap sizes
410 * @rgd: The resource group descriptor
412 * Calculates bitmap descriptors, one for each block that contains bitmap data
417 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
419 struct gfs2_sbd *sdp = rgd->rd_sbd;
420 struct gfs2_bitmap *bi;
421 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
422 u32 bytes_left, bytes;
/* One gfs2_bitmap per header/bitmap block; the allocation-failure check is
 * on an elided line. */
428 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
432 bytes_left = rgd->rd_bitbytes;
434 for (x = 0; x < length; x++) {
435 bi = rgd->rd_bits + x;
/* Four layout cases: bitmap entirely in the header block; first block
 * (header + partial bitmap); last block; or a full middle block.  Each
 * case records where this block's bitmap bytes start within the rgrp. */
437 /* small rgrp; bitmap stored completely in header block */
440 bi->bi_offset = sizeof(struct gfs2_rgrp);
445 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
446 bi->bi_offset = sizeof(struct gfs2_rgrp);
450 } else if (x + 1 == length) {
452 bi->bi_offset = sizeof(struct gfs2_meta_header);
453 bi->bi_start = rgd->rd_bitbytes - bytes_left;
457 bytes = sdp->sd_sb.sb_bsize -
458 sizeof(struct gfs2_meta_header);
459 bi->bi_offset = sizeof(struct gfs2_meta_header);
460 bi->bi_start = rgd->rd_bitbytes - bytes_left;
/* Leftover bytes after the loop means the rindex entry is corrupt. */
468 gfs2_consist_rgrpd(rgd);
/* Sanity: the last bitmap block must account for exactly rd_data blocks
 * (GFS2_NBBY blocks per byte). */
471 bi = rgd->rd_bits + (length - 1);
472 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
473 if (gfs2_consist_rgrpd(rgd)) {
474 gfs2_rindex_print(rgd);
475 fs_err(sdp, "start=%u len=%u offset=%u\n",
476 bi->bi_start, bi->bi_len, bi->bi_offset);
485 * gfs2_ri_total - Total up the file system space, according to the rindex.
488 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
491 struct inode *inode = sdp->sd_rindex;
492 struct gfs2_inode *ip = GFS2_I(inode);
493 char buf[sizeof(struct gfs2_rindex)];
494 struct file_ra_state ra_state;
/* Read each fixed-size rindex record sequentially and sum its ri_data
 * (block count); readahead state speeds the sequential scan. */
497 mutex_lock(&sdp->sd_rindex_mutex);
498 file_ra_state_init(&ra_state, inode->i_mapping);
499 for (rgrps = 0;; rgrps++) {
500 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
/* Stop at a trailing partial record. */
502 if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize)
504 error = gfs2_internal_read(ip, &ra_state, buf, &pos,
505 sizeof(struct gfs2_rindex));
506 if (error != sizeof(struct gfs2_rindex))
508 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
/* The return of total_data is on an elided line after the unlock. */
510 mutex_unlock(&sdp->sd_rindex_mutex);
/* Decode one big-endian on-disk rindex record into the in-core rgrp
 * descriptor fields. */
514 static void gfs2_rindex_in(struct gfs2_rgrpd *rgd, const void *buf)
516 const struct gfs2_rindex *str = buf;
518 rgd->rd_addr = be64_to_cpu(str->ri_addr);
519 rgd->rd_length = be32_to_cpu(str->ri_length);
520 rgd->rd_data0 = be64_to_cpu(str->ri_data0);
521 rgd->rd_data = be32_to_cpu(str->ri_data);
522 rgd->rd_bitbytes = be32_to_cpu(str->ri_bitbytes);
526 * read_rindex_entry - Pull in a new resource index entry from the disk
527 * @gl: The glock covering the rindex inode
529 * Returns: 0 on success, error code otherwise
532 static int read_rindex_entry(struct gfs2_inode *ip,
533 struct file_ra_state *ra_state)
535 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
/* Offset of the next unread record, derived from how many rgrps we have
 * already instantiated. */
536 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
537 char buf[sizeof(struct gfs2_rindex)];
539 struct gfs2_rgrpd *rgd;
541 error = gfs2_internal_read(ip, ra_state, buf, &pos,
542 sizeof(struct gfs2_rindex));
/* A short read is an error/EOF case; handling lines are elided. */
545 if (error != sizeof(struct gfs2_rindex)) {
551 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
556 mutex_init(&rgd->rd_mutex);
557 lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
/* New rgrp goes on both the ordered rindex list and the MRU list. */
560 list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
561 list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
563 gfs2_rindex_in(rgd, buf);
564 error = compute_bitstructs(rgd);
/* Acquire (or create) the glock keyed by the rgrp's disk address and link
 * it back to the descriptor. */
568 error = gfs2_glock_get(sdp, rgd->rd_addr,
569 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
573 rgd->rd_gl->gl_object = rgd;
/* Header not read yet; GFS2_RDF_CHECK asks the allocator to scan for
 * unlinked inodes on first use. */
574 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
575 rgd->rd_flags |= GFS2_RDF_CHECK;
580 * gfs2_ri_update - Pull in a new resource index from the disk
581 * @ip: pointer to the rindex inode
583 * Returns: 0 on successful update, error code otherwise
586 static int gfs2_ri_update(struct gfs2_inode *ip)
588 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
589 struct inode *inode = &ip->i_inode;
590 struct file_ra_state ra_state;
591 u64 rgrp_count = ip->i_disksize;
/* The rindex file must be an exact multiple of the record size; a remainder
 * means on-disk corruption.  do_div leaves the quotient in rgrp_count. */
594 if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
595 gfs2_consist_inode(ip);
601 file_ra_state_init(&ra_state, inode->i_mapping);
602 for (sdp->sd_rgrps = 0; sdp->sd_rgrps < rgrp_count; sdp->sd_rgrps++) {
603 error = read_rindex_entry(ip, &ra_state);
610 sdp->sd_rindex_uptodate = 1;
615 * gfs2_ri_update_special - Pull in a new resource index from the disk
617 * This is a special version that's safe to call from gfs2_inplace_reserve_i.
618 * In this case we know that we don't have any resource groups in memory yet.
620 * @ip: pointer to the rindex inode
622 * Returns: 0 on successful update, error code otherwise
624 static int gfs2_ri_update_special(struct gfs2_inode *ip)
626 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
627 struct inode *inode = &ip->i_inode;
628 struct file_ra_state ra_state;
/* Unlike gfs2_ri_update() this loops until a partial record is seen rather
 * than precomputing the count (and does no multiple-of check). */
631 file_ra_state_init(&ra_state, inode->i_mapping);
632 for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
633 /* Ignore partials */
634 if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
637 error = read_rindex_entry(ip, &ra_state);
644 sdp->sd_rindex_uptodate = 1;
649 * gfs2_rindex_hold - Grab a lock on the rindex
650 * @sdp: The GFS2 superblock
651 * @ri_gh: the glock holder
653 * We grab a lock on the rindex inode to make sure that it doesn't
654 * change whilst we are performing an operation. We keep this lock
655 * for quite long periods of time compared to other locks. This
656 * doesn't matter, since it is shared and it is very, very rarely
657 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
659 * This makes sure that we're using the latest copy of the resource index
660 * special file, which might have been updated if someone expanded the
661 * filesystem (via gfs2_grow utility), which adds new resource groups.
663 * Returns: 0 on success, error code otherwise
666 int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
668 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
669 struct gfs2_glock *gl = ip->i_gl;
672 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
676 /* Read new copy from disk if we don't have the latest */
/* Double-checked under the mutex: the flag may have flipped while waiting. */
677 if (!sdp->sd_rindex_uptodate) {
678 mutex_lock(&sdp->sd_rindex_mutex);
679 if (!sdp->sd_rindex_uptodate) {
680 error = gfs2_ri_update(ip);
/* On update failure, drop the shared glock before returning. */
682 gfs2_glock_dq_uninit(ri_gh);
684 mutex_unlock(&sdp->sd_rindex_mutex);
/* Decode the on-disk (big-endian) rgrp header into in-core fields,
 * translating the on-disk NOALLOC flag into the in-core RDF flag. */
690 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
692 const struct gfs2_rgrp *str = buf;
695 rg_flags = be32_to_cpu(str->rg_flags);
696 if (rg_flags & GFS2_RGF_NOALLOC)
697 rgd->rd_flags |= GFS2_RDF_NOALLOC;
699 rgd->rd_flags &= ~GFS2_RDF_NOALLOC;
700 rgd->rd_free = be32_to_cpu(str->rg_free);
701 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
702 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
/* Encode the in-core rgrp header back to its on-disk (big-endian) layout;
 * padding and reserved bytes are explicitly zeroed. */
705 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
707 struct gfs2_rgrp *str = buf;
710 if (rgd->rd_flags & GFS2_RDF_NOALLOC)
711 rg_flags |= GFS2_RGF_NOALLOC;
712 str->rg_flags = cpu_to_be32(rg_flags);
713 str->rg_free = cpu_to_be32(rgd->rd_free);
714 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
715 str->__pad = cpu_to_be32(0);
716 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
717 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
721 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
722 * @rgd: the struct gfs2_rgrpd describing the RG to read in
724 * Read in all of a Resource Group's header and bitmap blocks.
725 * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
730 int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
732 struct gfs2_sbd *sdp = rgd->rd_sbd;
733 struct gfs2_glock *gl = rgd->rd_gl;
734 unsigned int length = rgd->rd_length;
735 struct gfs2_bitmap *bi;
739 mutex_lock(&rgd->rd_mutex);
/* Fast path: buffers already held; bump the refcount (on an elided line)
 * and return. */
741 spin_lock(&sdp->sd_rindex_spin);
742 if (rgd->rd_bh_count) {
744 spin_unlock(&sdp->sd_rindex_spin);
745 mutex_unlock(&rgd->rd_mutex);
748 spin_unlock(&sdp->sd_rindex_spin);
/* Issue all reads first, then wait, so the I/O overlaps. */
750 for (x = 0; x < length; x++) {
751 bi = rgd->rd_bits + x;
752 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
/* Wait in reverse order and verify metadata type: block 0 is the rgrp
 * header (GFS2_METATYPE_RG, on an elided line), the rest are RB blocks. */
757 for (y = length; y--;) {
758 bi = rgd->rd_bits + y;
759 error = gfs2_meta_wait(sdp, bi->bi_bh);
762 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
/* First successful read: populate in-core fields from the header. */
769 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
770 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
771 rgd->rd_flags |= GFS2_RDF_UPTODATE;
774 spin_lock(&sdp->sd_rindex_spin);
775 rgd->rd_free_clone = rgd->rd_free;
777 spin_unlock(&sdp->sd_rindex_spin);
779 mutex_unlock(&rgd->rd_mutex);
/* Error path (label elided): release any buffers already read. */
785 bi = rgd->rd_bits + x;
788 gfs2_assert_warn(sdp, !bi->bi_clone);
790 mutex_unlock(&rgd->rd_mutex);
/* Take an extra reference on an rgrp's already-read buffers; the caller must
 * already hold one (asserted), the increment itself is on an elided line. */
795 void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
797 struct gfs2_sbd *sdp = rgd->rd_sbd;
799 spin_lock(&sdp->sd_rindex_spin);
800 gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
802 spin_unlock(&sdp->sd_rindex_spin);
806 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
807 * @rgd: the struct gfs2_rgrpd describing the RG to read in
811 void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
813 struct gfs2_sbd *sdp = rgd->rd_sbd;
814 int x, length = rgd->rd_length;
816 spin_lock(&sdp->sd_rindex_spin);
817 gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
/* Not the last reference: just drop the count and return. */
818 if (--rgd->rd_bh_count) {
819 spin_unlock(&sdp->sd_rindex_spin);
/* Last reference: release each bitmap's buffer head (brelse/kfree calls are
 * on elided lines inside the loop). */
823 for (x = 0; x < length; x++) {
824 struct gfs2_bitmap *bi = rgd->rd_bits + x;
831 spin_unlock(&sdp->sd_rindex_spin);
/* Issue block-device discard (TRIM) requests for blocks that became free in
 * this bitmap since the clone snapshot was taken, coalescing contiguous
 * runs into single extents.  On any discard error, discards are disabled
 * for the whole filesystem. */
834 static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
835 const struct gfs2_bitmap *bi)
837 struct super_block *sb = sdp->sd_vfs;
838 struct block_device *bdev = sb->s_bdev;
839 const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
840 bdev_hardsect_size(sb->s_bdev);
843 sector_t nr_sects = 0;
847 for (x = 0; x < bi->bi_len; x++) {
848 const u8 *orig = bi->bi_bh->b_data + bi->bi_offset + x;
849 const u8 *clone = bi->bi_clone + bi->bi_offset + x;
/* diff has a bit set for each 2-bit pair that is free in the clone but not
 * free on disk — i.e. newly freed blocks eligible for discard. */
850 u8 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
854 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
855 blk *= sects_per_blk; /* convert to sectors */
859 goto start_new_extent;
/* Non-contiguous block: flush the pending extent before starting anew. */
860 if ((start + nr_sects) != blk) {
861 rv = blkdev_issue_discard(bdev, start,
869 nr_sects += sects_per_blk;
872 blk += sects_per_blk;
/* Flush any final pending extent. */
876 rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS);
882 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
883 sdp->sd_args.ar_discard = 0;
/* Re-sync each bitmap's clone with the on-disk bitmap (sending discards for
 * newly freed blocks first, if enabled) and reset the free-clone count. */
886 void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
888 struct gfs2_sbd *sdp = rgd->rd_sbd;
889 unsigned int length = rgd->rd_length;
892 for (x = 0; x < length; x++) {
893 struct gfs2_bitmap *bi = rgd->rd_bits + x;
/* NOTE(review): a skip for bitmaps without a clone is presumably on an
 * elided line before this point. */
896 if (sdp->sd_args.ar_discard)
897 gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bi);
898 memcpy(bi->bi_clone + bi->bi_offset,
899 bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
902 spin_lock(&sdp->sd_rindex_spin);
903 rgd->rd_free_clone = rgd->rd_free;
904 spin_unlock(&sdp->sd_rindex_spin);
908 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
909 * @ip: the incore GFS2 inode structure
911 * Returns: the struct gfs2_alloc
914 struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
/* The inode must not already have an allocation context attached. */
916 BUG_ON(ip->i_alloc != NULL);
917 ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_KERNEL);
922 * try_rgrp_fit - See if a given reservation will fit in a given RG
924 * @al: the struct gfs2_alloc structure describing the reservation
926 * If there's room for the requested blocks to be allocated from the RG:
927 * Sets the $al_rgd field in @al.
929 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
932 static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
934 struct gfs2_sbd *sdp = rgd->rd_sbd;
/* rgrps flagged NOALLOC (e.g. reserved by gfs2_grow) are never used. */
937 if (rgd->rd_flags & GFS2_RDF_NOALLOC)
/* Compare against rd_free_clone (free count minus in-flight allocations),
 * which is protected by the rindex spinlock. */
940 spin_lock(&sdp->sd_rindex_spin);
941 if (rgd->rd_free_clone >= al->al_requested) {
945 spin_unlock(&sdp->sd_rindex_spin);
951 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
954 * Returns: The inode, if one has been found
957 static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
962 struct gfs2_sbd *sdp = rgd->rd_sbd;
966 if (goal >= rgd->rd_data)
/* The log flush lock is held across the search — presumably to keep the
 * bitmaps stable against a concurrent log flush; confirm in full source. */
968 down_write(&sdp->sd_log_flush_lock);
/* Search for UNLINKED->UNLINKED, i.e. locate without changing state. */
970 block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
971 GFS2_BLKST_UNLINKED, &n);
972 up_write(&sdp->sd_log_flush_lock);
973 if (block == BFITNOENT)
975 /* rgblk_search can return a block < goal, so we need to
976 keep it marching forward. */
977 no_addr = block + rgd->rd_data0;
/* Skip addresses at or below the last one we already tried, so repeated
 * calls make forward progress. */
979 if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
981 *last_unlinked = no_addr;
982 inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
/* Nothing (more) found: clear the "needs unlinked scan" flag. */
988 rgd->rd_flags &= ~GFS2_RDF_CHECK;
993 * recent_rgrp_next - get next RG from "recent" list
994 * @cur_rgd: current rgrp
996 * Returns: The next rgrp in the recent list
999 static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd)
1001 struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
1002 struct list_head *head;
1003 struct gfs2_rgrpd *rgd;
1005 spin_lock(&sdp->sd_rindex_spin);
1006 head = &sdp->sd_rindex_mru_list;
/* End of the MRU list reached; the NULL return is on an elided line. */
1007 if (unlikely(cur_rgd->rd_list_mru.next == head)) {
1008 spin_unlock(&sdp->sd_rindex_spin);
1011 rgd = list_entry(cur_rgd->rd_list_mru.next, struct gfs2_rgrpd, rd_list_mru);
1012 spin_unlock(&sdp->sd_rindex_spin);
1017 * forward_rgrp_get - get an rgrp to try next from full list
1018 * @sdp: The GFS2 superblock
1020 * Returns: The rgrp to try next
1023 static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
1025 struct gfs2_rgrpd *rgd;
1026 unsigned int journals = gfs2_jindex_size(sdp);
1027 unsigned int rg = 0, x;
1029 spin_lock(&sdp->sd_rindex_spin);
1031 rgd = sdp->sd_rindex_forward;
/* No cached forward rgrp: pick a starting rgrp spread by journal id so
 * different nodes start their scans in different regions. */
1033 if (sdp->sd_rgrps >= journals)
1034 rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;
1036 for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg;
1037 x++, rgd = gfs2_rgrpd_get_next(rgd))
1040 sdp->sd_rindex_forward = rgd;
1043 spin_unlock(&sdp->sd_rindex_spin);
1049 * forward_rgrp_set - set the forward rgrp pointer
1050 * @sdp: the filesystem
1051 * @rgd: The new forward rgrp
1055 static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
1057 spin_lock(&sdp->sd_rindex_spin);
1058 sdp->sd_rindex_forward = rgd;
1059 spin_unlock(&sdp->sd_rindex_spin);
1063 * get_local_rgrp - Choose and lock a rgrp for allocation
1064 * @ip: the inode to reserve space for
1065 * @rgp: the chosen and locked rgrp
1067 * Try to acquire rgrp in way which avoids contending with others.
1072 static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1074 struct inode *inode = NULL;
1075 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1076 struct gfs2_rgrpd *rgd, *begin = NULL;
1077 struct gfs2_alloc *al = ip->i_alloc;
/* First pass uses LM_FLAG_TRY so we skip rgrps another node holds. */
1078 int flags = LM_FLAG_TRY;
1081 int error, rg_locked;
/* Phase 1: start from the rgrp containing the inode's goal block and walk
 * the MRU list, trylock-ing each rgrp's glock. */
1083 rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);
/* If we already hold this rgrp's glock (e.g. rindex itself), don't
 * re-acquire it. */
1088 if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
1092 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1093 LM_FLAG_TRY, &al->al_rgd_gh);
1097 if (try_rgrp_fit(rgd, al))
/* Opportunistically reclaim unlinked inodes while we hold the lock. */
1099 if (rgd->rd_flags & GFS2_RDF_CHECK)
1100 inode = try_rgrp_unlink(rgd, last_unlinked);
1102 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1107 rgd = recent_rgrp_next(rgd);
1111 return ERR_PTR(error);
1115 /* Go through full list of rgrps */
/* Phase 2: full scan starting from the per-journal forward pointer; begin
 * remembers where we started so we detect a complete wrap. */
1117 begin = rgd = forward_rgrp_get(sdp);
1122 if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
1126 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags,
1131 if (try_rgrp_fit(rgd, al))
1133 if (rgd->rd_flags & GFS2_RDF_CHECK)
1134 inode = try_rgrp_unlink(rgd, last_unlinked);
1136 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1146 return ERR_PTR(error);
1149 rgd = gfs2_rgrpd_get_next(rgd);
1151 rgd = gfs2_rgrpd_get_first(sdp);
/* Wrapped all the way around with TRY still set — presumably the elided
 * logic drops LM_FLAG_TRY and retries blocking before giving up ENOSPC. */
1155 return ERR_PTR(-ENOSPC);
1160 gfs2_log_flush(sdp, NULL);
/* Success path: promote the chosen rgrp on the MRU list and advance the
 * forward pointer past it for the next caller. */
1166 spin_lock(&sdp->sd_rindex_spin);
1167 list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
1168 spin_unlock(&sdp->sd_rindex_spin);
1169 rgd = gfs2_rgrpd_get_next(rgd);
1171 rgd = gfs2_rgrpd_get_first(sdp);
1172 forward_rgrp_set(sdp, rgd);
1179 * gfs2_inplace_reserve_i - Reserve space in the filesystem
1180 * @ip: the inode to reserve space for
1185 int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
1187 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1188 struct gfs2_alloc *al = ip->i_alloc;
1189 struct inode *inode;
1191 u64 last_unlinked = NO_BLOCK;
/* A zero-block reservation is a caller bug. */
1193 if (gfs2_assert_warn(sdp, al->al_requested))
1197 /* We need to hold the rindex unless the inode we're using is
1198 the rindex itself, in which case it's already held. */
1199 if (ip != GFS2_I(sdp->sd_rindex))
1200 error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
1201 else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */
1202 error = gfs2_ri_update_special(ip);
/* get_local_rgrp either reserves an rgrp or hands back an unlinked inode
 * for the caller to dispose of (or an ERR_PTR). */
1207 inode = get_local_rgrp(ip, &last_unlinked);
1209 if (ip != GFS2_I(sdp->sd_rindex))
1210 gfs2_glock_dq_uninit(&al->al_ri_gh);
1212 return PTR_ERR(inode);
/* Presumably retried after a log flush frees up clone blocks; the loop
 * structure around this is elided. */
1214 gfs2_log_flush(sdp, NULL);
1225 * gfs2_inplace_release - release an inplace reservation
1226 * @ip: the inode the reservation was taken out on
1228 * Release a reservation made by gfs2_inplace_reserve().
1231 void gfs2_inplace_release(struct gfs2_inode *ip)
1233 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1234 struct gfs2_alloc *al = ip->i_alloc;
/* Allocating more than was reserved is a bug; log the call site recorded
 * at reserve time to aid debugging. */
1236 if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
1237 fs_warn(sdp, "al_alloced = %u, al_requested = %u "
1238 "al_file = %s, al_line = %u\n",
1239 al->al_alloced, al->al_requested, al->al_file,
/* Drop the rgrp glock (if held) and the rindex glock (unless we ARE the
 * rindex inode, which never took it). */
1243 if (al->al_rgd_gh.gh_gl)
1244 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1245 if (ip != GFS2_I(sdp->sd_rindex))
1246 gfs2_glock_dq_uninit(&al->al_ri_gh);
1250 * gfs2_get_block_type - Check a block in a RG is of given type
1251 * @rgd: the resource group holding the block
1252 * @block: the block number
1254 * Returns: The block type (GFS2_BLKST_*)
1257 unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
1259 struct gfs2_bitmap *bi = NULL;
1260 u32 length, rgrp_block, buf_block;
1264 length = rgd->rd_length;
/* Convert filesystem block number to an rgrp-relative index. */
1265 rgrp_block = block - rgd->rd_data0;
/* Find which bitmap block covers this index. */
1267 for (buf = 0; buf < length; buf++) {
1268 bi = rgd->rd_bits + buf;
1269 if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1273 gfs2_assert(rgd->rd_sbd, buf < length);
/* Re-scope the index to within the found bitmap block. */
1274 buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;
1276 type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
1277 bi->bi_len, buf_block);
1283 * rgblk_search - find a block in @old_state, change allocation
1284 * state to @new_state
1285 * @rgd: the resource group descriptor
1286 * @goal: the goal block within the RG (start here to search for avail block)
1287 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
1288 * @new_state: GFS2_BLKST_XXX the after-allocation block state
1289 * @n: The extent length
1291 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
1292 * Add the found bitmap buffer to the transaction.
1293 * Set the found bits to @new_state to change block's allocation state.
1295 * This function never fails, because we wouldn't call it unless we
1296 * know (from reservation results, etc.) that a block is available.
1298 * Scope of @goal and returned block is just within rgrp, not the whole
1301 * Returns: the block number allocated
1304 static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
1305 unsigned char old_state, unsigned char new_state,
1308 struct gfs2_bitmap *bi = NULL;
1309 const u32 length = rgd->rd_length;
1311 unsigned int buf, x;
/* elen caches the requested extent length; *n is reset and recounted as
 * blocks are claimed (some of that logic is on elided lines). */
1312 const unsigned int elen = *n;
1316 /* Find bitmap block that contains bits for goal block */
1317 for (buf = 0; buf < length; buf++) {
1318 bi = rgd->rd_bits + buf;
1319 if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1323 gfs2_assert(rgd->rd_sbd, buf < length);
1325 /* Convert scope of "goal" from rgrp-wide to within found bit block */
1326 goal -= bi->bi_start * GFS2_NBBY;
1328 /* Search (up to entire) bitmap in this rgrp for allocatable block.
1329 "x <= length", instead of "x < length", because we typically start
1330 the search in the middle of a bit block, but if we can't find an
1331 allocatable block anywhere else, we want to be able wrap around and
1332 search in the first part of our first-searched bit block. */
1333 for (x = 0; x <= length; x++) {
1334 /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
1335 bitmaps, so we must search the originals for that. */
1336 buffer = bi->bi_bh->b_data + bi->bi_offset;
1337 if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1338 buffer = bi->bi_clone + bi->bi_offset;
1340 blk = gfs2_bitfit(buffer, bi->bi_len, goal, old_state);
1341 if (blk != BFITNOENT)
1344 /* Try next bitmap block (wrap back to rgrp header if at end) */
1345 buf = (buf + 1) % length;
1346 bi = rgd->rd_bits + buf;
/* Found a block and the state actually changes: journal the bitmap buffer
 * and flip the first block, then (in the elided loop below) try to extend
 * the allocation into following free blocks up to elen. */
1350 if (blk != BFITNOENT && old_state != new_state) {
1352 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
1353 gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
1354 bi->bi_len, blk, new_state);
/* Extent extension: stop at the bitmap end or at a block whose state
 * doesn't match (loop header is on elided lines). */
1358 if (goal >= (bi->bi_len * GFS2_NBBY))
1360 if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
1363 gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone,
1364 bi->bi_offset, bi->bi_len, goal,
/* Convert back from bitmap-block scope to rgrp scope before returning. */
1370 return (blk == BFITNOENT) ? blk : (bi->bi_start * GFS2_NBBY) + blk;
1374 * rgblk_free - Change alloc state of given block(s)
1375 * @sdp: the filesystem
1376 * @bstart: the start of a run of blocks to free
1377 * @blen: the length of the block run (all must lie within ONE RG!)
1378 * @new_state: GFS2_BLKST_XXX the after-allocation block state
1380 * Returns: Resource group containing the block(s)
1383 static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
1384 u32 blen, unsigned char new_state)
1386 struct gfs2_rgrpd *rgd;
1387 struct gfs2_bitmap *bi = NULL;
1388 u32 length, rgrp_blk, buf_blk;
/* A block with no owning rgrp means filesystem corruption. */
1391 rgd = gfs2_blk2rgrpd(sdp, bstart);
1393 if (gfs2_consist(sdp))
1394 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
1398 length = rgd->rd_length;
1400 rgrp_blk = bstart - rgd->rd_data0;
/* Presumably iterates once per block in the run (loop header elided),
 * locating the covering bitmap each time. */
1403 for (buf = 0; buf < length; buf++) {
1404 bi = rgd->rd_bits + buf;
1405 if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1409 gfs2_assert(rgd->rd_sbd, buf < length);
1411 buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
/* Lazily create the clone bitmap on first free in this bitmap, so in-flight
 * allocations keep seeing the pre-free state.  __GFP_NOFAIL: this path
 * cannot back out. */
1414 if (!bi->bi_clone) {
1415 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
1416 GFP_NOFS | __GFP_NOFAIL);
1417 memcpy(bi->bi_clone + bi->bi_offset,
1418 bi->bi_bh->b_data + bi->bi_offset,
1421 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
/* NULL for buf2: the clone deliberately keeps the old state here. */
1422 gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset,
1423 bi->bi_len, buf_blk, new_state);
1430 * gfs2_alloc_block - Allocate a block
1431 * @ip: the inode to allocate the block for
1433 * Returns: the allocated block
1436 u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
1438 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1439 struct gfs2_alloc *al = ip->i_alloc;
1440 struct gfs2_rgrpd *rgd = al->al_rgd;
/* Prefer the inode's goal block if it lies in this rgrp; otherwise resume
 * from where this rgrp last allocated. */
1444 if (rgrp_contains_block(rgd, ip->i_goal))
1445 goal = ip->i_goal - rgd->rd_data0;
1447 goal = rgd->rd_last_alloc;
1449 blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED, n);
/* Reservation guarantees a free block exists; failure here is a bug. */
1450 BUG_ON(blk == BFITNOENT);
1452 rgd->rd_last_alloc = blk;
1453 block = rgd->rd_data0 + blk;
/* Update the free count (decrement is elided), journal the header block,
 * and rewrite the on-disk rgrp header. */
1456 gfs2_assert_withdraw(sdp, rgd->rd_free >= *n);
1459 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1460 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1462 al->al_alloced += *n;
/* Account the allocation in statfs and the owner's quota. */
1464 gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
1465 gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
1467 spin_lock(&sdp->sd_rindex_spin);
1468 rgd->rd_free_clone -= *n;
1469 spin_unlock(&sdp->sd_rindex_spin);
1475 * gfs2_alloc_di - Allocate a dinode
1476 * @dip: the directory that the inode is going in
1478 * Returns: the block allocated
1481 u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
1483 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
1484 struct gfs2_alloc *al = dip->i_alloc;
1485 struct gfs2_rgrpd *rgd = al->al_rgd;
/* Search from this rgrp's last allocation point for one free block and
 * mark it as a dinode (GFS2_BLKST_DINODE rather than _USED). */
1490 blk = rgblk_search(rgd, rgd->rd_last_alloc,
1491 GFS2_BLKST_FREE, GFS2_BLKST_DINODE, &n);
/* As in gfs2_alloc_block(): the reservation guarantees success. */
1492 BUG_ON(blk == BFITNOENT);
1494 rgd->rd_last_alloc = blk;
/* rgrp-relative offset -> filesystem block number. */
1496 block = rgd->rd_data0 + blk;
1498 gfs2_assert_withdraw(sdp, rgd->rd_free);
/* Hand the caller a unique inode generation number from this rgrp,
 * then journal and write back the updated rgrp header. */
1501 *generation = rgd->rd_igeneration++;
1502 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1503 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
/* One fewer free block, one more dinode; revoke any stale journal
 * entry for this block so old contents can't be replayed over it. */
1507 gfs2_statfs_change(sdp, 0, -1, +1);
1508 gfs2_trans_add_unrevoke(sdp, block, 1);
/* rd_free_clone is protected by sd_rindex_spin. */
1510 spin_lock(&sdp->sd_rindex_spin);
1511 rgd->rd_free_clone--;
1512 spin_unlock(&sdp->sd_rindex_spin);
1518 * gfs2_free_data - free a contiguous run of data block(s)
1519 * @ip: the inode these blocks are being freed from
1520 * @bstart: first block of a run of contiguous blocks
1521 * @blen: the length of the block run
1525 void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
1527 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1528 struct gfs2_rgrpd *rgd;
/* Flip the run back to FREE in the bitmaps; rgblk_free() returns the
 * owning resource group. */
1530 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1534 rgd->rd_free += blen;
/* Journal the rgrp header, write back the new counters, and attach the
 * rgrp to the current transaction. */
1536 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1537 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1539 gfs2_trans_add_rg(rgd);
/* Return blen blocks to statfs and credit the owner's quota. */
1541 gfs2_statfs_change(sdp, 0, +blen, 0);
1542 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1546 * gfs2_free_meta - free a contiguous run of metadata block(s)
1547 * @ip: the inode these blocks are being freed from
1548 * @bstart: first block of a run of contiguous blocks
1549 * @blen: the length of the block run
1553 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
1555 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1556 struct gfs2_rgrpd *rgd;
/* Same bitmap/statfs/quota bookkeeping as gfs2_free_data()... */
1558 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1562 rgd->rd_free += blen;
1564 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1565 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1567 gfs2_trans_add_rg(rgd);
1569 gfs2_statfs_change(sdp, 0, +blen, 0);
1570 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
/* ...plus, for metadata only: invalidate any cached buffers for the
 * freed blocks so stale metadata can't be read back. */
1571 gfs2_meta_wipe(ip, bstart, blen);
/* gfs2_unlink_di - mark an inode's dinode block as unlinked (still
 * allocated, but no longer reachable via the directory tree) and
 * record the change in the owning resource group. */
1574 void gfs2_unlink_di(struct inode *inode)
1576 struct gfs2_inode *ip = GFS2_I(inode);
1577 struct gfs2_sbd *sdp = GFS2_SB(inode);
1578 struct gfs2_rgrpd *rgd;
1579 u64 blkno = ip->i_no_addr;
/* Transition the single dinode block USED -> UNLINKED in the bitmap. */
1581 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
/* Journal the rgrp header and attach the rgrp to the transaction. */
1584 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1585 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1586 gfs2_trans_add_rg(rgd);
/* gfs2_free_uninit_di - release a dinode block back to the FREE state
 * and update rgrp/statfs accounting.  "uninit" variant: no quota change
 * and no metadata wipe; gfs2_free_di() layers those on top. */
1589 static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
1591 struct gfs2_sbd *sdp = rgd->rd_sbd;
1592 struct gfs2_rgrpd *tmp_rgd;
1594 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
/* The block must belong to the rgrp the caller passed in. */
1597 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
/* A zero dinode count with a dinode still being freed means the
 * on-disk counters are corrupt. */
1599 if (!rgd->rd_dinodes)
1600 gfs2_consist_rgrpd(rgd);
/* Journal and write back the rgrp header with updated counters. */
1604 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1605 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
/* statfs: one more free block, one fewer dinode. */
1607 gfs2_statfs_change(sdp, 0, +1, -1);
1608 gfs2_trans_add_rg(rgd);
/* gfs2_free_di - fully free an inode's dinode block: bitmap/rgrp
 * accounting, then quota credit, then wipe of cached metadata. */
1612 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
1614 gfs2_free_uninit_di(rgd, ip->i_no_addr);
1615 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1616 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
1620 * gfs2_rlist_add - add a RG to a list of RGs
1621 * @sdp: the filesystem
1622 * @rlist: the list of resource groups
1625 * Figure out what RG a block belongs to and add that RG to the list
1627 * FIXME: Don't use NOFAIL
1631 void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
1634 struct gfs2_rgrpd *rgd;
1635 struct gfs2_rgrpd **tmp;
1636 unsigned int new_space;
/* Must be called before the holder array exists (i.e. before
 * gfs2_rlist_alloc()); warn otherwise. */
1639 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
/* Map the block to its resource group; a miss is a consistency error. */
1642 rgd = gfs2_blk2rgrpd(sdp, block)
1644 if (gfs2_consist(sdp))
1645 fs_err(sdp, "block = %llu\n", (unsigned long long)block);
/* Dedup: if the rgrp is already on the list there is nothing to do. */
1649 for (x = 0; x < rlist->rl_rgrps; x++)
1650 if (rlist->rl_rgd[x] == rgd)
/* Grow the pointer array in chunks of 10 when full.
 * NOTE(review): __GFP_NOFAIL allocation -- see FIXME above. */
1653 if (rlist->rl_rgrps == rlist->rl_space) {
1654 new_space = rlist->rl_space + 10;
1656 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
1657 GFP_NOFS | __GFP_NOFAIL);
1659 if (rlist->rl_rgd) {
1660 memcpy(tmp, rlist->rl_rgd,
1661 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
1662 kfree(rlist->rl_rgd);
1665 rlist->rl_space = new_space;
1666 rlist->rl_rgd = tmp;
/* Append the new rgrp. */
1669 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
1673 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
1674 * and initialize an array of glock holders for them
1675 * @rlist: the list of resource groups
1676 * @state: the lock state to acquire the RG lock in
1679 * FIXME: Don't use NOFAIL
1683 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
/* One holder per listed rgrp; __GFP_NOFAIL -- see FIXME above. */
1687 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
1688 GFP_NOFS | __GFP_NOFAIL);
/* Initialize (but do not acquire) a holder on each rgrp's glock in the
 * requested lock state. */
1689 for (x = 0; x < rlist->rl_rgrps; x++)
1690 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
1696 * gfs2_rlist_free - free a resource group list
1697 * @list: the list of resource groups
1701 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
/* kfree(NULL) is a no-op, so rl_rgd needs no guard. */
1705 kfree(rlist->rl_rgd);
/* Holders must be uninitialized (dropping glock refs) before the
 * array itself is freed. */
1707 if (rlist->rl_ghs) {
1708 for (x = 0; x < rlist->rl_rgrps; x++)
1709 gfs2_holder_uninit(&rlist->rl_ghs[x]);
1710 kfree(rlist->rl_ghs);