/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bits.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "ops_file.h"

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_bitmap *bi = NULL;
        uint32_t length = rgd->rd_ri.ri_length;
        uint32_t count[4], tmp;
        int buf, x;

        memset(count, 0, 4 * sizeof(uint32_t));

        /* Count # blocks in each of 4 possible allocation states */
        for (buf = 0; buf < length; buf++) {
                bi = rgd->rd_bits + buf;
                for (x = 0; x < 4; x++)
                        count[x] += gfs2_bitcount(rgd,
                                                  bi->bi_bh->b_data +
                                                  bi->bi_offset,
                                                  bi->bi_len, x);
        }

        if (count[0] != rgd->rd_rg.rg_free) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "free data mismatch:  %u != %u\n",
                               count[0], rgd->rd_rg.rg_free);
                return;
        }

        tmp = rgd->rd_ri.ri_data -
                rgd->rd_rg.rg_free -
                rgd->rd_rg.rg_dinodes;
        if (count[1] != tmp) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "used data mismatch:  %u != %u\n",
                               count[1], tmp);
                return;
        }

        if (count[2]) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "free metadata mismatch:  %u != 0\n",
                               count[2]);
                return;
        }

        if (count[3] != rgd->rd_rg.rg_dinodes) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "used metadata mismatch:  %u != %u\n",
                               count[3], rgd->rd_rg.rg_dinodes);
                return;
        }
}

static inline int rgrp_contains_block(struct gfs2_rindex *ri, uint64_t block)
{
        uint64_t first = ri->ri_data0;
        uint64_t last = first + ri->ri_data;
        return !!(first <= block && block < last);
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, uint64_t blk)
{
        struct gfs2_rgrpd *rgd;

        spin_lock(&sdp->sd_rindex_spin);

        list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
                if (rgrp_contains_block(&rgd->rd_ri, blk)) {
                        list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
                        spin_unlock(&sdp->sd_rindex_spin);
                        return rgd;
                }
        }

        spin_unlock(&sdp->sd_rindex_spin);

        return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
        gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
        return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: A RG
 *
 * Returns: The next rgrp, or NULL if this was the last one
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
        if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
                return NULL;
        return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
}

static void clear_rgrpdi(struct gfs2_sbd *sdp)
{
        struct list_head *head;
        struct gfs2_rgrpd *rgd;
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_rindex_spin);
        sdp->sd_rindex_forward = NULL;
        head = &sdp->sd_rindex_recent_list;
        while (!list_empty(head)) {
                rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
                list_del(&rgd->rd_recent);
        }
        spin_unlock(&sdp->sd_rindex_spin);

        head = &sdp->sd_rindex_list;
        while (!list_empty(head)) {
                rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
                gl = rgd->rd_gl;

                list_del(&rgd->rd_list);
                list_del(&rgd->rd_list_mru);

                if (gl) {
                        set_gl2rgd(gl, NULL);
                        gfs2_glock_put(gl);
                }

                kfree(rgd->rd_bits);
                kfree(rgd);
        }
}

void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
        mutex_lock(&sdp->sd_rindex_mutex);
        clear_rgrpdi(sdp);
        mutex_unlock(&sdp->sd_rindex_mutex);
}

/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_bitmap *bi;
        uint32_t length = rgd->rd_ri.ri_length; /* # blocks in hdr & bitmap */
        uint32_t bytes_left, bytes;
        int x;

        rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_KERNEL);
        if (!rgd->rd_bits)
                return -ENOMEM;

        bytes_left = rgd->rd_ri.ri_bitbytes;

        for (x = 0; x < length; x++) {
                bi = rgd->rd_bits + x;

                /* small rgrp; bitmap stored completely in header block */
                if (length == 1) {
                        bytes = bytes_left;
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_start = 0;
                        bi->bi_len = bytes;
                /* header block */
                } else if (x == 0) {
                        bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_start = 0;
                        bi->bi_len = bytes;
                /* last block */
                } else if (x + 1 == length) {
                        bytes = bytes_left;
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
                        bi->bi_len = bytes;
                /* other blocks */
                } else {
                        bytes = sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header);
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
                        bi->bi_len = bytes;
                }

                bytes_left -= bytes;
        }

        if (bytes_left) {
                gfs2_consist_rgrpd(rgd);
                return -EIO;
        }
        bi = rgd->rd_bits + (length - 1);
        if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_ri.ri_data) {
                if (gfs2_consist_rgrpd(rgd)) {
                        gfs2_rindex_print(&rgd->rd_ri);
                        fs_err(sdp, "start=%u len=%u offset=%u\n",
                               bi->bi_start, bi->bi_len, bi->bi_offset);
                }
                return -EIO;
        }

        return 0;
}
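
/*
 * Illustrative note (added commentary, not in the original source):
 * each bitmap byte describes GFS2_NBBY (four) blocks, two bits per
 * block, one GFS2_BLKST_* state per pair of bits.  So the header block
 * covers (sb_bsize - sizeof(struct gfs2_rgrp)) * GFS2_NBBY blocks and
 * every further bitmap block covers
 * (sb_bsize - sizeof(struct gfs2_meta_header)) * GFS2_NBBY blocks,
 * which is why the final consistency check above multiplies the byte
 * count by GFS2_NBBY and compares it with ri_data.
 */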

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct inode *inode = ip->i_vnode;
        struct gfs2_rgrpd *rgd;
        char buf[sizeof(struct gfs2_rindex)];
        struct file_ra_state ra_state;
        uint64_t junk = ip->i_di.di_size;
        int error;

        if (do_div(junk, sizeof(struct gfs2_rindex))) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        clear_rgrpdi(sdp);

        file_ra_state_init(&ra_state, inode->i_mapping);
        for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
                loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
                error = gfs2_internal_read(ip, &ra_state, buf, &pos,
                                            sizeof(struct gfs2_rindex));
                if (!error)
                        break;
                if (error != sizeof(struct gfs2_rindex)) {
                        if (error > 0)
                                error = -EIO;
                        goto fail;
                }

                rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_KERNEL);
                error = -ENOMEM;
                if (!rgd)
                        goto fail;

                mutex_init(&rgd->rd_mutex);
                lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
                rgd->rd_sbd = sdp;

                list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
                list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);

                gfs2_rindex_in(&rgd->rd_ri, buf);

                error = compute_bitstructs(rgd);
                if (error)
                        goto fail;

                error = gfs2_glock_get(sdp, rgd->rd_ri.ri_addr,
                                       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
                if (error)
                        goto fail;

                set_gl2rgd(rgd->rd_gl, rgd);
                rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
        }

        sdp->sd_rindex_vn = ip->i_gl->gl_vn;

        return 0;

 fail:
        clear_rgrpdi(sdp);

        return error;
}

/**
 * gfs2_rindex_hold - Grab a lock on the rindex
 * @sdp: The GFS2 superblock
 * @ri_gh: the glock holder
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
{
        struct gfs2_inode *ip = get_v2ip(sdp->sd_rindex);
        struct gfs2_glock *gl = ip->i_gl;
        int error;

        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
        if (error)
                return error;

        /* Read new copy from disk if we don't have the latest */
        if (sdp->sd_rindex_vn != gl->gl_vn) {
                mutex_lock(&sdp->sd_rindex_mutex);
                if (sdp->sd_rindex_vn != gl->gl_vn) {
                        error = gfs2_ri_update(ip);
                        if (error)
                                gfs2_glock_dq_uninit(ri_gh);
                }
                mutex_unlock(&sdp->sd_rindex_mutex);
        }

        return error;
}
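
/*
 * Illustrative sketch (added commentary, not in the original source):
 * callers pair gfs2_rindex_hold() with gfs2_glock_dq_uninit() on the
 * same holder, roughly as follows (error handling abbreviated):
 *
 *        struct gfs2_holder ri_gh;
 *        int error = gfs2_rindex_hold(sdp, &ri_gh);
 *        if (!error) {
 *                ... use the resource index ...
 *                gfs2_glock_dq_uninit(&ri_gh);
 *        }
 */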

/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_bh_put() to release the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_glock *gl = rgd->rd_gl;
        unsigned int length = rgd->rd_ri.ri_length;
        struct gfs2_bitmap *bi;
        unsigned int x, y;
        int error;

        mutex_lock(&rgd->rd_mutex);

        spin_lock(&sdp->sd_rindex_spin);
        if (rgd->rd_bh_count) {
                rgd->rd_bh_count++;
                spin_unlock(&sdp->sd_rindex_spin);
                mutex_unlock(&rgd->rd_mutex);
                return 0;
        }
        spin_unlock(&sdp->sd_rindex_spin);

        for (x = 0; x < length; x++) {
                bi = rgd->rd_bits + x;
                error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, DIO_START,
                                       &bi->bi_bh);
                if (error)
                        goto fail;
        }

        for (y = length; y--;) {
                bi = rgd->rd_bits + y;
                error = gfs2_meta_reread(sdp, bi->bi_bh, DIO_WAIT);
                if (error)
                        goto fail;
                if (gfs2_metatype_check(sdp, bi->bi_bh,
                                        (y) ? GFS2_METATYPE_RB :
                                              GFS2_METATYPE_RG)) {
                        error = -EIO;
                        goto fail;
                }
        }

        if (rgd->rd_rg_vn != gl->gl_vn) {
                gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
                rgd->rd_rg_vn = gl->gl_vn;
        }

        spin_lock(&sdp->sd_rindex_spin);
        rgd->rd_free_clone = rgd->rd_rg.rg_free;
        rgd->rd_bh_count++;
        spin_unlock(&sdp->sd_rindex_spin);

        mutex_unlock(&rgd->rd_mutex);

        return 0;

 fail:
        while (x--) {
                bi = rgd->rd_bits + x;
                brelse(bi->bi_bh);
                bi->bi_bh = NULL;
                gfs2_assert_warn(sdp, !bi->bi_clone);
        }
        mutex_unlock(&rgd->rd_mutex);

        return error;
}

void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;

        spin_lock(&sdp->sd_rindex_spin);
        gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
        rgd->rd_bh_count++;
        spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: the struct gfs2_rgrpd describing the RG whose bitmaps are released
 *
 */

void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        int x, length = rgd->rd_ri.ri_length;

        spin_lock(&sdp->sd_rindex_spin);
        gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
        if (--rgd->rd_bh_count) {
                spin_unlock(&sdp->sd_rindex_spin);
                return;
        }

        for (x = 0; x < length; x++) {
                struct gfs2_bitmap *bi = rgd->rd_bits + x;
                kfree(bi->bi_clone);
                bi->bi_clone = NULL;
                brelse(bi->bi_bh);
                bi->bi_bh = NULL;
        }

        spin_unlock(&sdp->sd_rindex_spin);
}
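
/*
 * Illustrative sketch (added commentary, not in the original source):
 * rd_bh_count makes the bitmap buffers reference counted, so a typical
 * caller looks like
 *
 *        error = gfs2_rgrp_bh_get(rgd);
 *        if (!error) {
 *                ... examine or modify the bitmaps ...
 *                gfs2_rgrp_bh_put(rgd);
 *        }
 *
 * while gfs2_rgrp_bh_hold() takes an extra reference on buffers that
 * are already read in.
 */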

void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        unsigned int length = rgd->rd_ri.ri_length;
        unsigned int x;

        for (x = 0; x < length; x++) {
                struct gfs2_bitmap *bi = rgd->rd_bits + x;
                if (!bi->bi_clone)
                        continue;
                memcpy(bi->bi_clone + bi->bi_offset,
                       bi->bi_bh->b_data + bi->bi_offset,
                       bi->bi_len);
        }

        spin_lock(&sdp->sd_rindex_spin);
        rgd->rd_free_clone = rgd->rd_rg.rg_free;
        spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
 * @ip: the incore GFS2 inode structure
 *
 * Returns: the struct gfs2_alloc
 */

struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al = &ip->i_alloc;

        /* FIXME: Should assert that the correct locks are held here... */
        memset(al, 0, sizeof(*al));
        return al;
}

/**
 * gfs2_alloc_put - throw away the struct gfs2_alloc for an inode
 * @ip: the inode
 *
 */

void gfs2_alloc_put(struct gfs2_inode *ip)
{
        return;
}

/**
 * try_rgrp_fit - See if a given reservation will fit in a given RG
 * @rgd: the RG data
 * @al: the struct gfs2_alloc structure describing the reservation
 *
 * If there's room for the requested blocks to be allocated from the RG,
 * this sets the @al_rgd field in @al.
 *
 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
 */

static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        int ret = 0;

        spin_lock(&sdp->sd_rindex_spin);
        if (rgd->rd_free_clone >= al->al_requested) {
                al->al_rgd = rgd;
                ret = 1;
        }
        spin_unlock(&sdp->sd_rindex_spin);

        return ret;
}

/**
 * recent_rgrp_first - get first RG from "recent" list
 * @sdp: The GFS2 superblock
 * @rglast: address of the rgrp used last
 *
 * Returns: The first rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
                                            uint64_t rglast)
{
        struct gfs2_rgrpd *rgd = NULL;

        spin_lock(&sdp->sd_rindex_spin);

        if (list_empty(&sdp->sd_rindex_recent_list))
                goto out;

        if (!rglast)
                goto first;

        list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
                if (rgd->rd_ri.ri_addr == rglast)
                        goto out;
        }

 first:
        rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
                         rd_recent);

 out:
        spin_unlock(&sdp->sd_rindex_spin);

        return rgd;
}

/**
 * recent_rgrp_next - get next RG from "recent" list
 * @cur_rgd: current rgrp
 * @remove: non-zero if @cur_rgd should be removed from the recent list
 *
 * Returns: The next rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
                                           int remove)
{
        struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
        struct list_head *head;
        struct gfs2_rgrpd *rgd;

        spin_lock(&sdp->sd_rindex_spin);

        head = &sdp->sd_rindex_recent_list;

        list_for_each_entry(rgd, head, rd_recent) {
                if (rgd == cur_rgd) {
                        if (cur_rgd->rd_recent.next != head)
                                rgd = list_entry(cur_rgd->rd_recent.next,
                                                 struct gfs2_rgrpd, rd_recent);
                        else
                                rgd = NULL;

                        if (remove)
                                list_del(&cur_rgd->rd_recent);

                        goto out;
                }
        }

        rgd = NULL;
        if (!list_empty(head))
                rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);

 out:
        spin_unlock(&sdp->sd_rindex_spin);

        return rgd;
}

/**
 * recent_rgrp_add - add an RG to tail of "recent" list
 * @new_rgd: The rgrp to add
 *
 */

static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
{
        struct gfs2_sbd *sdp = new_rgd->rd_sbd;
        struct gfs2_rgrpd *rgd;
        unsigned int count = 0;
        unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);

        spin_lock(&sdp->sd_rindex_spin);

        list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
                if (rgd == new_rgd)
                        goto out;

                if (++count >= max)
                        goto out;
        }
        list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);

 out:
        spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * forward_rgrp_get - get an rgrp to try next from full list
 * @sdp: The GFS2 superblock
 *
 * Returns: The rgrp to try next
 */

static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
{
        struct gfs2_rgrpd *rgd;
        unsigned int journals = gfs2_jindex_size(sdp);
        unsigned int rg = 0, x;

        spin_lock(&sdp->sd_rindex_spin);

        rgd = sdp->sd_rindex_forward;
        if (!rgd) {
                if (sdp->sd_rgrps >= journals)
                        rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;

                for (x = 0, rgd = gfs2_rgrpd_get_first(sdp);
                     x < rg;
                     x++, rgd = gfs2_rgrpd_get_next(rgd))
                        /* Do Nothing */;

                sdp->sd_rindex_forward = rgd;
        }

        spin_unlock(&sdp->sd_rindex_spin);

        return rgd;
}

/**
 * forward_rgrp_set - set the forward rgrp pointer
 * @sdp: the filesystem
 * @rgd: The new forward rgrp
 *
 */

static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
{
        spin_lock(&sdp->sd_rindex_spin);
        sdp->sd_rindex_forward = rgd;
        spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * get_local_rgrp - Choose and lock a rgrp for allocation
 * @ip: the inode to reserve space for
 *
 * Try to acquire an rgrp in a way which avoids contending with others.
 * On success, the chosen rgrp is stored in ip->i_alloc.al_rgd.
 *
 * Returns: errno
 */

static int get_local_rgrp(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_rgrpd *rgd, *begin = NULL;
        struct gfs2_alloc *al = &ip->i_alloc;
        int flags = LM_FLAG_TRY;
        int skipped = 0;
        int loops = 0;
        int error;

        /* Try recently successful rgrps */

        rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);

        while (rgd) {
                error = gfs2_glock_nq_init(rgd->rd_gl,
                                          LM_ST_EXCLUSIVE, LM_FLAG_TRY,
                                          &al->al_rgd_gh);
                switch (error) {
                case 0:
                        if (try_rgrp_fit(rgd, al))
                                goto out;
                        gfs2_glock_dq_uninit(&al->al_rgd_gh);
                        rgd = recent_rgrp_next(rgd, 1);
                        break;

                case GLR_TRYFAILED:
                        rgd = recent_rgrp_next(rgd, 0);
                        break;

                default:
                        return error;
                }
        }

        /* Go through full list of rgrps */

        begin = rgd = forward_rgrp_get(sdp);

        for (;;) {
                error = gfs2_glock_nq_init(rgd->rd_gl,
                                          LM_ST_EXCLUSIVE, flags,
                                          &al->al_rgd_gh);
                switch (error) {
                case 0:
                        if (try_rgrp_fit(rgd, al))
                                goto out;
                        gfs2_glock_dq_uninit(&al->al_rgd_gh);
                        break;

                case GLR_TRYFAILED:
                        skipped++;
                        break;

                default:
                        return error;
                }

                rgd = gfs2_rgrpd_get_next(rgd);
                if (!rgd)
                        rgd = gfs2_rgrpd_get_first(sdp);

                if (rgd == begin) {
                        if (++loops >= 2 || !skipped)
                                return -ENOSPC;
                        flags = 0;
                }
        }

 out:
        ip->i_last_rg_alloc = rgd->rd_ri.ri_addr;

        if (begin) {
                recent_rgrp_add(rgd);
                rgd = gfs2_rgrpd_get_next(rgd);
                if (!rgd)
                        rgd = gfs2_rgrpd_get_first(sdp);
                forward_rgrp_set(sdp, rgd);
        }

        return 0;
}

/**
 * gfs2_inplace_reserve_i - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @file: the source file of the caller (for debugging)
 * @line: the source line of the caller (for debugging)
 *
 * Returns: errno
 */

int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        int error;

        if (gfs2_assert_warn(sdp, al->al_requested))
                return -EINVAL;

        error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
        if (error)
                return error;

        error = get_local_rgrp(ip);
        if (error) {
                gfs2_glock_dq_uninit(&al->al_ri_gh);
                return error;
        }

        al->al_file = file;
        al->al_line = line;

        return 0;
}

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;

        if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
                fs_warn(sdp, "al_alloced = %u, al_requested = %u "
                             "al_file = %s, al_line = %u\n",
                             al->al_alloced, al->al_requested, al->al_file,
                             al->al_line);

        al->al_rgd = NULL;
        gfs2_glock_dq_uninit(&al->al_rgd_gh);
        gfs2_glock_dq_uninit(&al->al_ri_gh);
}
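
/*
 * Illustrative sketch (added commentary, not in the original source):
 * a single-block allocation built from the helpers in this file
 * typically follows this sequence (error handling abbreviated):
 *
 *        struct gfs2_alloc *al = gfs2_alloc_get(ip);
 *        al->al_requested = 1;
 *        error = gfs2_inplace_reserve_i(ip, __FILE__, __LINE__);
 *        if (!error) {
 *                block = gfs2_alloc_data(ip);
 *                gfs2_inplace_release(ip);
 *        }
 *        gfs2_alloc_put(ip);
 */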

/**
 * gfs2_get_block_type - Determine the type of a block in a RG
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, uint64_t block)
{
        struct gfs2_bitmap *bi = NULL;
        uint32_t length, rgrp_block, buf_block;
        unsigned int buf;
        unsigned char type;

        length = rgd->rd_ri.ri_length;
        rgrp_block = block - rgd->rd_ri.ri_data0;

        for (buf = 0; buf < length; buf++) {
                bi = rgd->rd_bits + buf;
                if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
                        break;
        }

        gfs2_assert(rgd->rd_sbd, buf < length);
        buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;

        type = gfs2_testbit(rgd,
                           bi->bi_bh->b_data + bi->bi_offset,
                           bi->bi_len, buf_block);

        return type;
}

/**
 * rgblk_search - find a block in @old_state, change allocation
 *           state to @new_state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal and returned block is just within rgrp, not the whole
 * filesystem.
 *
 * Returns:  the block number allocated
 */

static uint32_t rgblk_search(struct gfs2_rgrpd *rgd, uint32_t goal,
                             unsigned char old_state, unsigned char new_state)
{
        struct gfs2_bitmap *bi = NULL;
        uint32_t length = rgd->rd_ri.ri_length;
        uint32_t blk = 0;
        unsigned int buf, x;

        /* Find bitmap block that contains bits for goal block */
        for (buf = 0; buf < length; buf++) {
                bi = rgd->rd_bits + buf;
                if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
                        break;
        }

        gfs2_assert(rgd->rd_sbd, buf < length);

        /* Convert scope of "goal" from rgrp-wide to within found bit block */
        goal -= bi->bi_start * GFS2_NBBY;

        /* Search (up to entire) bitmap in this rgrp for allocatable block.
           "x <= length", instead of "x < length", because we typically start
           the search in the middle of a bit block, but if we can't find an
           allocatable block anywhere else, we want to be able to wrap around
           and search in the first part of our first-searched bit block.  */
        for (x = 0; x <= length; x++) {
                if (bi->bi_clone)
                        blk = gfs2_bitfit(rgd,
                                          bi->bi_clone + bi->bi_offset,
                                          bi->bi_len, goal, old_state);
                else
                        blk = gfs2_bitfit(rgd,
                                          bi->bi_bh->b_data + bi->bi_offset,
                                          bi->bi_len, goal, old_state);
                if (blk != BFITNOENT)
                        break;

                /* Try next bitmap block (wrap back to rgrp header if at end) */
                buf = (buf + 1) % length;
                bi = rgd->rd_bits + buf;
                goal = 0;
        }

        if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length))
                blk = 0;

        gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
        gfs2_setbit(rgd,
                    bi->bi_bh->b_data + bi->bi_offset,
                    bi->bi_len, blk, new_state);
        if (bi->bi_clone)
                gfs2_setbit(rgd,
                            bi->bi_clone + bi->bi_offset,
                            bi->bi_len, blk, new_state);

        return bi->bi_start * GFS2_NBBY + blk;
}
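
/*
 * Illustrative note (added commentary, not in the original source):
 * both @goal and the value returned by rgblk_search() are relative to
 * the resource group, so a returned value blk corresponds to
 * filesystem block rgd->rd_ri.ri_data0 + blk; that is exactly the
 * conversion the allocators below perform.
 */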

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns:  Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, uint64_t bstart,
                                     uint32_t blen, unsigned char new_state)
{
        struct gfs2_rgrpd *rgd;
        struct gfs2_bitmap *bi = NULL;
        uint32_t length, rgrp_blk, buf_blk;
        unsigned int buf;

        rgd = gfs2_blk2rgrpd(sdp, bstart);
        if (!rgd) {
                if (gfs2_consist(sdp))
                        fs_err(sdp, "block = %llu\n", bstart);
                return NULL;
        }

        length = rgd->rd_ri.ri_length;

        rgrp_blk = bstart - rgd->rd_ri.ri_data0;

        while (blen--) {
                for (buf = 0; buf < length; buf++) {
                        bi = rgd->rd_bits + buf;
                        if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
                                break;
                }

                gfs2_assert(rgd->rd_sbd, buf < length);

                buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
                rgrp_blk++;

                if (!bi->bi_clone) {
                        bi->bi_clone = kmalloc(bi->bi_bh->b_size,
                                               GFP_KERNEL | __GFP_NOFAIL);
                        memcpy(bi->bi_clone + bi->bi_offset,
                               bi->bi_bh->b_data + bi->bi_offset,
                               bi->bi_len);
                }
                gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
                gfs2_setbit(rgd,
                            bi->bi_bh->b_data + bi->bi_offset,
                            bi->bi_len, buf_blk, new_state);
        }

        return rgd;
}

/**
 * gfs2_alloc_data - Allocate a data block
 * @ip: the inode to allocate the data block for
 *
 * Returns: the allocated block
 */

uint64_t gfs2_alloc_data(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_rgrpd *rgd = al->al_rgd;
        uint32_t goal, blk;
        uint64_t block;

        if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data))
                goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0;
        else
                goal = rgd->rd_last_alloc_data;

        blk = rgblk_search(rgd, goal,
                           GFS2_BLKST_FREE, GFS2_BLKST_USED);
        rgd->rd_last_alloc_data = blk;

        block = rgd->rd_ri.ri_data0 + blk;
        ip->i_di.di_goal_data = block;

        gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
        rgd->rd_rg.rg_free--;

        gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
        gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

        al->al_alloced++;

        gfs2_statfs_change(sdp, 0, -1, 0);
        gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);

        spin_lock(&sdp->sd_rindex_spin);
        rgd->rd_free_clone--;
        spin_unlock(&sdp->sd_rindex_spin);

        return block;
}

/**
 * gfs2_alloc_meta - Allocate a metadata block
 * @ip: the inode to allocate the metadata block for
 *
 * Returns: the allocated block
 */

uint64_t gfs2_alloc_meta(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_rgrpd *rgd = al->al_rgd;
        uint32_t goal, blk;
        uint64_t block;

        if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta))
                goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0;
        else
                goal = rgd->rd_last_alloc_meta;

        blk = rgblk_search(rgd, goal,
                           GFS2_BLKST_FREE, GFS2_BLKST_USED);
        rgd->rd_last_alloc_meta = blk;

        block = rgd->rd_ri.ri_data0 + blk;
        ip->i_di.di_goal_meta = block;

        gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
        rgd->rd_rg.rg_free--;

        gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
        gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

        al->al_alloced++;

        gfs2_statfs_change(sdp, 0, -1, 0);
        gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
        gfs2_trans_add_unrevoke(sdp, block);

        spin_lock(&sdp->sd_rindex_spin);
        rgd->rd_free_clone--;
        spin_unlock(&sdp->sd_rindex_spin);

        return block;
}

/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 *
 * Returns: the block allocated
 */

uint64_t gfs2_alloc_di(struct gfs2_inode *dip)
{
        struct gfs2_sbd *sdp = dip->i_sbd;
        struct gfs2_alloc *al = &dip->i_alloc;
        struct gfs2_rgrpd *rgd = al->al_rgd;
        uint32_t blk;
        uint64_t block;

        blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
                           GFS2_BLKST_FREE, GFS2_BLKST_DINODE);

        rgd->rd_last_alloc_meta = blk;

        block = rgd->rd_ri.ri_data0 + blk;

        gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
        rgd->rd_rg.rg_free--;
        rgd->rd_rg.rg_dinodes++;

        gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
        gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

        al->al_alloced++;

        gfs2_statfs_change(sdp, 0, -1, +1);
        gfs2_trans_add_unrevoke(sdp, block);

        spin_lock(&sdp->sd_rindex_spin);
        rgd->rd_free_clone--;
        spin_unlock(&sdp->sd_rindex_spin);

        return block;
}

/**
 * gfs2_free_data - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_data(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_rgrpd *rgd;

        rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
        if (!rgd)
                return;

        rgd->rd_rg.rg_free += blen;

        gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
        gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

        gfs2_trans_add_rg(rgd);

        gfs2_statfs_change(sdp, 0, +blen, 0);
        gfs2_quota_change(ip, -(int64_t)blen,
                         ip->i_di.di_uid, ip->i_di.di_gid);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_rgrpd *rgd;

        rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
        if (!rgd)
                return;

        rgd->rd_rg.rg_free += blen;

        gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
        gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

        gfs2_trans_add_rg(rgd);

        gfs2_statfs_change(sdp, 0, +blen, 0);
        gfs2_quota_change(ip, -(int64_t)blen,
                         ip->i_di.di_uid, ip->i_di.di_gid);
        gfs2_meta_wipe(ip, bstart, blen);
}

void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, uint64_t blkno)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_rgrpd *tmp_rgd;

        tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
        if (!tmp_rgd)
                return;
        gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

        if (!rgd->rd_rg.rg_dinodes)
                gfs2_consist_rgrpd(rgd);
        rgd->rd_rg.rg_dinodes--;
        rgd->rd_rg.rg_free++;

        gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
        gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

        gfs2_statfs_change(sdp, 0, +1, -1);
        gfs2_trans_add_rg(rgd);
}

/**
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group that contains the dinode
 * @ip: the inode
 *
 */

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
        gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
        gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid);
        gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
}

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @sdp: the filesystem
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
                    uint64_t block)
{
        struct gfs2_rgrpd *rgd;
        struct gfs2_rgrpd **tmp;
        unsigned int new_space;
        unsigned int x;

        if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
                return;

        rgd = gfs2_blk2rgrpd(sdp, block);
        if (!rgd) {
                if (gfs2_consist(sdp))
                        fs_err(sdp, "block = %llu\n", block);
                return;
        }

        for (x = 0; x < rlist->rl_rgrps; x++)
                if (rlist->rl_rgd[x] == rgd)
                        return;

        if (rlist->rl_rgrps == rlist->rl_space) {
                new_space = rlist->rl_space + 10;

                tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
                              GFP_KERNEL | __GFP_NOFAIL);

                if (rlist->rl_rgd) {
                        memcpy(tmp, rlist->rl_rgd,
                               rlist->rl_space * sizeof(struct gfs2_rgrpd *));
                        kfree(rlist->rl_rgd);
                }

                rlist->rl_space = new_space;
                rlist->rl_rgd = tmp;
        }

        rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *      and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 * @flags: the modifier flags for the holder structures
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
                      int flags)
{
        unsigned int x;

        rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
                                GFP_KERNEL | __GFP_NOFAIL);
        for (x = 0; x < rlist->rl_rgrps; x++)
                gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
                                state, flags,
                                &rlist->rl_ghs[x]);
}
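
/*
 * Illustrative sketch (added commentary, not in the original source):
 * a caller builds the list with gfs2_rlist_add() for each block it
 * intends to touch, allocates and acquires the holders, and finally
 * releases everything with gfs2_rlist_free(), roughly:
 *
 *        struct gfs2_rgrp_list rlist;
 *        unsigned int x;
 *
 *        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 *        gfs2_rlist_add(sdp, &rlist, block);
 *        gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
 *        for (x = 0; x < rlist.rl_rgrps; x++)
 *                gfs2_glock_nq(&rlist.rl_ghs[x]);
 *        ...
 *        for (x = 0; x < rlist.rl_rgrps; x++)
 *                gfs2_glock_dq(&rlist.rl_ghs[x]);
 *        gfs2_rlist_free(&rlist);
 */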

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
        unsigned int x;

        kfree(rlist->rl_rgd);

        if (rlist->rl_ghs) {
                for (x = 0; x < rlist->rl_rgrps; x++)
                        gfs2_holder_uninit(&rlist->rl_ghs[x]);
                kfree(rlist->rl_ghs);
        }
}
