/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" value greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Values less than one (but greater
 * than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not constantly being read.
 */
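/*
 * Roughly, the scaled-sync heuristic implemented by need_sync() below
 * compares a node's pending local change, scaled by the number of
 * journals (nodes) and by quota_scale, against the limit:
 *
 *	lvb_value + change * journals * scale_num / scale_den >= limit
 *
 * triggers an early sync. For example (illustrative numbers): with
 * limit = 1000 blocks, a cached lvb_value of 900, 4 journals, and the
 * default scale of 1/1, an unsynced local change of 25 blocks gives
 * 900 + 25 * 4 = 1000, so the node syncs early instead of waiting out
 * the full quota_quantum.
 */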
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/tty.h>
#include <linux/sort.h>

#include <asm/semaphore.h>

#include "ops_address.h"
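/*
 * qd2offset - byte offset of an ID's entry in the global quota file.
 * User and group quotas for the same numeric ID are interleaved: the
 * user entry sits at index 2 * id, the group entry at 2 * id + 1.
 */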
static uint64_t qd2offset(struct gfs2_quota_data *qd)

        offset = 2 * (uint64_t)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
        offset *= sizeof(struct gfs2_quota);
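/*
 * qd_alloc - allocate an in-core gfs2_quota_data for one (type, id)
 * pair and attach its per-ID glock, plus a hold on that glock's LVB.
 */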
static int qd_alloc(struct gfs2_sbd *sdp, int user, uint32_t id,
                    struct gfs2_quota_data **qdp)

        struct gfs2_quota_data *qd;

        qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);

        set_bit(QDF_USER, &qd->qd_flags);

        error = gfs2_glock_get(sdp, 2 * (uint64_t)id + !user,
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);

        error = gfs2_lvb_hold(qd->qd_gl);
        gfs2_glock_put(qd->qd_gl);
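/*
 * qd_get - find the gfs2_quota_data for (type, id) on sd_quota_list, or
 * install a freshly allocated one. Allocation happens with the spinlock
 * dropped, so the list is searched again and a losing allocation has
 * its LVB hold released.
 */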
static int qd_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
                  struct gfs2_quota_data **qdp)

        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (qd->qd_id == id &&
                    !test_bit(QDF_USER, &qd->qd_flags) == !user) {

        list_add(&qd->qd_list, &sdp->sd_quota_list);
        atomic_inc(&sdp->sd_quota_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_lvb_unhold(new_qd->qd_gl);

        error = qd_alloc(sdp, user, id, &new_qd);
static void qd_hold(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);

        spin_unlock(&sdp->sd_quota_spin);

static void qd_put(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);

        qd->qd_last_touched = jiffies;
        spin_unlock(&sdp->sd_quota_spin);
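/*
 * slot_get - claim a slot in this node's quota_change file for the qd.
 * Free slots are tracked in sd_quota_bitmap, one bit per slot; the first
 * clear bit found yields qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b.
 */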
static int slot_get(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&sdp->sd_quota_spin);

        if (qd->qd_slot_count++) {
                spin_unlock(&sdp->sd_quota_spin);

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];

        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))

        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);
static void slot_hold(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);

        spin_unlock(&sdp->sd_quota_spin);

static void slot_put(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);

        spin_unlock(&sdp->sd_quota_spin);
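/*
 * bh_get - read the quota_change-file block backing this qd's slot and
 * point qd_bh_qc at the gfs2_quota_change record within it. The buffer
 * is shared by every slot in the same block and refcounted through
 * qd_bh_count.
 */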
static int bh_get(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = get_v2ip(sdp->sd_qc_inode);
        unsigned int block, offset;

        struct buffer_head *bh;

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;
        error = gfs2_block_map(ip, block, &new, &dblock, NULL);

        error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);

        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))

        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_lock(&sdp->sd_quota_mutex);

        mutex_unlock(&sdp->sd_quota_mutex);
static void bh_put(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {

        mutex_unlock(&sdp->sd_quota_mutex);
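/*
 * qd_fish - pick the next quota_data with unsynced local changes for
 * gfs2_quota_sync(), skipping entries that are already locked for sync
 * or already at the current sync generation, and rotate the choice to
 * the tail of the list.
 */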
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)

        struct gfs2_quota_data *qd = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)

        spin_lock(&sdp->sd_quota_spin);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, qd->qd_count);

                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);

        clear_bit(QDF_LOCKED, &qd->qd_flags);
static int qd_trylock(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)

        spin_lock(&sdp->sd_quota_spin);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&sdp->sd_quota_spin);

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, qd->qd_count);

        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);

        clear_bit(QDF_LOCKED, &qd->qd_flags);

static void qd_unlock(struct gfs2_quota_data *qd)

        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
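/*
 * qdsb_get - assemble everything needed to track one ID's usage on this
 * node: the quota_data itself, a slot in the quota_change file, and the
 * buffer backing that slot.
 */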
static int qdsb_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
                    struct gfs2_quota_data **qdp)

        error = qd_get(sdp, user, id, create, qdp);

        error = slot_get(*qdp);

        error = bh_get(*qdp);

static void qdsb_put(struct gfs2_quota_data *qd)
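/*
 * gfs2_quota_hold - hold quota_data for the inode's current owner and
 * group and, on a chown/chgrp, for the target uid/gid as well, so an
 * operation references at most four qds.
 */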
int gfs2_quota_hold(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)

        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data **qd = al->al_qd;

        if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)

        error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);

        error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);

        if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);

        if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);

        gfs2_quota_unhold(ip);

void gfs2_quota_unhold(struct gfs2_inode *ip)

        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < al->al_qd_num; x++) {
                qdsb_put(al->al_qd[x]);
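/*
 * sort_qd - comparison callback for sort(): user quotas order before
 * group quotas, then ascending by ID, giving a stable order in which
 * to take multiple quota glocks.
 */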
static int sort_qd(const void *a, const void *b)

        struct gfs2_quota_data *qd_a = *(struct gfs2_quota_data **)a;
        struct gfs2_quota_data *qd_b = *(struct gfs2_quota_data **)b;

        if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
            !test_bit(QDF_USER, &qd_b->qd_flags)) {
                if (test_bit(QDF_USER, &qd_a->qd_flags))

        if (qd_a->qd_id < qd_b->qd_id)

        else if (qd_a->qd_id > qd_b->qd_id)
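/*
 * do_qc - fold a local allocation change into this qd's record in the
 * per-node quota_change file; QDF_CHANGE tracks whether the record is
 * live, being set when the change becomes nonzero and cleared when it
 * returns to zero.
 */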
static void do_qc(struct gfs2_quota_data *qd, int64_t change)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = get_v2ip(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {

                if (test_bit(QDF_USER, &qd->qd_flags))
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(qd->qd_id);

        x = be64_to_cpu(x) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
        clear_bit(QDF_CHANGE, &qd->qd_flags);

        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {

        mutex_unlock(&sdp->sd_quota_mutex);
/*
 * This function was mostly borrowed from gfs2_block_truncate_page, which
 * was in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             int64_t change, struct gfs2_quota_data *qd)

        struct inode *inode = ip->i_vnode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh;

        page = grab_cache_page(mapping, index);

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);

        while (offset >= pos) {
                bh = bh->b_this_page;

        if (!buffer_mapped(bh)) {
                gfs2_get_block(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);

                if (!buffer_uptodate(bh))

        gfs2_trans_add_bh(ip->i_gl, bh, 0);

        kaddr = kmap_atomic(page, KM_USER0);
        ptr = (__be64 *)(kaddr + offset);
        value = *ptr = cpu_to_be64(be64_to_cpu(*ptr) + change);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);

        qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);

        qd->qd_qb.qb_limit = cpu_to_be64(q.qu_limit);
        qd->qd_qb.qb_warn = cpu_to_be64(q.qu_warn);

        qd->qd_qb.qb_value = cpu_to_be64(value);

        page_cache_release(page);
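/*
 * do_sync - flush a batch of locally accumulated changes to the global
 * quota file: sort the qds, take their glocks, reserve space for any
 * entries that require allocation, then apply each change with
 * gfs2_adjust_quota() and back it out of the quota_change file via
 * do_qc().
 */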
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)

        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = get_v2ip(sdp->sd_quota_inode);
        unsigned int data_blocks, ind_blocks;
        struct file_ra_state ra_state;
        struct gfs2_holder *ghs, i_gh;

        struct gfs2_quota_data *qd;

        unsigned int nalloc = 0;
        struct gfs2_alloc *al = NULL;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);

        for (x = 0; x < num_qd; x++) {

                offset = qd2offset(qda[x]);
                error = gfs2_write_alloc_required(ip, offset,
                                                  sizeof(struct gfs2_quota),

        al = gfs2_alloc_get(ip);

        al->al_requested = nalloc * (data_blocks + ind_blocks);

        error = gfs2_inplace_reserve(ip);

        error = gfs2_trans_begin(sdp,
                                 al->al_rgd->rd_ri.ri_length +
                                 num_qd * data_blocks +
                                 nalloc * ind_blocks +
                                 RES_DINODE + num_qd +

        error = gfs2_trans_begin(sdp,
                                 num_qd * data_blocks +
                                 RES_DINODE + num_qd, 0);

        file_ra_state_init(&ra_state, ip->i_vnode->i_mapping);
        for (x = 0; x < num_qd; x++) {

                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
                                          (struct gfs2_quota_data *)

                do_qc(qd, -qd->qd_change_sync);

        gfs2_inplace_release(ip);

        gfs2_glock_dq_uninit(&i_gh);

        gfs2_glock_dq_uninit(&ghs[qx]);

        gfs2_log_flush_glock(ip->i_gl);
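/*
 * do_glock - take a qd's glock in the shared state and make sure its LVB
 * mirrors the quota file: on a forced refresh, or when the LVB magic is
 * stale, the lock is retaken exclusively, the gfs2_quota entry is
 * reread, and the LVB is rewritten.
 */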
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_holder i_gh;

        char buf[sizeof(struct gfs2_quota)];
        struct file_ra_state ra_state;

        file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);

        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);

        gfs2_quota_lvb_in(&qd->qd_qb, qd->qd_gl->gl_lvb);

        if (force_refresh || qd->qd_qb.qb_magic != GFS2_MAGIC) {

                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl,
                                           LM_ST_EXCLUSIVE, GL_NOCACHE,

                error = gfs2_glock_nq_init(get_v2ip(sdp->sd_quota_inode)->i_gl,

                memset(buf, 0, sizeof(struct gfs2_quota));

                error = gfs2_internal_read(get_v2ip(sdp->sd_quota_inode),
                                           sizeof(struct gfs2_quota));

                gfs2_glock_dq_uninit(&i_gh);

                gfs2_quota_in(&q, buf);

                memset(&qd->qd_qb, 0, sizeof(struct gfs2_quota_lvb));
                qd->qd_qb.qb_magic = GFS2_MAGIC;
                qd->qd_qb.qb_limit = q.qu_limit;
                qd->qd_qb.qb_warn = q.qu_warn;
                qd->qd_qb.qb_value = q.qu_value;

                gfs2_quota_lvb_out(&qd->qd_qb, qd->qd_gl->gl_lvb);

                if (gfs2_glock_is_blocking(qd->qd_gl)) {
                        gfs2_glock_dq_uninit(q_gh);

        gfs2_glock_dq_uninit(&i_gh);

        gfs2_glock_dq_uninit(q_gh);
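/*
 * gfs2_quota_lock - hold and glock every quota_data this operation may
 * charge, sorting first so the shared glocks are acquired in a
 * consistent order.
 */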
int gfs2_quota_lock(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)

        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;

        gfs2_quota_hold(ip, uid, gid);

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

        sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),

        for (x = 0; x < al->al_qd_num; x++) {
                error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);

        set_bit(GIF_QD_LOCKED, &ip->i_flags);

        gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
        gfs2_quota_unhold(ip);
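/*
 * need_sync - decide whether this node's pending change for an ID is
 * large enough to push to the quota file early; see the scaling example
 * in the header comment above.
 */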
static int need_sync(struct gfs2_quota_data *qd)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;

        unsigned int num, den;

        if (!qd->qd_qb.qb_limit)

        spin_lock(&sdp->sd_quota_spin);
        value = qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        else if (qd->qd_qb.qb_value >= (int64_t)qd->qd_qb.qb_limit)

        value *= gfs2_jindex_size(sdp) * num;

        value += qd->qd_qb.qb_value;
        if (value < (int64_t)qd->qd_qb.qb_limit)
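/*
 * gfs2_quota_unlock - drop the shared glocks taken by gfs2_quota_lock(),
 * then opportunistically sync any qds whose pending change need_sync()
 * says is worth it.
 */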
void gfs2_quota_unlock(struct gfs2_inode *ip)

        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))

        for (x = 0; x < al->al_qd_num; x++) {
                struct gfs2_quota_data *qd;

                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

                if (sync && qd_trylock(qd))

        for (x = 0; x < count; x++)

        gfs2_quota_unhold(ip);
static int print_message(struct gfs2_quota_data *qd, char *type)

        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        line = kmalloc(MAX_LINE, GFP_KERNEL);

        len = snprintf(line, MAX_LINE-1,
                       "GFS2: fsid=%s: quota %s for %s %u\r\n",
                       sdp->sd_fsname, type,
                       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",

        line[MAX_LINE-1] = 0;

        if (current->signal) { /* Is this test still required? */
                tty_write_message(current->signal->tty, line);
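/*
 * gfs2_quota_check - test the IDs this operation charges against their
 * limits, counting both the cached LVB value and this node's unsynced
 * change; exceeding the hard limit fails, crossing the warn value
 * prints a rate-limited warning.
 */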
int gfs2_quota_check(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)

        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

        for (x = 0; x < al->al_qd_num; x++) {

                if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))

                value = qd->qd_qb.qb_value;
                spin_lock(&sdp->sd_quota_spin);
                value += qd->qd_change;
                spin_unlock(&sdp->sd_quota_spin);

                if (qd->qd_qb.qb_limit && (int64_t)qd->qd_qb.qb_limit < value) {
                        print_message(qd, "exceeded");

                } else if (qd->qd_qb.qb_warn &&
                           (int64_t)qd->qd_qb.qb_warn < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                       gt_quota_warn_period) * HZ)) {
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
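/*
 * gfs2_quota_change - record a block allocation or deallocation against
 * the matching held qds in the per-node quota_change file; system files
 * are exempt.
 */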
void gfs2_quota_change(struct gfs2_inode *ip, int64_t change,
                       uint32_t uid, uint32_t gid)

        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;

        unsigned int found = 0;

        if (gfs2_assert_warn(ip->i_sbd, change))

        if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)

        for (x = 0; x < al->al_qd_num; x++) {

                if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
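/*
 * gfs2_quota_sync - bump the sync generation, then repeatedly fish out
 * batches of up to quota_simul_sync dirty qds and write each batch with
 * do_sync().
 */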
int gfs2_quota_sync(struct gfs2_sbd *sdp)

        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);

                error = qd_fish(sdp, qda + num_qd);
                if (error || !qda[num_qd])

                if (++num_qd == max_qd)

                error = do_sync(num_qd, qda);

                for (x = 0; x < num_qd; x++)
                        qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;

                for (x = 0; x < num_qd; x++)

        } while (!error && num_qd == max_qd);
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, uint32_t id)

        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;

        error = qd_get(sdp, user, id, CREATE, &qd);

        error = do_glock(qd, FORCE, &q_gh);

        gfs2_glock_dq_uninit(&q_gh);
int gfs2_quota_read(struct gfs2_sbd *sdp, int user, uint32_t id,
                    struct gfs2_quota *q)

        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;

        if (((user) ? (id != current->fsuid) : (!in_group_p(id))) &&
            !capable(CAP_SYS_ADMIN))

        error = qd_get(sdp, user, id, CREATE, &qd);

        error = do_glock(qd, NO_FORCE, &q_gh);

        memset(q, 0, sizeof(struct gfs2_quota));
        q->qu_limit = qd->qd_qb.qb_limit;
        q->qu_warn = qd->qd_qb.qb_warn;
        q->qu_value = qd->qd_qb.qb_value;

        spin_lock(&sdp->sd_quota_spin);
        q->qu_value += qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        gfs2_glock_dq_uninit(&q_gh);
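/*
 * gfs2_quota_init - at mount time, walk every slot of this node's
 * quota_change file, rebuild in-core qds for slots holding nonzero
 * changes, and mark those slots busy in sd_quota_bitmap.
 */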
int gfs2_quota_init(struct gfs2_sbd *sdp)

        struct gfs2_inode *ip = get_v2ip(sdp->sd_qc_inode);
        unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;

        uint32_t extlen = 0;

        if (!ip->i_di.di_size ||
            ip->i_di.di_size > (64 << 20) ||
            ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
                gfs2_consist_inode(ip);

        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_RU(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_KERNEL);
        if (!sdp->sd_quota_bitmap)

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!sdp->sd_quota_bitmap[x])

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;

                error = gfs2_block_map(ip, x, &new, &dblock, &extlen);

                gfs2_meta_ra(ip->i_gl, dblock, extlen);
                error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,

                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {

                     y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;

                        struct gfs2_quota_change qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                             sizeof(struct gfs2_meta_header) +
                                             y * sizeof(struct gfs2_quota_change));

                        error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;

                        qd->qd_slot_count = 1;
                        qd->qd_last_touched = jiffies;

                        spin_lock(&sdp->sd_quota_spin);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&sdp->sd_quota_spin);

        fs_info(sdp, "found %u quota changes\n", found);

        gfs2_quota_cleanup(sdp);
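/*
 * gfs2_quota_scan - reclaim unreferenced qds that have not been touched
 * for quota_cache_secs, dropping them outside the spinlock.
 */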
void gfs2_quota_scan(struct gfs2_sbd *sdp)

        struct gfs2_quota_data *qd, *safe;

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
                if (!qd->qd_count &&
                    time_after_eq(jiffies, qd->qd_last_touched +
                                  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
                        list_move(&qd->qd_list, &dead);
                        gfs2_assert_warn(sdp,
                                         atomic_read(&sdp->sd_quota_count) > 0);
                        atomic_dec(&sdp->sd_quota_count);

        spin_unlock(&sdp->sd_quota_spin);

        while (!list_empty(&dead)) {
                qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);
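/*
 * gfs2_quota_cleanup - tear down everything left on sd_quota_list at
 * unmount, releasing the spinlock to wait whenever an entry still holds
 * extra references.
 */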
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)

        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;

        spin_lock(&sdp->sd_quota_spin);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (qd->qd_count > 1 ||
                    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&sdp->sd_quota_spin);

                        spin_lock(&sdp->sd_quota_spin);

                list_del(&qd->qd_list);
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&sdp->sd_quota_spin);

                if (!qd->qd_count) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);

                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);

                spin_lock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);