/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
16 #include <linux/pagemap.h>
17 #include <asm/semaphore.h>
/*
 * pfault_be_greedy - note a page fault and keep the glock around for a while
 * @ip: the GFS2 inode that just took a page fault
 *
 * Records the jiffies timestamp of this fault (under i_spin) and then asks
 * the glock layer to hold the inode glock "greedily" so a burst of faults
 * on the same mapping does not bounce the lock between nodes.
 *
 * NOTE(review): this chunk is fragmentary — the declaration/computation of
 * 'time' and the function braces are elided; confirm against full source.
 */
29 static void pfault_be_greedy(struct gfs2_inode *ip)
33 spin_lock(&ip->i_spin);  /* i_spin serializes access to i_last_pfault */
35 ip->i_last_pfault = jiffies;  /* remember when the most recent fault occurred */
36 spin_unlock(&ip->i_spin);
/* 'time' is computed in lines not visible here — presumably a hold duration */
39 if (gfs2_glock_be_greedy(ip->i_gl, time))
/*
 * gfs2_private_nopage - nopage handler for private (non-shared-write) mappings
 * @area: the faulting VMA
 * @address: the faulting virtual address
 * @type: fault type out-parameter, passed through to filemap_nopage()
 *
 * Takes the inode glock shared, marks the inode as having paged data, and
 * delegates the actual page lookup to the generic filemap_nopage().
 *
 * NOTE(review): fragmentary — declarations of 'result'/'error', the error
 * path after gfs2_glock_nq_init(), and the return statement are elided.
 */
43 static struct page *gfs2_private_nopage(struct vm_area_struct *area,
44 unsigned long address, int *type)
46 struct gfs2_inode *ip = get_v2ip(area->vm_file->f_mapping->host);
47 struct gfs2_holder i_gh;
/* read fault: a shared glock is sufficient */
51 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
/* flag that this inode now has data reachable via the page cache */
55 set_bit(GIF_PAGED, &ip->i_flags);
57 result = filemap_nopage(area, address, type);
/* a real page came back (not OOM) — elided code presumably acts on it */
59 if (result && result != NOPAGE_OOM)
62 gfs2_glock_dq_uninit(&i_gh);
/*
 * alloc_page_backing - allocate on-disk blocks backing one page of an inode
 * @ip: the inode being written through a shared-writable mapping
 * @page: the page that needs real (non-hole, non-stuffed) disk blocks
 *
 * Does the full allocation dance for a page fault on a hole: quota lock and
 * check, in-place (resource group) reservation, transaction, unstuffing if
 * the inode is stuffed, then block mapping for each fs block in the page.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): fragmentary — all error 'goto' paths, the loop body interior,
 * transaction end, and cleanup labels are elided from this view.
 */
67 static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
69 struct gfs2_sbd *sdp = ip->i_sbd;
70 unsigned long index = page->index;
/* first filesystem block covered by this page */
71 uint64_t lblock = index << (PAGE_CACHE_SHIFT -
72 sdp->sd_sb.sb_bsize_shift);
/* number of fs blocks per page (assumes block size <= page size) */
73 unsigned int blocks = PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift;
74 struct gfs2_alloc *al;
75 unsigned int data_blocks, ind_blocks;
79 al = gfs2_alloc_get(ip);
81 error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
/* verify the inode's owner is within quota before allocating */
85 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
/* work out worst-case data + indirect block counts for one page */
89 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE,
90 &data_blocks, &ind_blocks);
92 al->al_requested = data_blocks + ind_blocks;
94 error = gfs2_inplace_reserve(ip);
/* transaction sized for rgrp bitmaps + indirect blocks + metadata */
98 error = gfs2_trans_begin(sdp,
99 al->al_rgd->rd_ri.ri_length +
100 ind_blocks + RES_DINODE +
101 RES_STATFS + RES_QUOTA, 0);
/* data held inline in the dinode must be pushed out to its own block first */
105 if (gfs2_is_stuffed(ip)) {
106 error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_page, NULL);
/* map (and allocate, via 'new') every fs block backing this page;
   'x' advances by the returned extent length in elided lines */
111 for (x = 0; x < blocks; ) {
116 error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
/* sanity: we reserved blocks, so something should have been allocated */
124 gfs2_assert_warn(sdp, al->al_alloced);
/* cleanup path: release reservation, quota, allocation struct (labels elided) */
130 gfs2_inplace_release(ip);
133 gfs2_quota_unlock(ip);
/*
 * gfs2_sharewrite_nopage - nopage handler for MAP_SHARED writable mappings
 * @area: the faulting VMA
 * @address: the faulting virtual address
 * @type: fault type out-parameter, passed through to filemap_nopage()
 *
 * A fault on a shared-writable mapping may dirty the page, so this takes the
 * inode glock exclusively, allocates backing blocks if the page covers a
 * hole (alloc_page_backing), dirties the page, and extends the glock hold
 * via pfault_be_greedy().
 *
 * NOTE(review): fragmentary — declarations of 'result' continuation
 * (vm_pgoff term on line 148), 'alloc_required'/'error', error paths, and
 * the return statement are elided from this view.
 */
141 static struct page *gfs2_sharewrite_nopage(struct vm_area_struct *area,
142 unsigned long address, int *type)
144 struct gfs2_inode *ip = get_v2ip(area->vm_file->f_mapping->host);
145 struct gfs2_holder i_gh;
146 struct page *result = NULL;
/* file page index of the fault (continuation adding vm_pgoff is elided) */
147 unsigned long index = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) +
/* write fault: must hold the glock exclusively */
152 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
156 set_bit(GIF_PAGED, &ip->i_flags);
/* remember a shared-writable mapping exists for this inode */
157 set_bit(GIF_SW_PAGED, &ip->i_flags);
/* does this page cover a hole / stuffed data needing allocation? */
159 error = gfs2_write_alloc_required(ip,
160 (uint64_t)index << PAGE_CACHE_SHIFT,
161 PAGE_CACHE_SIZE, &alloc_required);
165 result = filemap_nopage(area, address, type);
166 if (!result || result == NOPAGE_OOM)
169 if (alloc_required) {
170 error = alloc_page_backing(ip, result);
/* allocation failed: drop our reference to the page (error path elided) */
172 page_cache_release(result);
/* page now has backing store — mark it dirty so it gets written out */
176 set_page_dirty(result);
179 pfault_be_greedy(ip);
182 gfs2_glock_dq_uninit(&i_gh);
/* VMA operations for private (MAP_PRIVATE / read-only shared) mappings:
   faults only need the shared-glock path. NOTE(review): closing '};' elided. */
187 struct vm_operations_struct gfs2_vm_ops_private = {
188 .nopage = gfs2_private_nopage,
/* VMA operations for shared-writable mappings: faults take the glock
   exclusively and may allocate backing blocks. NOTE(review): closing '};' elided. */
191 struct vm_operations_struct gfs2_vm_ops_sharewrite = {
192 .nopage = gfs2_sharewrite_nopage,