* Return how many pages have been written.
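+ *
+ * If \a ignore_layout is set, io->ci_ignore_layout is set as well and the
+ * range is synced without refreshing the file's layout.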
*/
int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
- enum cl_fsync_mode mode)
+ enum cl_fsync_mode mode, int ignore_layout)
{
struct cl_env_nest nest;
struct lu_env *env;
io = ccc_env_thread_io(env);
io->ci_obj = cl_i2info(inode)->lli_clob;
- io->ci_ignore_layout = 1;
+ io->ci_ignore_layout = ignore_layout;
/* initialize parameters for sync */
fio = &io->u.ci_fsync;
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
err = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
- CL_FSYNC_ALL);
+ CL_FSYNC_ALL, 0);
if (rc == 0 && err < 0)
rc = err;
if (rc < 0)
struct lu_fid ll_root_fid; /* root object fid */
int ll_flags;
+ int ll_umounting:1; /* set to 1 while the SB is being unmounted */
struct list_head ll_conn_chain; /* per-conn chain of SBs */
struct lustre_client_ocd ll_lco;
struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt);
int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
- enum cl_fsync_mode mode);
+ enum cl_fsync_mode mode, int ignore_layout);
/** direct write pages */
struct ll_dio_pages {
/* we need to restore s_dev, which was changed for clustered NFS, before
* put_super because new kernels cache s_dev, so changing sb->s_dev in
* put_super does not affect the real removal of devices */
- if (sbi)
+ if (sbi) {
sb->s_dev = sbi->ll_sdev_orig;
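+ /* note that this SB is being unmounted; ll_writepages() checks this
+ * flag and syncs with ignore_layout set so that all pages can be
+ * evicted before truncate_inode_pages() runs */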
+ sbi->ll_umounting = 1;
+ }
EXIT;
}
if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
/* discard all dirty pages before truncating them, as required by the
* osc_extent implementation; see LU-1030. */
- cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_DISCARD);
+ cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
+ CL_FSYNC_DISCARD, 1);
truncate_inode_pages(&inode->i_data, 0);
OBD_FREE_PTR(ioc_data);
}
-
/* Really, we'd like to wait until there are no requests outstanding,
* and then continue. For now, we just invalidate the requests,
* schedule() and sleep one second if needed, and hope.
* PageWriteback or clean the page. */
result = cl_sync_file_range(inode, offset,
offset + PAGE_CACHE_SIZE - 1,
- CL_FSYNC_LOCAL);
+ CL_FSYNC_LOCAL, 1);
if (result > 0) {
/* actually we may have written more than one page.
* decreasing this page because the caller will count
int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
loff_t start;
loff_t end;
enum cl_fsync_mode mode;
int range_whole = 0;
int result;
+ int ignore_layout = 0;
ENTRY;
if (wbc->range_cyclic) {
if (wbc->sync_mode == WB_SYNC_ALL)
mode = CL_FSYNC_LOCAL;
- result = cl_sync_file_range(inode, start, end, mode);
+ if (sbi->ll_umounting)
+ /* if the mountpoint is being umounted, all pages have to be
+ * evicted to avoid hitting LBUG when truncate_inode_pages()
+ * is called later on. */
+ ignore_layout = 1;
+ result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
if (result > 0) {
wbc->nr_to_write -= result;
result = 0;
/* Enqueue layout lock and get layout version. We need to do this
* even for operations requiring to open file, such as read and write,
* because it might not grant layout lock in IT_OPEN. */
- if (result == 0 && !io->ci_ignore_layout)
+ if (result == 0 && !io->ci_ignore_layout) {
result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ if (result == -ENOENT)
+ /* If the inode on MDS has been removed, but the objects
+ * on OSTs haven't been destroyed (async unlink), layout
+ * fetch will return -ENOENT; we ignore this error
+ * and continue with the dirty flush. LU-3230. */
+ result = 0;
+ if (result < 0)
+ CERROR("%s: refresh file layout " DFID " error %d.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(lu_object_fid(&obj->co_lu)), result);
+ }
RETURN(result);
}