/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
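/*
 * Worked example: an open(2) with O_RDWR maps to GENERIC_READ |
 * GENERIC_WRITE on the wire; any other access mode falls through to
 * the explicit FILE_* rights mask at the end of cifs_convert_flags().
 */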
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
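/*
 * Worked example: f_flags of O_WRONLY | O_CREAT | O_TRUNC becomes
 * SMB_O_WRONLY | SMB_O_CREAT | SMB_O_TRUNC in the POSIX open request.
 */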
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
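/*
 * Example: O_CREAT | O_TRUNC yields FILE_OVERWRITE_IF, while a plain
 * O_TRUNC (no create) yields FILE_OVERWRITE; the full mapping table is
 * repeated in a comment in cifs_nt_open() below.
 */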
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
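/*
 * Both cifs_open() and cifs_reopen_file() below use this helper; the
 * reopen path passes a NULL pinode since it only needs the new handle.
 */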
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(FILE_SUPERSEDE uses the attributes / metadata passed in on
 *	the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably. O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* put readable file instances first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}
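/*
 * The initial reference taken here (count = 1) is dropped through
 * cifsFileInfo_put(); the final put closes the server handle and frees
 * any byte-range lock records cached for this netfid.
 */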
static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need to invalidate the mapping on
		   the last close because it may cause an error when we open
		   this file again and get at least a level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

	/* can not grab rename sem here because various ops, including
	   those that already have the rename sem can end up causing writepage
	   to get called and if the server was down that means we end up here,
	   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
static struct cifsLockInfo *
cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!li)
		return li;
	li->netfid = netfid;
	li->offset = offset;
	li->length = len;
	li->type = type;
	li->pid = current->tgid;
	INIT_LIST_HEAD(&li->blist);
	init_waitqueue_head(&li->block_q);
	return li;
}
static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
static bool
cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
			__u64 length, __u8 type, __u16 netfid,
			struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
			 ((netfid == li->netfid && current->tgid == li->pid) ||
			  type == li->type))
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
	       __u8 type, __u16 netfid, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
					&conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
static int
cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset,
	      __u8 type, __u16 netfid)
{
	struct cifsLockInfo *li;

	li = cifs_lock_init(len, offset, type, netfid);
	if (!li)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&li->llist, &cinode->llist);
	mutex_unlock(&cinode->lock_mutex);
	return 0;
}
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
		 __u8 type, __u16 netfid, bool wait)
{
	struct cifsLockInfo *lock, *conf_lock;
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	lock = cifs_lock_init(length, offset, type, netfid);
	if (!lock)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
					&conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cinode->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
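		/*
		 * A self-linked (empty) blist here means that
		 * cifs_del_lock_waiters() took us off the conflicting lock's
		 * wait list, i.e. the blocker was released and the lock can
		 * be retried.
		 */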
		if (!rc) {
			kfree(lock);
			goto try_again;
		}
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		kfree(lock);
		return rc;
	}

	kfree(lock);
	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}
	rc = posix_lock_file_wait(file, flock);
	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	int xid, rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	xid = GetXid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		stored_rc = CIFSSMBLock(xid, tcon, cfile->netfid,
					li->pid, li->length, li->offset,
					0, 1, li->type, 0, 0);
		if (stored_rc)
			rc = stored_rc;
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
}
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)
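/*
 * Note: this walks the inode's singly linked i_flock chain exactly as
 * fs/locks.c does; the caller is expected to hold the file-lock lock
 * (lock_flocks() in this kernel generation) while iterating.
 */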
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	struct cifsLockInfo *lck, *tmp;
	int rc = 0, xid, type;
	__u64 length;
	struct list_head locks_to_send;

	xid = GetXid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	INIT_LIST_HEAD(&locks_to_send);

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;

		lck = cifs_lock_init(length, flock->fl_start, type,
				     cfile->netfid);
		if (!lck) {
			rc = -ENOMEM;
			goto send_locks;
		}
		lck->pid = flock->fl_pid;

		list_add_tail(&lck->llist, &locks_to_send);
	}

send_locks:
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		struct file_lock tmp_lock;
		int stored_rc;

		tmp_lock.fl_start = lck->offset;
		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     0, lck->length, &tmp_lock,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
}
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}
static void
cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
		bool *wait_flag)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = LOCKING_ANDX_LARGE_FILES;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK");
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      1 /* get */, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
			    flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1, type, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type, 0, 0);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & LOCKING_ANDX_SHARED_LOCK) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1,
			 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type | LOCKING_ANDX_SHARED_LOCK,
				 0, 0);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      0 /* set */, length, flock,
				      posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		rc = cifs_lock_add_if(cinode, flock->fl_start, length,
				      type, netfid, wait_flag);
		if (rc < 0)
			return rc;
		else if (!rc)
			goto out;

		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
				 flock->fl_start, 0, 1, type, wait_flag, 0);
		if (rc == 0) {
			/* For Windows locks we must store them. */
			rc = cifs_lock_add(cinode, length, flock->fl_start,
					   type, netfid);
		}
	} else if (unlock) {
		/*
		 * For each stored lock that this unlock overlaps completely,
		 * unlock it.
		 */
		int stored_rc = 0;
		struct cifsLockInfo *li, *tmp;

		mutex_lock(&cinode->lock_mutex);
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (cfile->netfid != li->netfid)
				continue;

			if (!cinode->can_cache_brlcks)
				stored_rc = CIFSSMBLock(xid, tcon, netfid,
							current->tgid,
							li->length, li->offset,
							1, 0, li->type, 0, 0);
			else
				stored_rc = 0;

			if (stored_rc)
				rc = stored_rc;
			else {
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
			}
		}
		mutex_unlock(&cinode->lock_mutex);
	}
out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u8 type;

	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);
	netfid = cfile->netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		FreeXid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	FreeXid(xid);
	return rc;
}
/* update the file size (if needed) after a write */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
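/*
 * Example: a 4096-byte write at offset 8192 advances server_eof to
 * 12288, but only if that exceeds the previously recorded value.
 */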
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			goto error;
		}
	}

	return rc;

error:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	return rc;
}
static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = clen / PAGE_CACHE_SIZE;
	if (clen % PAGE_CACHE_SIZE)
		num_pages++;

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
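/*
 * Worked example (assuming 4 KiB pages): with wsize = 57344, a len of
 * 200000 gives cur_len = 57344 and 14 pages; a len of 10000 gives
 * cur_len = 10000 and 3 pages (two full pages plus a 1808-byte tail).
 */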
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned int written;
	unsigned long num_pages, npages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	struct kvec *to_send;
	struct page **pages;
	struct iov_iter it;
	struct inode *inode;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *pTcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_io_parms io_parms;
	int xid, rc;
	__u32 pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);

	pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
	if (!to_send) {
		kfree(pages);
		return -ENOMEM;
	}

	rc = cifs_write_allocate_pages(pages, num_pages);
	if (rc) {
		kfree(pages);
		kfree(to_send);
		return rc;
	}

	xid = GetXid();
	open_file = file->private_data;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	pTcon = tlink_tcon(open_file->tlink);
	inode = file->f_path.dentry->d_inode;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	npages = num_pages;

	do {
		size_t save_len = cur_len;
		for (i = 0; i < npages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
			copied = iov_iter_copy_from_user(pages[i], &it, 0,
							 copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
			to_send[i+1].iov_base = kmap(pages[i]);
			to_send[i+1].iov_len = copied;
		}

		cur_len = save_len - cur_len;

		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
					   npages, 0);
		} while (rc == -EAGAIN);

		for (i = 0; i < npages; i++)
			kunmap(pages[i]);

		if (written) {
			len -= written;
			total_written += written;
			cifs_update_eof(CIFS_I(inode), *poffset, written);
			*poffset += written;
		} else if (rc < 0) {
			if (!total_written)
				total_written = rc;
			break;
		}

		/* get length and number of kvecs of the next write */
		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
	} while (len > 0);

	if (total_written > 0) {
		spin_lock(&inode->i_lock);
		if (*poffset > inode->i_size)
			i_size_write(inode, *poffset);
		spin_unlock(&inode->i_lock);
	}

	cifs_stats_bytes_written(pTcon, total_written);
	mark_inode_dirty_sync(inode);

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(to_send);
	kfree(pages);
	FreeXid(xid);
	return total_written;
}
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the path when signing is disabled: we could drop
	 * this extra memory-to-memory copying and use iovec buffers to
	 * construct the write request directly.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}
ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server exactly
	 * from pos to pos+len-1 rather than flush all affected pages, because
	 * flushing may cause an error with mandatory locks on these pages but
	 * not on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}
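/*
 * The read side applies the same reasoning; see cifs_strict_readv()
 * below, which likewise goes to the server when no read oplock is held.
 */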
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	unsigned int rsize;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, rsize);
		rc = -EAGAIN;
		read_data = NULL;

		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}
ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * mtime change - so we can't make a decision about invalidating the
	 * inode. Page reads can also fail if there are mandatory locks on
	 * pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);

		/* For Windows ME and 9x we do not want to request more
		   than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
		    !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = current_read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &current_offset, &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}
static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};
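/*
 * Locking the page in cifs_page_mkwrite() and returning VM_FAULT_LOCKED
 * keeps an mmap'ed page stable while writeback is in flight; read
 * faults are served by the generic filemap_fault().
 */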
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			FreeXid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}
/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}
static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
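/*
 * The final argument to CIFSSMBLock() above picks the oplock level being
 * acknowledged: 1 retains a level II (read) oplock when the client can
 * still cache reads, 0 gives the oplock up entirely.
 */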
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};