4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2010
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <asm/div64.h>
40 #include "cifsproto.h"
41 #include "cifs_unicode.h"
42 #include "cifs_debug.h"
43 #include "cifs_fs_sb.h"
46 static inline int cifs_convert_flags(unsigned int flags)
48 if ((flags & O_ACCMODE) == O_RDONLY)
50 else if ((flags & O_ACCMODE) == O_WRONLY)
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
64 static u32 cifs_posix_convert_flags(unsigned int flags)
68 if ((flags & O_ACCMODE) == O_RDONLY)
69 posix_flags = SMB_O_RDONLY;
70 else if ((flags & O_ACCMODE) == O_WRONLY)
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
76 posix_flags |= SMB_O_CREAT;
78 posix_flags |= SMB_O_EXCL;
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
83 posix_flags |= SMB_O_SYNC;
84 if (flags & O_DIRECTORY)
85 posix_flags |= SMB_O_DIRECTORY;
86 if (flags & O_NOFOLLOW)
87 posix_flags |= SMB_O_NOFOLLOW;
89 posix_flags |= SMB_O_DIRECT;
94 static inline int cifs_get_disposition(unsigned int flags)
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
/*
 * Open a file using the SMB POSIX extensions (CIFSPOSIXCreate) and, on
 * success, build or refresh the VFS inode from the FILE_UNIX_BASIC_INFO
 * the server returned.  mode is masked by the caller's umask before the
 * wire call.  A Type of -1 means the server returned no inode data, so
 * the caller must do a separate qpathinfo.
 * NOTE(review): this listing is truncated — braces, error checks, the
 * response buffer kfree and the final return are missing lines.
 */
108 int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
110 __u32 *poplock, __u16 *pnetfid, int xid)
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
118 struct cifs_tcon *tcon;
120 cFYI(1, "posix open %s", full_path);
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
126 tlink = cifs_sb_tlink(cifs_sb);
132 tcon = tlink_tcon(tlink);
/* honor the process umask before sending the create mode on the wire */
133 mode &= ~current_umask();
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
149 goto posix_open_ret; /* caller does not need info */
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
/* existing inode: just refresh its attributes from the server data */
162 cifs_fattr_to_inode(*pinode, &fattr);
/*
 * Open a file the "NT" (non-POSIX-extension) way: convert the POSIX
 * open flags to a desired-access mask and create disposition, then
 * issue CIFSSMBOpen (or SMBLegacyOpen for pre-NT servers) and refresh
 * inode metadata from the returned FILE_ALL_INFO buffer.
 * NOTE(review): truncated listing — the return-type line, braces,
 * error handling and the buffer kfree are missing lines.
 */
171 cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
173 __u16 *pnetfid, int xid)
178 int create_options = CREATE_NOT_DIR;
181 desiredAccess = cifs_convert_flags(f_flags);
183 /*********************************************************************
184 * open flag mapping table:
186 * POSIX Flag CIFS Disposition
187 * ---------- ----------------
188 * O_CREAT FILE_OPEN_IF
189 * O_CREAT | O_EXCL FILE_CREATE
190 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
191 * O_TRUNC FILE_OVERWRITE
192 * none of the above FILE_OPEN
194 * Note that there is not a direct match between disposition
195 * FILE_SUPERSEDE (ie create whether or not file exists although
196 * O_CREAT | O_TRUNC is similar but truncates the existing
197 * file rather than creating a new file as FILE_SUPERSEDE does
198 * (which uses the attributes / metadata passed in on open call)
200 *? O_SYNC is a reasonable match to CIFS writethrough flag
201 *? and the read write flags match reasonably. O_LARGEFILE
202 *? is irrelevant because largefile support is always used
203 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
204 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
205 *********************************************************************/
207 disposition = cifs_get_disposition(f_flags);
209 /* BB pass O_SYNC flag through on file attributes .. BB */
211 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
/* mounts with backup intent request extra access for backup operators */
215 if (backup_cred(cifs_sb))
216 create_options |= CREATE_OPEN_BACKUP_INTENT;
218 if (tcon->ses->capabilities & CAP_NT_SMBS)
219 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
220 desiredAccess, create_options, pnetfid, poplock, buf,
221 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
222 & CIFS_MOUNT_MAP_SPECIAL_CHR);
/* legacy (pre-NT) servers do not understand create_options */
224 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
225 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
226 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
227 & CIFS_MOUNT_MAP_SPECIAL_CHR);
233 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
236 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
/*
 * Allocate and initialize a cifsFileInfo for an open file handle, link
 * it onto both the tcon's and the inode's open-file lists (readable
 * instances first on the inode list), record the oplock level, and
 * stash the result in file->private_data.  Starts with a refcount of 1;
 * released via cifsFileInfo_put().
 * NOTE(review): truncated listing — NULL-return path, the `else` before
 * the tail-add, and the final return are missing lines.
 */
244 struct cifsFileInfo *
245 cifs_new_fileinfo(__u16 fileHandle, struct file *file,
246 struct tcon_link *tlink, __u32 oplock)
248 struct dentry *dentry = file->f_path.dentry;
249 struct inode *inode = dentry->d_inode;
250 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
251 struct cifsFileInfo *pCifsFile;
253 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
254 if (pCifsFile == NULL)
257 pCifsFile->count = 1;
258 pCifsFile->netfid = fileHandle;
259 pCifsFile->pid = current->tgid;
260 pCifsFile->uid = current_fsuid();
/* hold the dentry for the lifetime of this open instance */
261 pCifsFile->dentry = dget(dentry);
262 pCifsFile->f_flags = file->f_flags;
263 pCifsFile->invalidHandle = false;
264 pCifsFile->tlink = cifs_get_tlink(tlink);
265 mutex_init(&pCifsFile->fh_mutex);
266 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
268 spin_lock(&cifs_file_list_lock);
269 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
270 /* if readable file instance put first in list*/
271 if (file->f_mode & FMODE_READ)
272 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
274 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
275 spin_unlock(&cifs_file_list_lock);
277 cifs_set_oplock_level(pCifsInode, oplock);
/* brlocks are cacheable only while we hold an exclusive oplock */
278 pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;
280 file->private_data = pCifsFile;
284 static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
287 * Release a reference on the file private data. This may involve closing
288 * the filehandle out on the server. Must be called without holding
289 * cifs_file_list_lock.
291 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
293 struct inode *inode = cifs_file->dentry->d_inode;
294 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
295 struct cifsInodeInfo *cifsi = CIFS_I(inode);
296 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
297 struct cifsLockInfo *li, *tmp;
/* drop a reference under the list lock; only the last put tears down */
299 spin_lock(&cifs_file_list_lock);
300 if (--cifs_file->count > 0) {
301 spin_unlock(&cifs_file_list_lock);
305 /* remove it from the lists */
306 list_del(&cifs_file->flist);
307 list_del(&cifs_file->tlist);
309 if (list_empty(&cifsi->openFileList)) {
310 cFYI(1, "closing last open instance for inode %p",
311 cifs_file->dentry->d_inode);
313 /* in strict cache mode we need invalidate mapping on the last
314 close because it may cause a error when we open this file
315 again and get at least level II oplock */
316 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
317 CIFS_I(inode)->invalid_mapping = true;
319 cifs_set_oplock_level(cifsi, 0);
321 spin_unlock(&cifs_file_list_lock);
/* make sure no queued oplock-break work still references this file */
323 cancel_work_sync(&cifs_file->oplock_break);
/* only send the close to the server if the handle is still valid */
325 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
329 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
333 /* Delete any outstanding lock records. We'll lose them when the file
336 mutex_lock(&cifsi->lock_mutex);
337 list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
338 if (li->netfid != cifs_file->netfid)
340 list_del(&li->llist);
341 cifs_del_lock_waiters(li);
344 mutex_unlock(&cifsi->lock_mutex);
/* release the references taken in cifs_new_fileinfo() */
346 cifs_put_tlink(cifs_file->tlink);
347 dput(cifs_file->dentry);
/*
 * VFS ->open handler.  Prefers a POSIX-extension open when the server
 * advertises CIFS_UNIX_POSIX_PATH_OPS_CAP and the tcon has not been
 * flagged broken for posix open; otherwise (or on certain errors)
 * falls back to cifs_nt_open().  On success builds the cifsFileInfo
 * and, for new files created the non-posix way on unix-ext mounts,
 * pushes the mode to the server afterwards.
 * NOTE(review): truncated listing — xid handling, several error paths,
 * braces and the final cleanup/return are missing lines.
 */
351 int cifs_open(struct inode *inode, struct file *file)
356 struct cifs_sb_info *cifs_sb;
357 struct cifs_tcon *tcon;
358 struct tcon_link *tlink;
359 struct cifsFileInfo *pCifsFile = NULL;
360 char *full_path = NULL;
361 bool posix_open_ok = false;
366 cifs_sb = CIFS_SB(inode->i_sb);
367 tlink = cifs_sb_tlink(cifs_sb);
370 return PTR_ERR(tlink);
372 tcon = tlink_tcon(tlink);
374 full_path = build_path_from_dentry(file->f_path.dentry);
375 if (full_path == NULL) {
380 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
381 inode, file->f_flags, full_path);
388 if (!tcon->broken_posix_open && tcon->unix_ext &&
389 (tcon->ses->capabilities & CAP_UNIX) &&
390 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
391 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
392 /* can not refresh inode info since size could be stale */
393 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
394 cifs_sb->mnt_file_mode /* ignored */,
395 file->f_flags, &oplock, &netfid, xid);
397 cFYI(1, "posix open succeeded");
398 posix_open_ok = true;
399 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
400 if (tcon->ses->serverNOS)
401 cERROR(1, "server %s of type %s returned"
402 " unexpected error on SMB posix open"
403 ", disabling posix open support."
404 " Check if server update available.",
405 tcon->ses->serverName,
406 tcon->ses->serverNOS);
/* remember the failure so later opens skip the posix attempt */
407 tcon->broken_posix_open = true;
408 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
409 (rc != -EOPNOTSUPP)) /* path not found or net err */
411 /* else fallthrough to retry open the old way on network i/o
415 if (!posix_open_ok) {
416 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
417 file->f_flags, &oplock, &netfid, xid);
422 pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
423 if (pCifsFile == NULL) {
/* could not track the open locally - close the server handle */
424 CIFSSMBClose(xid, tcon, netfid);
429 cifs_fscache_set_inode_cookie(inode, file);
431 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
432 /* time to set mode which we can not set earlier due to
433 problems creating new read-only files */
434 struct cifs_unix_set_info_args args = {
435 .mode = inode->i_mode,
438 .ctime = NO_CHANGE_64,
439 .atime = NO_CHANGE_64,
440 .mtime = NO_CHANGE_64,
443 CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
450 cifs_put_tlink(tlink);
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
/*
 * Re-open a file whose server handle was invalidated (e.g. by session
 * reconnect).  Tries a POSIX-extension reopen first when available
 * (with O_CREAT/O_EXCL/O_TRUNC masked off since they already took
 * effect), otherwise falls back to CIFSSMBOpen with FILE_OPEN.  When
 * can_flush is set, dirty pages are written back and inode info is
 * refreshed before locks are re-established via cifs_relock_file().
 * NOTE(review): truncated listing — xid handling, braces, several
 * error/exit paths and the final return are missing lines.
 */
465 static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
470 struct cifs_sb_info *cifs_sb;
471 struct cifs_tcon *tcon;
472 struct cifsInodeInfo *pCifsInode;
474 char *full_path = NULL;
476 int disposition = FILE_OPEN;
477 int create_options = CREATE_NOT_DIR;
481 mutex_lock(&pCifsFile->fh_mutex);
/* another thread may already have reopened the handle - nothing to do */
482 if (!pCifsFile->invalidHandle) {
483 mutex_unlock(&pCifsFile->fh_mutex);
489 inode = pCifsFile->dentry->d_inode;
490 cifs_sb = CIFS_SB(inode->i_sb);
491 tcon = tlink_tcon(pCifsFile->tlink);
493 /* can not grab rename sem here because various ops, including
494 those that already have the rename sem can end up causing writepage
495 to get called and if the server was down that means we end up here,
496 and we can never tell if the caller already has the rename_sem */
497 full_path = build_path_from_dentry(pCifsFile->dentry);
498 if (full_path == NULL) {
500 mutex_unlock(&pCifsFile->fh_mutex);
505 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
506 inode, pCifsFile->f_flags, full_path);
513 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
514 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
515 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
518 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
519 * original open. Must mask them off for a reopen.
521 unsigned int oflags = pCifsFile->f_flags &
522 ~(O_CREAT | O_EXCL | O_TRUNC);
524 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
525 cifs_sb->mnt_file_mode /* ignored */,
526 oflags, &oplock, &netfid, xid);
528 cFYI(1, "posix reopen succeeded");
531 /* fallthrough to retry open the old way on errors, especially
532 in the reconnect path it is important to retry hard */
535 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
537 if (backup_cred(cifs_sb))
538 create_options |= CREATE_OPEN_BACKUP_INTENT;
540 /* Can not refresh inode by passing in file_info buf to be returned
541 by SMBOpen and then calling get_inode_info with returned buf
542 since file might have write behind data that needs to be flushed
543 and server version of file size can be stale. If we knew for sure
544 that inode was not dirty locally we could do this */
546 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
547 create_options, &netfid, &oplock, NULL,
548 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
549 CIFS_MOUNT_MAP_SPECIAL_CHR);
551 mutex_unlock(&pCifsFile->fh_mutex);
552 cFYI(1, "cifs_open returned 0x%x", rc);
553 cFYI(1, "oplock: %d", oplock);
554 goto reopen_error_exit;
/* success: publish the fresh handle before dropping fh_mutex */
558 pCifsFile->netfid = netfid;
559 pCifsFile->invalidHandle = false;
560 mutex_unlock(&pCifsFile->fh_mutex);
561 pCifsInode = CIFS_I(inode);
564 rc = filemap_write_and_wait(inode->i_mapping);
565 mapping_set_error(inode->i_mapping, rc);
568 rc = cifs_get_inode_info_unix(&inode,
569 full_path, inode->i_sb, xid);
571 rc = cifs_get_inode_info(&inode,
572 full_path, NULL, inode->i_sb,
574 } /* else we are writing out data to server already
575 and could deadlock if we tried to flush data, and
576 since we do not know if we have data that would
577 invalidate the current end of file on the server
578 we can not go to the server to get the new inod
581 cifs_set_oplock_level(pCifsInode, oplock);
583 cifs_relock_file(pCifsFile);
591 int cifs_close(struct inode *inode, struct file *file)
593 if (file->private_data != NULL) {
594 cifsFileInfo_put(file->private_data);
595 file->private_data = NULL;
598 /* return code from the ->release op is always ignored */
/*
 * VFS ->release handler for directories: close any in-progress server
 * search (FindClose) if the readdir was not run to completion, free
 * the cached network search buffer, and drop the tlink reference and
 * the private data.
 * NOTE(review): truncated listing — braces, rc declaration and the
 * final return are missing lines.
 */
602 int cifs_closedir(struct inode *inode, struct file *file)
606 struct cifsFileInfo *pCFileStruct = file->private_data;
609 cFYI(1, "Closedir inode = 0x%p", inode);
614 struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
616 cFYI(1, "Freeing private data in close dir");
617 spin_lock(&cifs_file_list_lock);
/* an uncompleted search holds a server-side handle - close it now */
618 if (!pCFileStruct->srch_inf.endOfSearch &&
619 !pCFileStruct->invalidHandle) {
620 pCFileStruct->invalidHandle = true;
621 spin_unlock(&cifs_file_list_lock);
622 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
623 cFYI(1, "Closing uncompleted readdir with rc %d",
625 /* not much we can do if it fails anyway, ignore rc */
628 spin_unlock(&cifs_file_list_lock);
629 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
631 cFYI(1, "closedir free smb buf in srch struct");
632 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
/* release via the matching small/large SMB buffer pool */
633 if (pCFileStruct->srch_inf.smallBuf)
634 cifs_small_buf_release(ptmp);
636 cifs_buf_release(ptmp);
638 cifs_put_tlink(pCFileStruct->tlink);
639 kfree(file->private_data);
640 file->private_data = NULL;
642 /* BB can we lock the filestruct while this is going on? */
647 static struct cifsLockInfo *
648 cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
650 struct cifsLockInfo *li =
651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
658 li->pid = current->tgid;
659 INIT_LIST_HEAD(&li->blist);
660 init_waitqueue_head(&li->block_q);
665 cifs_del_lock_waiters(struct cifsLockInfo *lock)
667 struct cifsLockInfo *li, *tmp;
668 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
669 list_del_init(&li->blist);
670 wake_up(&li->block_q);
/*
 * Scan the inode's lock list for an existing lock overlapping the
 * given range that conflicts with a lock of the given type/owner;
 * a conflicting entry is returned via *conf_lock.
 * NOTE(review): truncated listing — return-type line, braces, the
 * shared-lock compatibility continue path and the returns are missing.
 */
675 cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
676 __u64 length, __u8 type, __u16 netfid,
677 struct cifsLockInfo **conf_lock)
679 struct cifsLockInfo *li, *tmp;
681 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
/* non-overlapping ranges can never conflict */
682 if (offset + length <= li->offset ||
683 offset >= li->offset + li->length)
685 else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
686 ((netfid == li->netfid && current->tgid == li->pid) ||
/*
 * F_GETLK helper for mandatory (Windows-style) locks: check the local
 * cache of byte-range locks for a conflict.  On conflict the conflicting
 * range/owner/type is copied into *flock; when nothing conflicts and we
 * can cache brlocks locally, F_UNLCK is reported without asking the
 * server.
 * NOTE(review): truncated listing — return-type line, braces and the
 * return value handling are missing lines.
 */
698 cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
699 __u8 type, __u16 netfid, struct file_lock *flock)
702 struct cifsLockInfo *conf_lock;
705 mutex_lock(&cinode->lock_mutex);
707 exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
710 flock->fl_start = conf_lock->offset;
711 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
712 flock->fl_pid = conf_lock->pid;
713 if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
714 flock->fl_type = F_RDLCK;
716 flock->fl_type = F_WRLCK;
/* cannot cache brlocks: the server must be consulted by the caller */
717 } else if (!cinode->can_cache_brlcks)
720 flock->fl_type = F_UNLCK;
722 mutex_unlock(&cinode->lock_mutex);
727 cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset,
728 __u8 type, __u16 netfid)
730 struct cifsLockInfo *li;
732 li = cifs_lock_init(len, offset, type, netfid);
736 mutex_lock(&cinode->lock_mutex);
737 list_add_tail(&li->llist, &cinode->llist);
738 mutex_unlock(&cinode->lock_mutex);
/*
 * Try to add a byte-range lock locally: succeeds immediately when no
 * conflict exists and brlocks are cacheable; otherwise, when @wait is
 * set, queue on the conflicting lock's blist and sleep until woken
 * (by cifs_del_lock_waiters) and retry.
 * NOTE(review): truncated listing — return-type line, braces, the
 * retry loop control and the final return/free paths are missing.
 */
743 cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
744 __u8 type, __u16 netfid, bool wait)
746 struct cifsLockInfo *lock, *conf_lock;
750 lock = cifs_lock_init(length, offset, type, netfid);
756 mutex_lock(&cinode->lock_mutex);
758 exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
760 if (!exist && cinode->can_cache_brlcks) {
761 list_add_tail(&lock->llist, &cinode->llist);
762 mutex_unlock(&cinode->lock_mutex);
/* conflict: block on the holder's waiter list until it is released */
771 list_add_tail(&lock->blist, &conf_lock->blist);
772 mutex_unlock(&cinode->lock_mutex);
/* woken when our blist entry is re-initialized to point at itself */
773 rc = wait_event_interruptible(lock->block_q,
774 (lock->blist.prev == &lock->blist) &&
775 (lock->blist.next == &lock->blist));
779 mutex_lock(&cinode->lock_mutex);
780 list_del_init(&lock->blist);
785 mutex_unlock(&cinode->lock_mutex);
790 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
793 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
794 unsigned char saved_type = flock->fl_type;
796 if ((flock->fl_flags & FL_POSIX) == 0)
799 mutex_lock(&cinode->lock_mutex);
800 posix_test_lock(file, flock);
802 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
803 flock->fl_type = saved_type;
807 mutex_unlock(&cinode->lock_mutex);
812 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
814 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
817 if ((flock->fl_flags & FL_POSIX) == 0)
820 mutex_lock(&cinode->lock_mutex);
821 if (!cinode->can_cache_brlcks) {
822 mutex_unlock(&cinode->lock_mutex);
825 rc = posix_lock_file_wait(file, flock);
826 mutex_unlock(&cinode->lock_mutex);
/*
 * Flush all locally-cached mandatory byte-range locks to the server
 * (typically after an oplock break means we can no longer cache them).
 * Exclusive and shared locks must go in separate LOCKING_ANDX requests,
 * hence the two-pass loop over types[]; each request batches up to
 * max_num ranges.  Clears can_cache_brlcks when done.
 * NOTE(review): truncated listing — return-type line, braces, loop
 * resets, error accumulation and the final return are missing lines.
 */
831 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
833 int xid, rc = 0, stored_rc;
834 struct cifsLockInfo *li, *tmp;
835 struct cifs_tcon *tcon;
836 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
837 unsigned int num, max_num;
838 LOCKING_ANDX_RANGE *buf, *cur;
839 int types[] = {LOCKING_ANDX_LARGE_FILES,
840 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
844 tcon = tlink_tcon(cfile->tlink);
846 mutex_lock(&cinode->lock_mutex);
/* nothing to push if locks were never cacheable (already on server) */
847 if (!cinode->can_cache_brlcks) {
848 mutex_unlock(&cinode->lock_mutex);
/* size the batch so one request fits in the negotiated buffer */
853 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
854 sizeof(LOCKING_ANDX_RANGE);
855 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
857 mutex_unlock(&cinode->lock_mutex);
862 for (i = 0; i < 2; i++) {
865 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
866 if (li->type != types[i])
868 cur->Pid = cpu_to_le16(li->pid);
869 cur->LengthLow = cpu_to_le32((u32)li->length);
870 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
871 cur->OffsetLow = cpu_to_le32((u32)li->offset);
872 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
/* batch full: send it and start a new one */
873 if (++num == max_num) {
874 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
875 li->type, 0, num, buf);
/* send any partial final batch for this lock type */
885 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
886 types[i], 0, num, buf);
892 cinode->can_cache_brlcks = false;
893 mutex_unlock(&cinode->lock_mutex);
900 /* copied from fs/locks.c with a name change */
901 #define cifs_for_each_lock(inode, lockp) \
902 for (lockp = &inode->i_flock; *lockp != NULL; \
903 lockp = &(*lockp)->fl_next)
/*
 * Flush all locally-cached POSIX byte-range locks to the server: walk
 * the VFS inode's i_flock chain, snapshot each lock into a temporary
 * cifsLockInfo list, then send each one via CIFSSMBPosixLock.  Clears
 * can_cache_brlcks when done.
 * NOTE(review): truncated listing — return-type line, braces, the
 * file-lock spinlock around the i_flock walk, type selection for
 * write locks, error accumulation and the final return are missing.
 */
906 cifs_push_posix_locks(struct cifsFileInfo *cfile)
908 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
909 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
910 struct file_lock *flock, **before;
911 struct cifsLockInfo *lck, *tmp;
912 int rc = 0, xid, type;
914 struct list_head locks_to_send;
918 mutex_lock(&cinode->lock_mutex);
919 if (!cinode->can_cache_brlcks) {
920 mutex_unlock(&cinode->lock_mutex);
925 INIT_LIST_HEAD(&locks_to_send);
928 cifs_for_each_lock(cfile->dentry->d_inode, before) {
930 length = 1 + flock->fl_end - flock->fl_start;
931 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
936 lck = cifs_lock_init(length, flock->fl_start, type,
/* preserve the original lock owner's pid for the wire request */
942 lck->pid = flock->fl_pid;
944 list_add_tail(&lck->llist, &locks_to_send);
950 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
951 struct file_lock tmp_lock;
954 tmp_lock.fl_start = lck->offset;
955 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
956 0, lck->length, &tmp_lock,
960 list_del(&lck->llist);
964 cinode->can_cache_brlcks = false;
965 mutex_unlock(&cinode->lock_mutex);
972 cifs_push_locks(struct cifsFileInfo *cfile)
974 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
975 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
977 if ((tcon->ses->capabilities & CAP_UNIX) &&
978 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
979 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
980 return cifs_push_posix_locks(cfile);
982 return cifs_push_mandatory_locks(cfile);
/*
 * Decode a struct file_lock into the pieces the lock/unlock paths
 * need: the LOCKING_ANDX type bits (*type), whether this is a lock or
 * unlock operation (*lock / *unlock), and whether we should block
 * waiting for the lock.  Unsupported flag combinations are only
 * logged, not rejected.
 * NOTE(review): truncated listing — return-type line, the wait-flag
 * out-parameter assignment, braces and the lock/unlock assignments
 * per fl_type branch are missing lines.
 */
986 cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
989 if (flock->fl_flags & FL_POSIX)
991 if (flock->fl_flags & FL_FLOCK)
993 if (flock->fl_flags & FL_SLEEP) {
994 cFYI(1, "Blocking lock");
997 if (flock->fl_flags & FL_ACCESS)
998 cFYI(1, "Process suspended by mandatory locking - "
999 "not implemented yet");
1000 if (flock->fl_flags & FL_LEASE)
1001 cFYI(1, "Lease on file - not implemented yet");
1002 if (flock->fl_flags &
1003 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
1004 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
/* all CIFS locks from this client use the 64-bit "large files" form */
1006 *type = LOCKING_ANDX_LARGE_FILES;
1007 if (flock->fl_type == F_WRLCK) {
1008 cFYI(1, "F_WRLCK ");
1010 } else if (flock->fl_type == F_UNLCK) {
1013 /* Check if unlock includes more than one lock range */
1014 } else if (flock->fl_type == F_RDLCK) {
1016 *type |= LOCKING_ANDX_SHARED_LOCK;
1018 } else if (flock->fl_type == F_EXLCK) {
1021 } else if (flock->fl_type == F_SHLCK) {
1023 *type |= LOCKING_ANDX_SHARED_LOCK;
1026 cFYI(1, "Unknown type of lock");
/*
 * Handle F_GETLK: for posix_lck mounts consult the local POSIX table
 * and then the server; otherwise check the local mandatory-lock cache
 * and, failing that, probe the server by attempting a non-blocking
 * lock of the same range (and the shared variant) and unlocking again
 * on success.  *flock is updated to describe the result.
 * NOTE(review): truncated listing — return-type line, braces, rc
 * checks between the probe steps and the final return are missing.
 */
1030 cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
1031 bool wait_flag, bool posix_lck, int xid)
1034 __u64 length = 1 + flock->fl_end - flock->fl_start;
1035 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1036 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1037 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1038 __u16 netfid = cfile->netfid;
1041 int posix_lock_type;
1043 rc = cifs_posix_lock_test(file, flock);
1047 if (type & LOCKING_ANDX_SHARED_LOCK)
1048 posix_lock_type = CIFS_RDLCK;
1050 posix_lock_type = CIFS_WRLCK;
1051 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1052 1 /* get */, length, flock,
1053 posix_lock_type, wait_flag);
1057 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
1062 /* BB we could chain these into one lock request BB */
/* probe: try to take the exact lock non-blocking ... */
1063 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1064 flock->fl_start, 0, 1, type, 0, 0);
/* ... it succeeded, so no conflict - undo the probe lock */
1066 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1067 length, flock->fl_start, 1, 0,
1069 flock->fl_type = F_UNLCK;
1071 cERROR(1, "Error unlocking previously locked "
1072 "range %d during test of lock", rc);
1077 if (type & LOCKING_ANDX_SHARED_LOCK) {
1078 flock->fl_type = F_WRLCK;
/* retry the probe as a shared lock to distinguish read conflicts */
1083 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1084 flock->fl_start, 0, 1,
1085 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
1087 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1088 length, flock->fl_start, 1, 0,
1089 type | LOCKING_ANDX_SHARED_LOCK,
1091 flock->fl_type = F_RDLCK;
1093 cERROR(1, "Error unlocking previously locked "
1094 "range %d during test of lock", rc);
1096 flock->fl_type = F_WRLCK;
1103 cifs_move_llist(struct list_head *source, struct list_head *dest)
1105 struct list_head *li, *tmp;
1106 list_for_each_safe(li, tmp, source)
1107 list_move(li, dest);
1111 cifs_free_llist(struct list_head *llist)
1113 struct cifsLockInfo *li, *tmp;
1114 list_for_each_entry_safe(li, tmp, llist, llist) {
1115 cifs_del_lock_waiters(li);
1116 list_del(&li->llist);
/*
 * Handle an unlock request against the mandatory-lock cache: find all
 * cached locks of this owner/handle fully contained in the unlock
 * range.  If locks are no longer cacheable, batch them into
 * LOCKING_ANDX unlock requests (one pass per lock type, up to max_num
 * ranges each); locks are parked on tmp_llist so they can be restored
 * to the inode list if the server rejects the unlock.  If locks are
 * still cacheable, simply drop them locally.
 * NOTE(review): truncated listing — return-type line, braces, loop
 * counters/resets, several continue statements and the final
 * kfree/return are missing lines.
 */
1122 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1124 int rc = 0, stored_rc;
1125 int types[] = {LOCKING_ANDX_LARGE_FILES,
1126 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1128 unsigned int max_num, num;
1129 LOCKING_ANDX_RANGE *buf, *cur;
1130 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1131 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1132 struct cifsLockInfo *li, *tmp;
1133 __u64 length = 1 + flock->fl_end - flock->fl_start;
1134 struct list_head tmp_llist;
1136 INIT_LIST_HEAD(&tmp_llist);
/* size the batch so one request fits in the negotiated buffer */
1138 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
1139 sizeof(LOCKING_ANDX_RANGE);
1140 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1144 mutex_lock(&cinode->lock_mutex);
1145 for (i = 0; i < 2; i++) {
1148 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
/* skip cached locks not fully contained in the unlock range */
1149 if (flock->fl_start > li->offset ||
1150 (flock->fl_start + length) <
1151 (li->offset + li->length))
1153 if (current->tgid != li->pid)
1155 if (cfile->netfid != li->netfid)
1157 if (types[i] != li->type)
1159 if (!cinode->can_cache_brlcks) {
1160 cur->Pid = cpu_to_le16(li->pid);
1161 cur->LengthLow = cpu_to_le32((u32)li->length);
1163 cpu_to_le32((u32)(li->length>>32));
1164 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1166 cpu_to_le32((u32)(li->offset>>32));
1168 * We need to save a lock here to let us add
1169 * it again to the inode list if the unlock
1170 * range request fails on the server.
1172 list_move(&li->llist, &tmp_llist);
1173 if (++num == max_num) {
1174 stored_rc = cifs_lockv(xid, tcon,
1180 * We failed on the unlock range
1181 * request - add all locks from
1182 * the tmp list to the head of
1185 cifs_move_llist(&tmp_llist,
1190 * The unlock range request
1191 * succeed - free the tmp list.
1193 cifs_free_llist(&tmp_llist);
1200 * We can cache brlock requests - simply remove
1201 * a lock from the inode list.
1203 list_del(&li->llist);
1204 cifs_del_lock_waiters(li);
/* send any partial final batch for this lock type */
1209 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1210 types[i], num, 0, buf);
1212 cifs_move_llist(&tmp_llist, &cinode->llist);
1215 cifs_free_llist(&tmp_llist);
1219 mutex_unlock(&cinode->lock_mutex);
/*
 * Handle F_SETLK/F_SETLKW: for posix_lck mounts try the local POSIX
 * table first, then send a CIFSSMBPosixLock set request; otherwise
 * take the lock locally via cifs_lock_add_if (which may block when
 * wait_flag is set), send the LOCKING_ANDX request, record it with
 * cifs_lock_add, or dispatch an unlock to cifs_unlock_range.  On
 * success the lock is also recorded in the VFS for FL_POSIX requests.
 * NOTE(review): truncated listing — return-type line, braces, rc
 * checks between steps and the final return are missing lines.
 */
1225 cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1226 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1229 __u64 length = 1 + flock->fl_end - flock->fl_start;
1230 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1231 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1232 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
1233 __u16 netfid = cfile->netfid;
1236 int posix_lock_type;
1238 rc = cifs_posix_lock_set(file, flock);
1242 if (type & LOCKING_ANDX_SHARED_LOCK)
1243 posix_lock_type = CIFS_RDLCK;
1245 posix_lock_type = CIFS_WRLCK;
1248 posix_lock_type = CIFS_UNLCK;
1250 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1251 0 /* set */, length, flock,
1252 posix_lock_type, wait_flag);
1257 rc = cifs_lock_add_if(cinode, flock->fl_start, length,
1258 type, netfid, wait_flag);
1264 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1265 flock->fl_start, 0, 1, type, wait_flag, 0);
1267 /* For Windows locks we must store them. */
1268 rc = cifs_lock_add(cinode, length, flock->fl_start,
1272 rc = cifs_unlock_range(cfile, flock, xid);
/* mirror a successful server lock into the local VFS lock table */
1275 if (flock->fl_flags & FL_POSIX)
1276 posix_lock_file_wait(file, flock);
/*
 * VFS ->lock entry point: decode the request with cifs_read_flock(),
 * decide whether the POSIX-extension lock path applies for this mount,
 * then dispatch to cifs_getlk() for F_GETLK or cifs_setlk() for
 * set/unset requests.
 * NOTE(review): truncated listing — xid handling, braces, the
 * posix_lck assignment and the final return are missing lines.
 */
1280 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1283 int lock = 0, unlock = 0;
1284 bool wait_flag = false;
1285 bool posix_lck = false;
1286 struct cifs_sb_info *cifs_sb;
1287 struct cifs_tcon *tcon;
1288 struct cifsInodeInfo *cinode;
1289 struct cifsFileInfo *cfile;
1296 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1297 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1298 flock->fl_start, flock->fl_end);
1300 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1302 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1303 cfile = (struct cifsFileInfo *)file->private_data;
1304 tcon = tlink_tcon(cfile->tlink);
1305 netfid = cfile->netfid;
1306 cinode = CIFS_I(file->f_path.dentry->d_inode);
/* POSIX-extension byte-range locks require CAP_UNIX + FCNTL cap and
   must not be disabled by the nobrl/noposixbrl mount option */
1308 if ((tcon->ses->capabilities & CAP_UNIX) &&
1309 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1310 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1313 * BB add code here to normalize offset and length to account for
1314 * negative length which we can not accept over the wire.
1316 if (IS_GETLK(cmd)) {
1317 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1322 if (!lock && !unlock) {
1324 * if no lock or unlock then nothing to do since we do not
1331 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1337 /* update the file size (if needed) after a write */
1339 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1340 unsigned int bytes_written)
1342 loff_t end_of_write = offset + bytes_written;
1344 if (end_of_write > cifsi->server_eof)
1345 cifsi->server_eof = end_of_write;
/*
 * Write write_size bytes from write_data to the server at *poffset,
 * looping until everything is sent.  -EAGAIN from the transport is
 * retried, reopening the handle first if it was invalidated; each
 * chunk is capped at the mount's wsize.  Updates the cached server
 * EOF, *poffset, the VFS inode size and write statistics.  Returns
 * the number of bytes written.
 * NOTE(review): truncated listing — xid handling, the iov declaration,
 * braces, the short-write error return and the final cleanup are
 * missing lines.
 */
1348 static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
1349 const char *write_data, size_t write_size,
1353 unsigned int bytes_written = 0;
1354 unsigned int total_written;
1355 struct cifs_sb_info *cifs_sb;
1356 struct cifs_tcon *pTcon;
1358 struct dentry *dentry = open_file->dentry;
1359 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1360 struct cifs_io_parms io_parms;
1362 cifs_sb = CIFS_SB(dentry->d_sb);
1364 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1365 *poffset, dentry->d_name.name);
1367 pTcon = tlink_tcon(open_file->tlink);
1371 for (total_written = 0; write_size > total_written;
1372 total_written += bytes_written) {
/* retry loop: transport returned -EAGAIN (e.g. reconnect needed) */
1374 while (rc == -EAGAIN) {
1378 if (open_file->invalidHandle) {
1379 /* we could deadlock if we called
1380 filemap_fdatawait from here so tell
1381 reopen_file not to flush data to
1383 rc = cifs_reopen_file(open_file, false);
/* cap each wire request at the negotiated write size */
1388 len = min((size_t)cifs_sb->wsize,
1389 write_size - total_written);
1390 /* iov[0] is reserved for smb header */
1391 iov[1].iov_base = (char *)write_data + total_written;
1392 iov[1].iov_len = len;
1393 io_parms.netfid = open_file->netfid;
1395 io_parms.tcon = pTcon;
1396 io_parms.offset = *poffset;
1397 io_parms.length = len;
1398 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1401 if (rc || (bytes_written == 0)) {
1409 cifs_update_eof(cifsi, *poffset, bytes_written);
1410 *poffset += bytes_written;
1414 cifs_stats_bytes_written(pTcon, total_written);
/* extend the VFS-visible file size if this write went past it */
1416 if (total_written > 0) {
1417 spin_lock(&dentry->d_inode->i_lock);
1418 if (*poffset > dentry->d_inode->i_size)
1419 i_size_write(dentry->d_inode, *poffset);
1420 spin_unlock(&dentry->d_inode->i_lock);
1422 mark_inode_dirty_sync(dentry->d_inode);
1424 return total_written;
/*
 * Find an open handle on this inode with FMODE_READ that is not marked
 * invalid.  Takes a reference (cifsFileInfo_get) under
 * cifs_file_list_lock before returning so the handle cannot be closed
 * out from under the caller.  On multiuser mounts, fsuid_only restricts
 * the search to handles owned by the current fsuid.  Returns NULL (via
 * the elided tail) if no suitable handle exists.
 */
1427 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1430 struct cifsFileInfo *open_file = NULL;
1431 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1433 /* only filter by fsuid on multiuser mounts */
1434 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1437 spin_lock(&cifs_file_list_lock);
1438 /* we could simply get the first_list_entry since write-only entries
1439 are always at the end of the list but since the first entry might
1440 have a close pending, we go through the whole list */
1441 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1442 if (fsuid_only && open_file->uid != current_fsuid())
1444 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1445 if (!open_file->invalidHandle) {
1446 /* found a good file */
1447 /* lock it so it will not be closed on us */
1448 cifsFileInfo_get(open_file);
1449 spin_unlock(&cifs_file_list_lock);
1451 } /* else might as well continue, and look for
1452 another, or simply have the caller reopen it
1453 again rather than trying to fix this handle */
1454 } else /* write only file */
1455 break; /* write only files are last so must be done */
1457 spin_unlock(&cifs_file_list_lock);
/*
 * Find (and reference) an open handle on this inode with FMODE_WRITE.
 * First pass prefers handles belonging to the current tgid; if none is
 * usable, any_available is set and the list is rescanned for any pid
 * (goto refind_writable, label elided in this excerpt).  A handle that
 * is invalid is reopened outside cifs_file_list_lock, since the reopen
 * can block; on reopen failure the reference is dropped and the scan
 * continues rather than looping forever on the same handle.
 */
1461 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1464 struct cifsFileInfo *open_file;
1465 struct cifs_sb_info *cifs_sb;
1466 bool any_available = false;
1469 /* Having a null inode here (because mapping->host was set to zero by
1470 the VFS or MM) should not happen but we had reports of on oops (due to
1471 it being zero) during stress testcases so we need to check for it */
1473 if (cifs_inode == NULL) {
1474 cERROR(1, "Null inode passed to cifs_writeable_file");
1479 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1481 /* only filter by fsuid on multiuser mounts */
1482 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1485 spin_lock(&cifs_file_list_lock);
1487 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1488 if (!any_available && open_file->pid != current->tgid)
1490 if (fsuid_only && open_file->uid != current_fsuid())
1492 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1493 cifsFileInfo_get(open_file);
1495 if (!open_file->invalidHandle) {
1496 /* found a good writable file */
1497 spin_unlock(&cifs_file_list_lock);
1501 spin_unlock(&cifs_file_list_lock);
1503 /* Had to unlock since following call can block */
1504 rc = cifs_reopen_file(open_file, false);
1508 /* if it fails, try another handle if possible */
1509 cFYI(1, "wp failed on reopen file");
1510 cifsFileInfo_put(open_file);
1512 spin_lock(&cifs_file_list_lock);
1514 /* else we simply continue to the next entry. Thus
1515 we do not loop on reopen errors. If we
1516 can not reopen the file, for example if we
1517 reconnected to a server with another client
1518 racing to delete or lock the file we would not
1519 make progress if we restarted before the beginning
1520 of the loop here. */
1523 /* couldn't find useable FH with same pid, try any available */
1524 if (!any_available) {
1525 any_available = true;
1526 goto refind_writable;
1528 spin_unlock(&cifs_file_list_lock);
/*
 * Write the [from, to) byte range of a page back to the server using any
 * writable handle found for the inode.  Clamps the range so the write
 * never extends the file, treats offsets past i_size as a benign race
 * with truncate, and maps the page with kmap to get the data pointer
 * (kunmap on the elided exit path, presumably — TODO confirm).
 */
1532 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1534 struct address_space *mapping = page->mapping;
1535 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1538 int bytes_written = 0;
1539 struct inode *inode;
1540 struct cifsFileInfo *open_file;
1542 if (!mapping || !mapping->host)
1545 inode = page->mapping->host;
1547 offset += (loff_t)from;
1548 write_data = kmap(page);
1551 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1556 /* racing with truncate? */
1557 if (offset > mapping->host->i_size) {
1559 return 0; /* don't care */
1562 /* check to make sure that we are not extending the file */
1563 if (mapping->host->i_size - offset < (loff_t)to)
1564 to = (unsigned)(mapping->host->i_size - offset);
1566 open_file = find_writable_file(CIFS_I(mapping->host), false);
1568 bytes_written = cifs_write(open_file, open_file->pid,
1569 write_data, to - from, &offset);
1570 cifsFileInfo_put(open_file);
1571 /* Does mm or vfs already set times? */
1572 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1573 if ((bytes_written > 0) && (offset))
1575 else if (bytes_written < 0)
1578 cFYI(1, "No writeable filehandles for inode");
/*
 * Address-space ->writepages: gather runs of consecutive dirty pages
 * (up to wsize worth per request) into a cifs_writedata and submit them
 * via cifs_async_writev.  Falls back to generic_writepages (one page at
 * a time) when wsize is smaller than a cache page.  Follows the usual
 * write_cache_pages pattern: tag lookup, page lock/recheck, writeback
 * flagging, then unlock and error cleanup after submission.
 * NOTE(review): many lines (rc/index/end declarations, retry labels,
 * closing braces) are elided in this excerpt.
 */
1586 static int cifs_writepages(struct address_space *mapping,
1587 struct writeback_control *wbc)
1589 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1590 bool done = false, scanned = false, range_whole = false;
1592 struct cifs_writedata *wdata;
1597 * If wsize is smaller than the page cache size, default to writing
1598 * one page at a time via cifs_writepage
1600 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1601 return generic_writepages(mapping, wbc);
1603 if (wbc->range_cyclic) {
1604 index = mapping->writeback_index; /* Start from prev offset */
1607 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1608 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1609 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
/* main loop: one async write request per iteration */
1614 while (!done && index <= end) {
1615 unsigned int i, nr_pages, found_pages;
1616 pgoff_t next = 0, tofind;
1617 struct page **pages;
1619 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1622 wdata = cifs_writedata_alloc((unsigned int)tofind);
1629 * find_get_pages_tag seems to return a max of 256 on each
1630 * iteration, so we must call it several times in order to
1631 * fill the array or the wsize is effectively limited to
1632 * 256 * PAGE_CACHE_SIZE.
1635 pages = wdata->pages;
1637 nr_pages = find_get_pages_tag(mapping, &index,
1638 PAGECACHE_TAG_DIRTY,
1640 found_pages += nr_pages;
1643 } while (nr_pages && tofind && index <= end);
1645 if (found_pages == 0) {
1646 kref_put(&wdata->refcount, cifs_writedata_release);
/* vet each found page: locked, still mapped, dirty, consecutive index */
1651 for (i = 0; i < found_pages; i++) {
1652 page = wdata->pages[i];
1654 * At this point we hold neither mapping->tree_lock nor
1655 * lock on the page itself: the page may be truncated or
1656 * invalidated (changing page->mapping to NULL), or even
1657 * swizzled back from swapper_space to tmpfs file
1663 else if (!trylock_page(page))
1666 if (unlikely(page->mapping != mapping)) {
1671 if (!wbc->range_cyclic && page->index > end) {
1677 if (next && (page->index != next)) {
1678 /* Not next consecutive page */
1683 if (wbc->sync_mode != WB_SYNC_NONE)
1684 wait_on_page_writeback(page);
1686 if (PageWriteback(page) ||
1687 !clear_page_dirty_for_io(page)) {
1693 * This actually clears the dirty bit in the radix tree.
1694 * See cifs_writepage() for more commentary.
1696 set_page_writeback(page);
1698 if (page_offset(page) >= mapping->host->i_size) {
1701 end_page_writeback(page);
1705 wdata->pages[i] = page;
1706 next = page->index + 1;
1710 /* reset index to refind any pages skipped */
1712 index = wdata->pages[0]->index + 1;
1714 /* put any pages we aren't going to use */
1715 for (i = nr_pages; i < found_pages; i++) {
1716 page_cache_release(wdata->pages[i]);
1717 wdata->pages[i] = NULL;
1720 /* nothing to write? */
1721 if (nr_pages == 0) {
1722 kref_put(&wdata->refcount, cifs_writedata_release);
1726 wdata->sync_mode = wbc->sync_mode;
1727 wdata->nr_pages = nr_pages;
1728 wdata->offset = page_offset(wdata->pages[0]);
/* drop any stale handle from a prior -EAGAIN pass before refinding one */
1731 if (wdata->cfile != NULL)
1732 cifsFileInfo_put(wdata->cfile);
1733 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1735 if (!wdata->cfile) {
1736 cERROR(1, "No writable handles for inode");
1740 rc = cifs_async_writev(wdata);
1741 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
1743 for (i = 0; i < nr_pages; ++i)
1744 unlock_page(wdata->pages[i]);
1746 /* send failure -- clean up the mess */
1748 for (i = 0; i < nr_pages; ++i) {
1750 redirty_page_for_writepage(wbc,
1753 SetPageError(wdata->pages[i]);
1754 end_page_writeback(wdata->pages[i]);
1755 page_cache_release(wdata->pages[i]);
1758 mapping_set_error(mapping, rc);
1760 kref_put(&wdata->refcount, cifs_writedata_release);
1762 wbc->nr_to_write -= nr_pages;
1763 if (wbc->nr_to_write <= 0)
1769 if (!scanned && !done) {
1771 * We hit the last page and there is more work to be done: wrap
1772 * back to the start of the file
1779 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1780 mapping->writeback_index = index;
/*
 * Write back a single locked page via cifs_partialpagewrite.  Sets the
 * writeback flag first (clearing the radix-tree dirty tag, see comment
 * below), retries -EAGAIN when sync_mode is WB_SYNC_ALL (retry label
 * elided in this excerpt) and redirties the page otherwise.
 */
1786 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1792 /* BB add check for wbc flags */
1793 page_cache_get(page);
1794 if (!PageUptodate(page))
1795 cFYI(1, "ppw - page not up to date");
1798 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1800 * A writepage() implementation always needs to do either this,
1801 * or re-dirty the page with "redirty_page_for_writepage()" in
1802 * the case of a failure.
1804 * Just unlocking the page will cause the radix tree tag-bits
1805 * to fail to update with the state of the page correctly.
1807 set_page_writeback(page);
1809 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1810 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1812 else if (rc == -EAGAIN)
1813 redirty_page_for_writepage(wbc, page);
1817 SetPageUptodate(page);
1818 end_page_writeback(page);
1819 page_cache_release(page);
/*
 * Address-space ->writepage: delegate to cifs_writepage_locked.
 * NOTE(review): the unlock_page/return lines are elided in this excerpt.
 */
1824 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1826 int rc = cifs_writepage_locked(page, wbc);
/*
 * Address-space ->write_end: finish a buffered write started by
 * cifs_write_begin.  A PageChecked page (write_begin decided the rest of
 * the page counts as up to date) is marked uptodate and dirtied; a page
 * that is still not uptodate is written synchronously to the server via
 * cifs_write using this file's own handle.  Grows i_size under i_lock if
 * the write extended the file.  The pid passed to cifs_write is the
 * opener's pid when RWPIDFORWARD is set, else the current tgid.
 */
1831 static int cifs_write_end(struct file *file, struct address_space *mapping,
1832 loff_t pos, unsigned len, unsigned copied,
1833 struct page *page, void *fsdata)
1836 struct inode *inode = mapping->host;
1837 struct cifsFileInfo *cfile = file->private_data;
1838 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1841 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1844 pid = current->tgid;
1846 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1849 if (PageChecked(page)) {
1851 SetPageUptodate(page);
1852 ClearPageChecked(page);
1853 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1854 SetPageUptodate(page);
1856 if (!PageUptodate(page)) {
1858 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1862 /* this is probably better than directly calling
1863 partialpage_write since in this function the file handle is
1864 known which we might as well leverage */
1865 /* BB check if anything else missing out of ppw
1866 such as updating last write time */
1867 page_data = kmap(page);
1868 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
1869 /* if (rc < 0) should we set writebehind rc? */
1876 set_page_dirty(page);
1880 spin_lock(&inode->i_lock);
1881 if (pos > inode->i_size)
1882 i_size_write(inode, pos);
1883 spin_unlock(&inode->i_lock);
1887 page_cache_release(page);
/*
 * ->fsync for strict-cache mounts: flush the dirty range, then (without
 * a read oplock) invalidate the page cache so subsequent reads go to the
 * server, and finally send an SMB Flush unless NOSSYNC is set.
 * Invalidate failures are logged but deliberately not returned.
 */
1892 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
1897 struct cifs_tcon *tcon;
1898 struct cifsFileInfo *smbfile = file->private_data;
1899 struct inode *inode = file->f_path.dentry->d_inode;
1900 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1902 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1905 mutex_lock(&inode->i_mutex);
1909 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1910 file->f_path.dentry->d_name.name, datasync);
1912 if (!CIFS_I(inode)->clientCanCacheRead) {
1913 rc = cifs_invalidate_mapping(inode);
1915 cFYI(1, "rc: %d during invalidate phase", rc);
1916 rc = 0; /* don't care about it in fsync */
1920 tcon = tlink_tcon(smbfile->tlink);
1921 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1922 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1925 mutex_unlock(&inode->i_mutex);
/*
 * Regular ->fsync: flush the dirty range, then send an SMB Flush for the
 * handle unless the NOSSYNC mount flag is set.  Same as
 * cifs_strict_fsync but without the page-cache invalidation step.
 */
1929 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1933 struct cifs_tcon *tcon;
1934 struct cifsFileInfo *smbfile = file->private_data;
1935 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1936 struct inode *inode = file->f_mapping->host;
1938 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1941 mutex_lock(&inode->i_mutex);
1945 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1946 file->f_path.dentry->d_name.name, datasync);
1948 tcon = tlink_tcon(smbfile->tlink);
1949 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1950 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1953 mutex_unlock(&inode->i_mutex);
1958 * As file closes, flush all cached write data for this inode checking
1959 * for write behind errors.
/* ->flush: only bothers when the file was open for write. */
1961 int cifs_flush(struct file *file, fl_owner_t id)
1963 struct inode *inode = file->f_path.dentry->d_inode;
1966 if (file->f_mode & FMODE_WRITE)
1967 rc = filemap_write_and_wait(inode->i_mapping);
1969 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
/*
 * Allocate num_pages highmem pages into the pages[] array.  On
 * allocation failure the already-allocated pages are released (cleanup
 * loop below; the intervening error-path lines are elided in this
 * excerpt) and ENOMEM is reported.
 */
1975 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1980 for (i = 0; i < num_pages; i++) {
1981 pages[i] = alloc_page(__GFP_HIGHMEM);
1984 * save number of pages we have already allocated and
1985 * return with ENOMEM error
1996 for (i = 0; i < num_pages; i++)
/*
 * Compute how many pages are needed for the next write: clamp len to
 * wsize (stored via *cur_len, assignment line elided in this excerpt)
 * and round the page count up for any partial trailing page.
 */
2002 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2007 clen = min_t(const size_t, len, wsize);
2008 num_pages = clen / PAGE_CACHE_SIZE;
2009 if (clen % PAGE_CACHE_SIZE)
/*
 * Uncached (direct-style) vectored write: copy user iovec data into a
 * temporary page array, then send it to the server with CIFSSMBWrite2
 * in wsize-sized batches, retrying on -EAGAIN.  Updates *poffset, the
 * cached server EOF, i_size (under i_lock) and write statistics.
 * Returns total bytes written.  Uses the opener's pid when
 * RWPIDFORWARD is set, else the current tgid.
 * NOTE(review): error paths, kunmap calls and frees are elided in this
 * excerpt; also "sizeof(struct pages *)" at line 2048 looks like it
 * should be "sizeof(struct page *)" — same size on all arches, but the
 * type is bogus; confirm against the upstream source.
 */
2019 cifs_iovec_write(struct file *file, const struct iovec *iov,
2020 unsigned long nr_segs, loff_t *poffset)
2022 unsigned int written;
2023 unsigned long num_pages, npages, i;
2024 size_t copied, len, cur_len;
2025 ssize_t total_written = 0;
2026 struct kvec *to_send;
2027 struct page **pages;
2029 struct inode *inode;
2030 struct cifsFileInfo *open_file;
2031 struct cifs_tcon *pTcon;
2032 struct cifs_sb_info *cifs_sb;
2033 struct cifs_io_parms io_parms;
2037 len = iov_length(iov, nr_segs);
2041 rc = generic_write_checks(file, poffset, &len, 0);
2045 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2046 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2048 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
2052 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
2058 rc = cifs_write_allocate_pages(pages, num_pages);
2066 open_file = file->private_data;
2068 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2069 pid = open_file->pid;
2071 pid = current->tgid;
2073 pTcon = tlink_tcon(open_file->tlink);
2074 inode = file->f_path.dentry->d_inode;
2076 iov_iter_init(&it, iov, nr_segs, len, 0);
/* stage the next batch: copy user data into pages, map into kvecs */
2080 size_t save_len = cur_len;
2081 for (i = 0; i < npages; i++) {
2082 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
2083 copied = iov_iter_copy_from_user(pages[i], &it, 0,
2086 iov_iter_advance(&it, copied);
2087 to_send[i+1].iov_base = kmap(pages[i]);
2088 to_send[i+1].iov_len = copied;
2091 cur_len = save_len - cur_len;
2094 if (open_file->invalidHandle) {
2095 rc = cifs_reopen_file(open_file, false);
2099 io_parms.netfid = open_file->netfid;
2101 io_parms.tcon = pTcon;
2102 io_parms.offset = *poffset;
2103 io_parms.length = cur_len;
2104 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
2106 } while (rc == -EAGAIN);
2108 for (i = 0; i < npages; i++)
2113 total_written += written;
2114 cifs_update_eof(CIFS_I(inode), *poffset, written);
2115 *poffset += written;
2116 } else if (rc < 0) {
2122 /* get length and number of kvecs of the next write */
2123 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
2126 if (total_written > 0) {
2127 spin_lock(&inode->i_lock);
2128 if (*poffset > inode->i_size)
2129 i_size_write(inode, *poffset);
2130 spin_unlock(&inode->i_lock);
2133 cifs_stats_bytes_written(pTcon, total_written);
2134 mark_inode_dirty_sync(inode);
2136 for (i = 0; i < num_pages; i++)
2141 return total_written;
/*
 * ->aio_write for uncached I/O: delegate to cifs_iovec_write and, on a
 * successful write, mark the inode's page cache mapping invalid so later
 * cached reads refetch from the server.
 */
2144 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
2145 unsigned long nr_segs, loff_t pos)
2148 struct inode *inode;
2150 inode = iocb->ki_filp->f_path.dentry->d_inode;
2153 * BB - optimize the way when signing is disabled. We can drop this
2154 * extra memory-to-memory copying and use iovec buffers for constructing
2158 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2160 CIFS_I(inode)->invalid_mapping = true;
/*
 * ->aio_write for strict-cache mounts: with a full (write) oplock the
 * generic cached path is safe; otherwise write through to the server via
 * cifs_user_writev to avoid mandatory-lock conflicts on cached pages.
 */
2167 ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2168 unsigned long nr_segs, loff_t pos)
2170 struct inode *inode;
2172 inode = iocb->ki_filp->f_path.dentry->d_inode;
2174 if (CIFS_I(inode)->clientCanCacheAll)
2175 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2178 * In strict cache mode we need to write the data to the server exactly
2179 * from the pos to pos+len-1 rather than flush all affected pages
2180 * because it may cause a error with mandatory locks on these pages but
2181 * not on the region from pos to ppos+len-1.
2184 return cifs_user_writev(iocb, iov, nr_segs, pos);
/*
 * Uncached vectored read: issue CIFSSMBRead calls of at most rsize bytes
 * each (retrying -EAGAIN, reopening invalidated handles), copy the data
 * area of each response buffer straight into the user iovec with
 * memcpy_toiovecend, and release the SMB response buffer.  Advances
 * *poffset and read statistics as data arrives; returns total bytes
 * read (return path elided in this excerpt).
 */
2188 cifs_iovec_read(struct file *file, const struct iovec *iov,
2189 unsigned long nr_segs, loff_t *poffset)
2194 unsigned int bytes_read = 0;
2195 size_t len, cur_len;
2197 struct cifs_sb_info *cifs_sb;
2198 struct cifs_tcon *pTcon;
2199 struct cifsFileInfo *open_file;
2200 struct smb_com_read_rsp *pSMBr;
2201 struct cifs_io_parms io_parms;
2209 len = iov_length(iov, nr_segs);
2214 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2216 /* FIXME: set up handlers for larger reads and/or convert to async */
2217 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2219 open_file = file->private_data;
2220 pTcon = tlink_tcon(open_file->tlink);
2222 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2223 pid = open_file->pid;
2225 pid = current->tgid;
2227 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2228 cFYI(1, "attempting read on write only file instance");
2230 for (total_read = 0; total_read < len; total_read += bytes_read) {
2231 cur_len = min_t(const size_t, len - total_read, rsize);
2235 while (rc == -EAGAIN) {
2236 int buf_type = CIFS_NO_BUFFER;
2237 if (open_file->invalidHandle) {
2238 rc = cifs_reopen_file(open_file, true);
2242 io_parms.netfid = open_file->netfid;
2244 io_parms.tcon = pTcon;
2245 io_parms.offset = *poffset;
2246 io_parms.length = cur_len;
2247 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2248 &read_data, &buf_type);
2249 pSMBr = (struct smb_com_read_rsp *)read_data;
/* data starts after the 4-byte RFC1001 length plus DataOffset */
2251 char *data_offset = read_data + 4 +
2252 le16_to_cpu(pSMBr->DataOffset);
2253 if (memcpy_toiovecend(iov, data_offset,
2254 iov_offset, bytes_read))
2256 if (buf_type == CIFS_SMALL_BUFFER)
2257 cifs_small_buf_release(read_data);
2258 else if (buf_type == CIFS_LARGE_BUFFER)
2259 cifs_buf_release(read_data);
2261 iov_offset += bytes_read;
2265 if (rc || (bytes_read == 0)) {
2273 cifs_stats_bytes_read(pTcon, bytes_read);
2274 *poffset += bytes_read;
/* ->aio_read for uncached I/O: thin wrapper around cifs_iovec_read. */
2282 ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
2283 unsigned long nr_segs, loff_t pos)
2287 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
/*
 * ->aio_read for strict-cache mounts: with a read oplock the generic
 * cached path is safe; otherwise read directly from the server via
 * cifs_user_readv (see the rationale in the comment below).
 */
2294 ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2295 unsigned long nr_segs, loff_t pos)
2297 struct inode *inode;
2299 inode = iocb->ki_filp->f_path.dentry->d_inode;
2301 if (CIFS_I(inode)->clientCanCacheRead)
2302 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2305 * In strict cache mode we need to read from the server all the time
2306 * if we don't have level II oplock because the server can delay mtime
2307 * change - so we can't make a decision about inode invalidating.
2308 * And we can also fail with pagereading if there are mandatory locks
2309 * on pages affected by this read but not on the region from pos to
2313 return cifs_user_readv(iocb, iov, nr_segs, pos);
/*
 * Synchronous read of read_size bytes at *poffset into read_data,
 * issued as a series of CIFSSMBRead calls of at most rsize bytes each
 * (further clamped for servers without CAP_LARGE_FILES, e.g. Win9x/ME).
 * Retries -EAGAIN, reopening an invalidated handle first.  Advances
 * *poffset and read statistics; returns bytes read (return path elided
 * in this excerpt).
 */
2316 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
2320 unsigned int bytes_read = 0;
2321 unsigned int total_read;
2322 unsigned int current_read_size;
2324 struct cifs_sb_info *cifs_sb;
2325 struct cifs_tcon *pTcon;
2327 char *current_offset;
2328 struct cifsFileInfo *open_file;
2329 struct cifs_io_parms io_parms;
2330 int buf_type = CIFS_NO_BUFFER;
2334 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2336 /* FIXME: set up handlers for larger reads and/or convert to async */
2337 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2339 if (file->private_data == NULL) {
2344 open_file = file->private_data;
2345 pTcon = tlink_tcon(open_file->tlink);
2347 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2348 pid = open_file->pid;
2350 pid = current->tgid;
2352 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2353 cFYI(1, "attempting read on write only file instance");
2355 for (total_read = 0, current_offset = read_data;
2356 read_size > total_read;
2357 total_read += bytes_read, current_offset += bytes_read) {
2358 current_read_size = min_t(uint, read_size - total_read, rsize);
2360 /* For windows me and 9x we do not want to request more
2361 than it negotiated since it will refuse the read then */
2363 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
2364 current_read_size = min_t(uint, current_read_size,
2368 while (rc == -EAGAIN) {
2369 if (open_file->invalidHandle) {
2370 rc = cifs_reopen_file(open_file, true);
2374 io_parms.netfid = open_file->netfid;
2376 io_parms.tcon = pTcon;
2377 io_parms.offset = *poffset;
2378 io_parms.length = current_read_size;
2379 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
/* NOTE(review): "¤t_offset" below is mojibake — the HTML entity
   "&curren;" swallowed the start of "&current_offset"; restore the
   original "&current_offset" when fixing the file's encoding. */
2380 ¤t_offset, &buf_type);
2382 if (rc || (bytes_read == 0)) {
2390 cifs_stats_bytes_read(pTcon, total_read);
2391 *poffset += bytes_read;
2399 * If the page is mmap'ed into a process' page tables, then we need to make
2400 * sure that it doesn't change while being written back.
/* ->page_mkwrite: return the page locked so writeback can't race it. */
2403 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2405 struct page *page = vmf->page;
2408 return VM_FAULT_LOCKED;
/* VM ops shared by both mmap variants below. */
2411 static struct vm_operations_struct cifs_file_vm_ops = {
2412 .fault = filemap_fault,
2413 .page_mkwrite = cifs_page_mkwrite,
/*
 * ->mmap for strict-cache mounts: invalidate the page cache first when
 * we hold no read oplock, then set up the generic mapping with our
 * vm_ops so page_mkwrite pins pages during writeback.
 */
2416 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2419 struct inode *inode = file->f_path.dentry->d_inode;
2423 if (!CIFS_I(inode)->clientCanCacheRead) {
2424 rc = cifs_invalidate_mapping(inode);
2429 rc = generic_file_mmap(file, vma);
2431 vma->vm_ops = &cifs_file_vm_ops;
/*
 * Regular ->mmap: revalidate the file first (failure only logged here;
 * error handling lines elided in this excerpt), then generic mmap with
 * our vm_ops installed.
 */
2436 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2441 rc = cifs_revalidate_file(file);
2443 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
2447 rc = generic_file_mmap(file, vma);
2449 vma->vm_ops = &cifs_file_vm_ops;
/*
 * Address-space ->readpages: batch contiguous pages from page_list (in
 * increasing index order, up to rsize bytes) into a cifs_readdata and
 * submit each batch with cifs_async_readv.  Tries fscache first, bails
 * out if rsize cannot cover one page, and on submission failure puts
 * the staged pages back on the LRU and releases them.
 * NOTE(review): rc/pid declarations, readdata pid assignment and the
 * final return are elided in this excerpt.
 */
2454 static int cifs_readpages(struct file *file, struct address_space *mapping,
2455 struct list_head *page_list, unsigned num_pages)
2458 struct list_head tmplist;
2459 struct cifsFileInfo *open_file = file->private_data;
2460 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2461 unsigned int rsize = cifs_sb->rsize;
2465 * Give up immediately if rsize is too small to read an entire page.
2466 * The VFS will fall back to readpage. We should never reach this
2467 * point however since we set ra_pages to 0 when the rsize is smaller
2468 * than a cache page.
2470 if (unlikely(rsize < PAGE_CACHE_SIZE))
2474 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2475 * immediately if the cookie is negative
2477 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2482 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2483 pid = open_file->pid;
2485 pid = current->tgid;
2488 INIT_LIST_HEAD(&tmplist);
2490 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
2491 mapping, num_pages);
2494 * Start with the page at end of list and move it to private
2495 * list. Do the same with any following pages until we hit
2496 * the rsize limit, hit an index discontinuity, or run out of
2497 * pages. Issue the async read and then start the loop again
2498 * until the list is empty.
2500 * Note that list order is important. The page_list is in
2501 * the order of declining indexes. When we put the pages in
2502 * the rdata->pages, then we want them in increasing order.
2504 while (!list_empty(page_list)) {
2505 unsigned int bytes = PAGE_CACHE_SIZE;
2506 unsigned int expected_index;
2507 unsigned int nr_pages = 1;
2509 struct page *page, *tpage;
2510 struct cifs_readdata *rdata;
2512 page = list_entry(page_list->prev, struct page, lru);
2515 * Lock the page and put it in the cache. Since no one else
2516 * should have access to this page, we're safe to simply set
2517 * PG_locked without checking it first.
2519 __set_page_locked(page);
2520 rc = add_to_page_cache_locked(page, mapping,
2521 page->index, GFP_KERNEL);
2523 /* give up if we can't stick it in the cache */
2525 __clear_page_locked(page);
2529 /* move first page to the tmplist */
2530 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2531 list_move_tail(&page->lru, &tmplist);
2533 /* now try and add more pages onto the request */
2534 expected_index = page->index + 1;
2535 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
2536 /* discontinuity ? */
2537 if (page->index != expected_index)
2540 /* would this page push the read over the rsize? */
2541 if (bytes + PAGE_CACHE_SIZE > rsize)
2544 __set_page_locked(page);
2545 if (add_to_page_cache_locked(page, mapping,
2546 page->index, GFP_KERNEL)) {
2547 __clear_page_locked(page);
2550 list_move_tail(&page->lru, &tmplist);
2551 bytes += PAGE_CACHE_SIZE;
2556 rdata = cifs_readdata_alloc(nr_pages);
2558 /* best to give up if we're out of mem */
2559 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
2560 list_del(&page->lru);
2561 lru_cache_add_file(page);
2563 page_cache_release(page);
/* pin the file handle for the lifetime of the async request */
2569 spin_lock(&cifs_file_list_lock);
2570 cifsFileInfo_get(open_file);
2571 spin_unlock(&cifs_file_list_lock);
2572 rdata->cfile = open_file;
2573 rdata->mapping = mapping;
2574 rdata->offset = offset;
2575 rdata->bytes = bytes;
2577 list_splice_init(&tmplist, &rdata->pages);
2580 if (open_file->invalidHandle) {
2581 rc = cifs_reopen_file(open_file, true);
2585 rc = cifs_async_readv(rdata);
2586 } while (rc == -EAGAIN);
2589 list_for_each_entry_safe(page, tpage, &rdata->pages,
2591 list_del(&page->lru);
2592 lru_cache_add_file(page);
2594 page_cache_release(page);
2596 cifs_readdata_free(rdata);
/*
 * Fill one page: try fscache first, else read from the server with
 * cifs_read, zero the tail of a short read, mark the page uptodate and
 * push it back into fscache.  Updates i_atime to "now" after a read.
 * NOTE(review): kunmap and the error/exit labels are elided in this
 * excerpt.
 */
2604 static int cifs_readpage_worker(struct file *file, struct page *page,
2610 /* Is the page cached? */
2611 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2615 page_cache_get(page);
2616 read_data = kmap(page);
2617 /* for reads over a certain size could initiate async read ahead */
2619 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
2624 cFYI(1, "Bytes read %d", rc);
2626 file->f_path.dentry->d_inode->i_atime =
2627 current_fs_time(file->f_path.dentry->d_inode->i_sb);
/* short read: zero the remainder so no stale data leaks to userspace */
2629 if (PAGE_CACHE_SIZE > rc)
2630 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2632 flush_dcache_page(page);
2633 SetPageUptodate(page);
2635 /* send this page to the cache */
2636 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2642 page_cache_release(page);
/*
 * Address-space ->readpage: compute the byte offset from the page index
 * and delegate to cifs_readpage_worker.  Rejects files with no private
 * handle data (error body elided in this excerpt).
 */
2648 static int cifs_readpage(struct file *file, struct page *page)
2650 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2656 if (file->private_data == NULL) {
2662 cFYI(1, "readpage %p at offset %d 0x%x\n",
2663 page, (int)offset, (int)offset);
2665 rc = cifs_readpage_worker(file, page, &offset);
/*
 * Return nonzero if any handle on this inode is open with FMODE_WRITE.
 * Scans the open-file list under cifs_file_list_lock; the early-return
 * on a match is elided in this excerpt.
 */
2673 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2675 struct cifsFileInfo *open_file;
2677 spin_lock(&cifs_file_list_lock);
2678 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2679 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2680 spin_unlock(&cifs_file_list_lock);
2684 spin_unlock(&cifs_file_list_lock);
2688 /* We do not want to update the file size from server for inodes
2689 open for write - to avoid races with writepage extending
2690 the file - in the future we could consider allowing
2691 refreshing the inode only on increases in the file size
2692 but this is tricky to do without racing with writebehind
2693 page caching in the current Linux kernel design */
/*
 * Decide whether the server-reported end_of_file may be applied to this
 * inode: always safe when the inode isn't open for write, or on
 * direct-IO mounts (no page cache to corrupt); otherwise only when the
 * new size is not smaller than the locally cached i_size.
 */
2694 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2699 if (is_inode_writable(cifsInode)) {
2700 /* This inode is open for write at least once */
2701 struct cifs_sb_info *cifs_sb;
2703 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2704 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2705 /* since no page cache to corrupt on directio
2706 we can change size safely */
2710 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
/*
 * Address-space ->write_begin: grab (or create) the target page and
 * decide whether it must be read from the server before the copy.  The
 * read is skipped when the page is already uptodate, when the write
 * covers a full page, or — with a read oplock — when the page lies at or
 * past EOF / the write covers all existing data (page is then marked
 * PageChecked so write_end knows the untouched parts count as uptodate).
 * Otherwise a readable open does a synchronous readpage_worker; a
 * write-only open just proceeds, relying on write_end's sync write.
 */
2718 static int cifs_write_begin(struct file *file, struct address_space *mapping,
2719 loff_t pos, unsigned len, unsigned flags,
2720 struct page **pagep, void **fsdata)
2722 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2723 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
2724 loff_t page_start = pos & PAGE_MASK;
2729 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
2731 page = grab_cache_page_write_begin(mapping, index, flags);
2737 if (PageUptodate(page))
2741 * If we write a full page it will be up to date, no need to read from
2742 * the server. If the write is short, we'll end up doing a sync write
2745 if (len == PAGE_CACHE_SIZE)
2749 * optimize away the read when we have an oplock, and we're not
2750 * expecting to use any of the data we'd be reading in. That
2751 * is, when the page lies beyond the EOF, or straddles the EOF
2752 * and the write will cover all of the existing data.
2754 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2755 i_size = i_size_read(mapping->host);
2756 if (page_start >= i_size ||
2757 (offset == 0 && (pos + len) >= i_size)) {
2758 zero_user_segments(page, 0, offset,
2762 * PageChecked means that the parts of the page
2763 * to which we're not writing are considered up
2764 * to date. Once the data is copied to the
2765 * page, it can be set uptodate.
2767 SetPageChecked(page);
2772 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2774 * might as well read a page, it is fast enough. If we get
2775 * an error, we don't need to return it. cifs_write_end will
2776 * do a sync write instead since PG_uptodate isn't set.
2778 cifs_readpage_worker(file, page, &page_start);
2780 /* we could try using another file handle if there is one -
2781 but how would we lock it to prevent close of that handle
2782 racing with this read? In any case
2783 this will be written out by write_end so is fine */
/*
 * ->releasepage: refuse when the page has private data attached,
 * otherwise defer to fscache's release check.
 */
2790 static int cifs_release_page(struct page *page, gfp_t gfp)
2792 if (PagePrivate(page))
2795 return cifs_fscache_release_page(page, gfp);
/* ->invalidatepage: propagate the invalidation to fscache. */
2798 static void cifs_invalidate_page(struct page *page, unsigned long offset)
2800 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2803 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
/*
 * ->launder_page: synchronously write a dirty page before it is
 * invalidated, using a WB_SYNC_ALL writeback_control covering exactly
 * this page, then drop it from fscache.
 */
2806 static int cifs_launder_page(struct page *page)
2809 loff_t range_start = page_offset(page);
2810 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2811 struct writeback_control wbc = {
2812 .sync_mode = WB_SYNC_ALL,
2814 .range_start = range_start,
2815 .range_end = range_end,
2818 cFYI(1, "Launder page: %p", page);
2820 if (clear_page_dirty_for_io(page))
2821 rc = cifs_writepage_locked(page, &wbc);
2823 cifs_fscache_invalidate_page(page, page->mapping->host);
/*
 * Workqueue handler run when the server breaks our oplock: notify any
 * lease holders, flush dirty data, and — if the read cache is also being
 * revoked — wait for writeback and invalidate cached pages.  Pushes
 * cached byte-range locks to the server, then acknowledges the break
 * with a LOCKING_ANDX_OPLOCK_RELEASE unless the break was cancelled
 * (e.g. the session reconnected and the handle is stale anyway).
 */
2827 void cifs_oplock_break(struct work_struct *work)
2829 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2831 struct inode *inode = cfile->dentry->d_inode;
2832 struct cifsInodeInfo *cinode = CIFS_I(inode);
2835 if (inode && S_ISREG(inode->i_mode)) {
2836 if (cinode->clientCanCacheRead)
2837 break_lease(inode, O_RDONLY);
2839 break_lease(inode, O_WRONLY);
2840 rc = filemap_fdatawrite(inode->i_mapping);
2841 if (cinode->clientCanCacheRead == 0) {
2842 rc = filemap_fdatawait(inode->i_mapping);
2843 mapping_set_error(inode->i_mapping, rc);
2844 invalidate_remote_inode(inode);
2846 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
2849 rc = cifs_push_locks(cfile);
2851 cERROR(1, "Push locks rc = %d", rc);
2854 * releasing stale oplock after recent reconnect of smb session using
2855 * a now incorrect file handle is not a data integrity issue but do
2856 * not bother sending an oplock release if session to server still is
2857 * disconnected since oplock already released by the server
2859 if (!cfile->oplock_break_cancelled) {
2860 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
2861 current->tgid, 0, 0, 0, 0,
2862 LOCKING_ANDX_OPLOCK_RELEASE, false,
2863 cinode->clientCanCacheRead ? 1 : 0);
2864 cFYI(1, "Oplock release rc = %d", rc);
/*
 * Default address-space operations, used when the server supports
 * buffers large enough for cifs_readpages (cf. cifs_addr_ops_smallbuf).
 */
2868 const struct address_space_operations cifs_addr_ops = {
2869 .readpage = cifs_readpage,
2870 .readpages = cifs_readpages,
2871 .writepage = cifs_writepage,
2872 .writepages = cifs_writepages,
2873 .write_begin = cifs_write_begin,
2874 .write_end = cifs_write_end,
2875 .set_page_dirty = __set_page_dirty_nobuffers,
2876 .releasepage = cifs_release_page,
2877 .invalidatepage = cifs_invalidate_page,
2878 .launder_page = cifs_launder_page,
2882 * cifs_readpages requires the server to support a buffer large enough to
2883 * contain the header plus one complete page of data. Otherwise, we need
2884 * to leave cifs_readpages out of the address space operations.
2886 const struct address_space_operations cifs_addr_ops_smallbuf = {
2887 .readpage = cifs_readpage,
2888 .writepage = cifs_writepage,
2889 .writepages = cifs_writepages,
2890 .write_begin = cifs_write_begin,
2891 .write_end = cifs_write_end,
2892 .set_page_dirty = __set_page_dirty_nobuffers,
2893 .releasepage = cifs_release_page,
2894 .invalidatepage = cifs_invalidate_page,
2895 .launder_page = cifs_launder_page,