1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <asm/div64.h>
36 #include "cifsfs.h"
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_unicode.h"
41 #include "cifs_debug.h"
42 #include "cifs_fs_sb.h"
43 #include "fscache.h"
44
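/*
 * Map the VFS open flags (the O_ACCMODE portion) to the SMB desired-access
 * bits requested when opening the file on the server.
 */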
45 static inline int cifs_convert_flags(unsigned int flags)
46 {
47         if ((flags & O_ACCMODE) == O_RDONLY)
48                 return GENERIC_READ;
49         else if ((flags & O_ACCMODE) == O_WRONLY)
50                 return GENERIC_WRITE;
51         else if ((flags & O_ACCMODE) == O_RDWR) {
52         /* GENERIC_ALL is too much permission to request; it can
53            cause an unnecessary access denied error on create */
54                 /* return GENERIC_ALL; */
55                 return (GENERIC_READ | GENERIC_WRITE);
56         }
57
58         return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59                 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60                 FILE_READ_DATA);
61 }
62
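/*
 * Translate VFS open flags into the SMB_O_* flags used by the POSIX
 * open/create call provided by the CIFS Unix Extensions.
 */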
63 static u32 cifs_posix_convert_flags(unsigned int flags)
64 {
65         u32 posix_flags = 0;
66
67         if ((flags & O_ACCMODE) == O_RDONLY)
68                 posix_flags = SMB_O_RDONLY;
69         else if ((flags & O_ACCMODE) == O_WRONLY)
70                 posix_flags = SMB_O_WRONLY;
71         else if ((flags & O_ACCMODE) == O_RDWR)
72                 posix_flags = SMB_O_RDWR;
73
74         if (flags & O_CREAT)
75                 posix_flags |= SMB_O_CREAT;
76         if (flags & O_EXCL)
77                 posix_flags |= SMB_O_EXCL;
78         if (flags & O_TRUNC)
79                 posix_flags |= SMB_O_TRUNC;
80         /* be safe and imply O_SYNC for O_DSYNC */
81         if (flags & O_DSYNC)
82                 posix_flags |= SMB_O_SYNC;
83         if (flags & O_DIRECTORY)
84                 posix_flags |= SMB_O_DIRECTORY;
85         if (flags & O_NOFOLLOW)
86                 posix_flags |= SMB_O_NOFOLLOW;
87         if (flags & O_DIRECT)
88                 posix_flags |= SMB_O_DIRECT;
89
90         return posix_flags;
91 }
92
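/*
 * Pick the CIFS create disposition matching the O_CREAT/O_EXCL/O_TRUNC
 * combination (see the mapping table in cifs_open below).
 */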
93 static inline int cifs_get_disposition(unsigned int flags)
94 {
95         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96                 return FILE_CREATE;
97         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98                 return FILE_OVERWRITE_IF;
99         else if ((flags & O_CREAT) == O_CREAT)
100                 return FILE_OPEN_IF;
101         else if ((flags & O_TRUNC) == O_TRUNC)
102                 return FILE_OVERWRITE;
103         else
104                 return FILE_OPEN;
105 }
106
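/*
 * After an open completes, invalidate locally cached pages if the file
 * changed on the server (unless we already hold a read oplock), then
 * refresh the inode metadata and record the oplock level.
 */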
107 static inline int cifs_open_inode_helper(struct inode *inode,
108         struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
109         char *full_path, int xid)
110 {
111         struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
112         struct timespec temp;
113         int rc;
114
115         if (pCifsInode->clientCanCacheRead) {
116                 /* we have the inode open somewhere else
117                    no need to discard cache data */
118                 goto client_can_cache;
119         }
120
121         /* BB need same check in cifs_create too? */
122         /* if not oplocked, invalidate inode pages if mtime or file
123            size changed */
124         temp = cifs_NTtimeToUnix(buf->LastWriteTime);
125         if (timespec_equal(&inode->i_mtime, &temp) &&
126                            (inode->i_size ==
127                             (loff_t)le64_to_cpu(buf->EndOfFile))) {
128                 cFYI(1, "inode unchanged on server");
129         } else {
130                 if (inode->i_mapping) {
131                         /* BB no need to lock inode until after invalidate
132                         since namei code should already have it locked? */
133                         rc = filemap_write_and_wait(inode->i_mapping);
134                         mapping_set_error(inode->i_mapping, rc);
135                 }
136                 cFYI(1, "invalidating remote inode since open detected it "
137                          "changed");
138                 invalidate_remote_inode(inode);
139         }
140
141 client_can_cache:
142         if (pTcon->unix_ext)
143                 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
144                                               xid);
145         else
146                 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
147                                          xid, NULL);
148
149         cifs_set_oplock_level(inode, oplock);
150
151         return rc;
152 }
153
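/*
 * Open (or create) a file using the POSIX create call of the CIFS Unix
 * Extensions and, when pinode is supplied, instantiate or update the
 * inode from the attributes returned by the server.
 */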
154 int cifs_posix_open(char *full_path, struct inode **pinode,
155                         struct super_block *sb, int mode, unsigned int f_flags,
156                         __u32 *poplock, __u16 *pnetfid, int xid)
157 {
158         int rc;
159         FILE_UNIX_BASIC_INFO *presp_data;
160         __u32 posix_flags = 0;
161         struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
162         struct cifs_fattr fattr;
163         struct tcon_link *tlink;
164         struct cifsTconInfo *tcon;
165
166         cFYI(1, "posix open %s", full_path);
167
168         presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
169         if (presp_data == NULL)
170                 return -ENOMEM;
171
172         tlink = cifs_sb_tlink(cifs_sb);
173         if (IS_ERR(tlink)) {
174                 rc = PTR_ERR(tlink);
175                 goto posix_open_ret;
176         }
177
178         tcon = tlink_tcon(tlink);
179         mode &= ~current_umask();
180
181         posix_flags = cifs_posix_convert_flags(f_flags);
182         rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
183                              poplock, full_path, cifs_sb->local_nls,
184                              cifs_sb->mnt_cifs_flags &
185                                         CIFS_MOUNT_MAP_SPECIAL_CHR);
186         cifs_put_tlink(tlink);
187
188         if (rc)
189                 goto posix_open_ret;
190
191         if (presp_data->Type == cpu_to_le32(-1))
192                 goto posix_open_ret; /* open ok, caller does qpathinfo */
193
194         if (!pinode)
195                 goto posix_open_ret; /* caller does not need info */
196
197         cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
198
199         /* get new inode and set it up */
200         if (*pinode == NULL) {
201                 cifs_fill_uniqueid(sb, &fattr);
202                 *pinode = cifs_iget(sb, &fattr);
203                 if (!*pinode) {
204                         rc = -ENOMEM;
205                         goto posix_open_ret;
206                 }
207         } else {
208                 cifs_fattr_to_inode(*pinode, &fattr);
209         }
210
211 posix_open_ret:
212         kfree(presp_data);
213         return rc;
214 }
215
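/*
 * Allocate and initialize the per-open-file structure, take references on
 * the dentry and tlink, and link it into the tcon and inode open file
 * lists (readable instances first) before attaching it to the struct file.
 */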
216 struct cifsFileInfo *
217 cifs_new_fileinfo(__u16 fileHandle, struct file *file,
218                   struct tcon_link *tlink, __u32 oplock)
219 {
220         struct dentry *dentry = file->f_path.dentry;
221         struct inode *inode = dentry->d_inode;
222         struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
223         struct cifsFileInfo *pCifsFile;
224
225         pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
226         if (pCifsFile == NULL)
227                 return pCifsFile;
228
229         pCifsFile->count = 1;
230         pCifsFile->netfid = fileHandle;
231         pCifsFile->pid = current->tgid;
232         pCifsFile->uid = current_fsuid();
233         pCifsFile->dentry = dget(dentry);
234         pCifsFile->f_flags = file->f_flags;
235         pCifsFile->invalidHandle = false;
236         pCifsFile->tlink = cifs_get_tlink(tlink);
237         mutex_init(&pCifsFile->fh_mutex);
238         mutex_init(&pCifsFile->lock_mutex);
239         INIT_LIST_HEAD(&pCifsFile->llist);
240         INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
241
242         spin_lock(&cifs_file_list_lock);
243         list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
244         /* if this is a readable file instance, put it first in the list */
245         if (file->f_mode & FMODE_READ)
246                 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
247         else
248                 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
249         spin_unlock(&cifs_file_list_lock);
250
251         cifs_set_oplock_level(inode, oplock);
252
253         file->private_data = pCifsFile;
254         return pCifsFile;
255 }
256
257 /*
258  * Release a reference on the file private data. This may involve closing
259  * the filehandle out on the server. Must be called without holding
260  * cifs_file_list_lock.
261  */
262 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
263 {
264         struct inode *inode = cifs_file->dentry->d_inode;
265         struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
266         struct cifsInodeInfo *cifsi = CIFS_I(inode);
267         struct cifsLockInfo *li, *tmp;
268
269         spin_lock(&cifs_file_list_lock);
270         if (--cifs_file->count > 0) {
271                 spin_unlock(&cifs_file_list_lock);
272                 return;
273         }
274
275         /* remove it from the lists */
276         list_del(&cifs_file->flist);
277         list_del(&cifs_file->tlist);
278
279         if (list_empty(&cifsi->openFileList)) {
280                 cFYI(1, "closing last open instance for inode %p",
281                         cifs_file->dentry->d_inode);
282                 cifs_set_oplock_level(inode, 0);
283         }
284         spin_unlock(&cifs_file_list_lock);
285
286         if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
287                 int xid, rc;
288
289                 xid = GetXid();
290                 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
291                 FreeXid(xid);
292         }
293
294         /* Delete any outstanding lock records. We'll lose them when the file
295          * is closed anyway.
296          */
297         mutex_lock(&cifs_file->lock_mutex);
298         list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
299                 list_del(&li->llist);
300                 kfree(li);
301         }
302         mutex_unlock(&cifs_file->lock_mutex);
303
304         cifs_put_tlink(cifs_file->tlink);
305         dput(cifs_file->dentry);
306         kfree(cifs_file);
307 }
308
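/*
 * VFS ->open entry point: try a POSIX open when the server advertises the
 * Unix Extensions, otherwise map the flags to an NT (or legacy OpenX) SMB
 * open and refresh the inode from the returned file information.
 */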
309 int cifs_open(struct inode *inode, struct file *file)
310 {
311         int rc = -EACCES;
312         int xid;
313         __u32 oplock;
314         struct cifs_sb_info *cifs_sb;
315         struct cifsTconInfo *tcon;
316         struct tcon_link *tlink;
317         struct cifsFileInfo *pCifsFile = NULL;
318         struct cifsInodeInfo *pCifsInode;
319         char *full_path = NULL;
320         int desiredAccess;
321         int disposition;
322         __u16 netfid;
323         FILE_ALL_INFO *buf = NULL;
324
325         xid = GetXid();
326
327         cifs_sb = CIFS_SB(inode->i_sb);
328         tlink = cifs_sb_tlink(cifs_sb);
329         if (IS_ERR(tlink)) {
330                 FreeXid(xid);
331                 return PTR_ERR(tlink);
332         }
333         tcon = tlink_tcon(tlink);
334
335         pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
336
337         full_path = build_path_from_dentry(file->f_path.dentry);
338         if (full_path == NULL) {
339                 rc = -ENOMEM;
340                 goto out;
341         }
342
343         cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
344                  inode, file->f_flags, full_path);
345
346         if (oplockEnabled)
347                 oplock = REQ_OPLOCK;
348         else
349                 oplock = 0;
350
351         if (!tcon->broken_posix_open && tcon->unix_ext &&
352             (tcon->ses->capabilities & CAP_UNIX) &&
353             (CIFS_UNIX_POSIX_PATH_OPS_CAP &
354                         le64_to_cpu(tcon->fsUnixInfo.Capability))) {
355                 /* can not refresh inode info since size could be stale */
356                 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
357                                 cifs_sb->mnt_file_mode /* ignored */,
358                                 file->f_flags, &oplock, &netfid, xid);
359                 if (rc == 0) {
360                         cFYI(1, "posix open succeeded");
361
362                         pCifsFile = cifs_new_fileinfo(netfid, file, tlink,
363                                                       oplock);
364                         if (pCifsFile == NULL) {
365                                 CIFSSMBClose(xid, tcon, netfid);
366                                 rc = -ENOMEM;
367                         }
368
369                         cifs_fscache_set_inode_cookie(inode, file);
370
371                         goto out;
372                 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
373                         if (tcon->ses->serverNOS)
374                                 cERROR(1, "server %s of type %s returned"
375                                            " unexpected error on SMB posix open"
376                                            ", disabling posix open support."
377                                            " Check if server update available.",
378                                            tcon->ses->serverName,
379                                            tcon->ses->serverNOS);
380                         tcon->broken_posix_open = true;
381                 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
382                          (rc != -EOPNOTSUPP)) /* path not found or net err */
383                         goto out;
384                 /* else fallthrough to retry open the old way on network i/o
385                    or DFS errors */
386         }
387
388         desiredAccess = cifs_convert_flags(file->f_flags);
389
390 /*********************************************************************
391  *  open flag mapping table:
392  *
393  *      POSIX Flag            CIFS Disposition
394  *      ----------            ----------------
395  *      O_CREAT               FILE_OPEN_IF
396  *      O_CREAT | O_EXCL      FILE_CREATE
397  *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
398  *      O_TRUNC               FILE_OVERWRITE
399  *      none of the above     FILE_OPEN
400  *
401  *      Note that there is no POSIX flag that maps directly to the
402  *      FILE_SUPERSEDE disposition (i.e. create whether or not the file
403  *      exists); O_CREAT | O_TRUNC is similar, but it truncates an
404  *      existing file rather than replacing it as FILE_SUPERSEDE does
405  *      (which uses the attributes/metadata passed in on the open call).
406  *
407  *      O_SYNC is a reasonable match to the CIFS writethrough flag,
408  *      and the read/write flags match reasonably.  O_LARGEFILE is
409  *      irrelevant because large file support is always used by this
410  *      client.  The flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
411  *      O_NOFOLLOW and O_NONBLOCK need further investigation.
412  *********************************************************************/
413
414         disposition = cifs_get_disposition(file->f_flags);
415
416         /* BB pass O_SYNC flag through on file attributes .. BB */
417
418         /* Also refresh inode by passing in file_info buf returned by SMBOpen
419            and calling get_inode_info with returned buf (at least helps
420            non-Unix server case) */
421
422         /* BB we can not do this if this is the second open of a file
423            and the first handle has writebehind data, we might be
424            able to simply do a filemap_fdatawrite/filemap_fdatawait first */
425         buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
426         if (!buf) {
427                 rc = -ENOMEM;
428                 goto out;
429         }
430
431         if (tcon->ses->capabilities & CAP_NT_SMBS)
432                 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
433                          desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
434                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
435                                  & CIFS_MOUNT_MAP_SPECIAL_CHR);
436         else
437                 rc = -EIO; /* no NT SMB support, fall through to legacy open below */
438
439         if (rc == -EIO) {
440                 /* Old server, try legacy style OpenX */
441                 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
442                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
443                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
444                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
445         }
446         if (rc) {
447                 cFYI(1, "cifs_open returned 0x%x", rc);
448                 goto out;
449         }
450
451         rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
452         if (rc != 0)
453                 goto out;
454
455         pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
456         if (pCifsFile == NULL) {
457                 rc = -ENOMEM;
458                 goto out;
459         }
460
461         cifs_fscache_set_inode_cookie(inode, file);
462
463         if (oplock & CIFS_CREATE_ACTION) {
464                 /* time to set mode which we can not set earlier due to
465                    problems creating new read-only files */
466                 if (tcon->unix_ext) {
467                         struct cifs_unix_set_info_args args = {
468                                 .mode   = inode->i_mode,
469                                 .uid    = NO_CHANGE_64,
470                                 .gid    = NO_CHANGE_64,
471                                 .ctime  = NO_CHANGE_64,
472                                 .atime  = NO_CHANGE_64,
473                                 .mtime  = NO_CHANGE_64,
474                                 .device = 0,
475                         };
476                         CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
477                                                cifs_sb->local_nls,
478                                                cifs_sb->mnt_cifs_flags &
479                                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
480                 }
481         }
482
483 out:
484         kfree(buf);
485         kfree(full_path);
486         FreeXid(xid);
487         cifs_put_tlink(tlink);
488         return rc;
489 }
490
491 /* Try to reacquire byte range locks that were released when session */
492 /* to server was lost */
493 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
494 {
495         int rc = 0;
496
497 /* BB list all locks open on this file and relock */
498
499         return rc;
500 }
501
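/*
 * Reopen a file handle that was invalidated, typically after a session
 * reconnect.  When can_flush is set it is safe to flush dirty pages and
 * refresh the inode info from the server as well.
 */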
502 static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
503 {
504         int rc = -EACCES;
505         int xid;
506         __u32 oplock;
507         struct cifs_sb_info *cifs_sb;
508         struct cifsTconInfo *tcon;
509         struct cifsInodeInfo *pCifsInode;
510         struct inode *inode;
511         char *full_path = NULL;
512         int desiredAccess;
513         int disposition = FILE_OPEN;
514         __u16 netfid;
515
516         xid = GetXid();
517         mutex_lock(&pCifsFile->fh_mutex);
518         if (!pCifsFile->invalidHandle) {
519                 mutex_unlock(&pCifsFile->fh_mutex);
520                 rc = 0;
521                 FreeXid(xid);
522                 return rc;
523         }
524
525         inode = pCifsFile->dentry->d_inode;
526         cifs_sb = CIFS_SB(inode->i_sb);
527         tcon = tlink_tcon(pCifsFile->tlink);
528
529 /* can not grab rename sem here because various ops, including
530    those that already have the rename sem can end up causing writepage
531    to get called and if the server was down that means we end up here,
532    and we can never tell if the caller already has the rename_sem */
533         full_path = build_path_from_dentry(pCifsFile->dentry);
534         if (full_path == NULL) {
535                 rc = -ENOMEM;
536                 mutex_unlock(&pCifsFile->fh_mutex);
537                 FreeXid(xid);
538                 return rc;
539         }
540
541         cFYI(1, "inode = 0x%p file flags 0x%x for %s",
542                  inode, pCifsFile->f_flags, full_path);
543
544         if (oplockEnabled)
545                 oplock = REQ_OPLOCK;
546         else
547                 oplock = 0;
548
549         if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
550             (CIFS_UNIX_POSIX_PATH_OPS_CAP &
551                         le64_to_cpu(tcon->fsUnixInfo.Capability))) {
552
553                 /*
554                  * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
555                  * original open. Must mask them off for a reopen.
556                  */
557                 unsigned int oflags = pCifsFile->f_flags &
558                                                 ~(O_CREAT | O_EXCL | O_TRUNC);
559
560                 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
561                                 cifs_sb->mnt_file_mode /* ignored */,
562                                 oflags, &oplock, &netfid, xid);
563                 if (rc == 0) {
564                         cFYI(1, "posix reopen succeeded");
565                         goto reopen_success;
566                 }
567                 /* fallthrough to retry open the old way on errors, especially
568                    in the reconnect path it is important to retry hard */
569         }
570
571         desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
572
573         /* Can not refresh inode by passing in file_info buf to be returned
574            by SMBOpen and then calling get_inode_info with returned buf
575            since file might have write behind data that needs to be flushed
576            and server version of file size can be stale. If we knew for sure
577            that inode was not dirty locally we could do this */
578
579         rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
580                          CREATE_NOT_DIR, &netfid, &oplock, NULL,
581                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
582                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
583         if (rc) {
584                 mutex_unlock(&pCifsFile->fh_mutex);
585                 cFYI(1, "cifs_open returned 0x%x", rc);
586                 cFYI(1, "oplock: %d", oplock);
587                 goto reopen_error_exit;
588         }
589
590 reopen_success:
591         pCifsFile->netfid = netfid;
592         pCifsFile->invalidHandle = false;
593         mutex_unlock(&pCifsFile->fh_mutex);
594         pCifsInode = CIFS_I(inode);
595
596         if (can_flush) {
597                 rc = filemap_write_and_wait(inode->i_mapping);
598                 mapping_set_error(inode->i_mapping, rc);
599
600                 if (tcon->unix_ext)
601                         rc = cifs_get_inode_info_unix(&inode,
602                                 full_path, inode->i_sb, xid);
603                 else
604                         rc = cifs_get_inode_info(&inode,
605                                 full_path, NULL, inode->i_sb,
606                                 xid, NULL);
607         } /* else we are already writing out data to the server
608              and could deadlock if we tried to flush data, and
609              since we do not know if we have data that would
610              invalidate the current end of file on the server
611              we can not go to the server to get the new inode
612              info */
613
614         cifs_set_oplock_level(inode, oplock);
615
616         cifs_relock_file(pCifsFile);
617
618 reopen_error_exit:
619         kfree(full_path);
620         FreeXid(xid);
621         return rc;
622 }
623
624 int cifs_close(struct inode *inode, struct file *file)
625 {
626         cifsFileInfo_put(file->private_data);
627         file->private_data = NULL;
628
629         /* return code from the ->release op is always ignored */
630         return 0;
631 }
632
633 int cifs_closedir(struct inode *inode, struct file *file)
634 {
635         int rc = 0;
636         int xid;
637         struct cifsFileInfo *pCFileStruct = file->private_data;
638         char *ptmp;
639
640         cFYI(1, "Closedir inode = 0x%p", inode);
641
642         xid = GetXid();
643
644         if (pCFileStruct) {
645                 struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
646
647                 cFYI(1, "Freeing private data in close dir");
648                 spin_lock(&cifs_file_list_lock);
649                 if (!pCFileStruct->srch_inf.endOfSearch &&
650                     !pCFileStruct->invalidHandle) {
651                         pCFileStruct->invalidHandle = true;
652                         spin_unlock(&cifs_file_list_lock);
653                         rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
654                         cFYI(1, "Closing uncompleted readdir with rc %d",
655                                  rc);
656                         /* not much we can do if it fails anyway, ignore rc */
657                         rc = 0;
658                 } else
659                         spin_unlock(&cifs_file_list_lock);
660                 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
661                 if (ptmp) {
662                         cFYI(1, "closedir free smb buf in srch struct");
663                         pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
664                         if (pCFileStruct->srch_inf.smallBuf)
665                                 cifs_small_buf_release(ptmp);
666                         else
667                                 cifs_buf_release(ptmp);
668                 }
669                 cifs_put_tlink(pCFileStruct->tlink);
670                 kfree(file->private_data);
671                 file->private_data = NULL;
672         }
673         /* BB can we lock the filestruct while this is going on? */
674         FreeXid(xid);
675         return rc;
676 }
677
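/*
 * Remember a byte-range lock granted by the server so it can be matched
 * against later unlock requests and cleaned up when the file is closed.
 */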
678 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
679                                 __u64 offset, __u8 lockType)
680 {
681         struct cifsLockInfo *li =
682                 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
683         if (li == NULL)
684                 return -ENOMEM;
685         li->offset = offset;
686         li->length = len;
687         li->type = lockType;
688         mutex_lock(&fid->lock_mutex);
689         list_add(&li->llist, &fid->llist);
690         mutex_unlock(&fid->lock_mutex);
691         return 0;
692 }
693
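/*
 * Handle byte-range lock requests: use POSIX locks when the server and
 * mount options allow it, otherwise fall back to Windows-style
 * LOCKING_ANDX locks (probing with a lock/unlock pair for lock tests).
 */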
694 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
695 {
696         int rc, xid;
697         __u32 numLock = 0;
698         __u32 numUnlock = 0;
699         __u64 length;
700         bool wait_flag = false;
701         struct cifs_sb_info *cifs_sb;
702         struct cifsTconInfo *tcon;
703         __u16 netfid;
704         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
705         bool posix_locking = 0;
706
707         length = 1 + pfLock->fl_end - pfLock->fl_start;
708         rc = -EACCES;
709         xid = GetXid();
710
711         cFYI(1, "Lock parm: 0x%x flockflags: "
712                  "0x%x flocktype: 0x%x start: %lld end: %lld",
713                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
714                 pfLock->fl_end);
715
716         if (pfLock->fl_flags & FL_POSIX)
717                 cFYI(1, "Posix");
718         if (pfLock->fl_flags & FL_FLOCK)
719                 cFYI(1, "Flock");
720         if (pfLock->fl_flags & FL_SLEEP) {
721                 cFYI(1, "Blocking lock");
722                 wait_flag = true;
723         }
724         if (pfLock->fl_flags & FL_ACCESS)
725                 cFYI(1, "Process suspended by mandatory locking - "
726                          "not implemented yet");
727         if (pfLock->fl_flags & FL_LEASE)
728                 cFYI(1, "Lease on file - not implemented yet");
729         if (pfLock->fl_flags &
730             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
731                 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
732
733         if (pfLock->fl_type == F_WRLCK) {
734                 cFYI(1, "F_WRLCK ");
735                 numLock = 1;
736         } else if (pfLock->fl_type == F_UNLCK) {
737                 cFYI(1, "F_UNLCK");
738                 numUnlock = 1;
739                 /* Check if unlock includes more than
740                 one lock range */
741         } else if (pfLock->fl_type == F_RDLCK) {
742                 cFYI(1, "F_RDLCK");
743                 lockType |= LOCKING_ANDX_SHARED_LOCK;
744                 numLock = 1;
745         } else if (pfLock->fl_type == F_EXLCK) {
746                 cFYI(1, "F_EXLCK");
747                 numLock = 1;
748         } else if (pfLock->fl_type == F_SHLCK) {
749                 cFYI(1, "F_SHLCK");
750                 lockType |= LOCKING_ANDX_SHARED_LOCK;
751                 numLock = 1;
752         } else
753                 cFYI(1, "Unknown type of lock");
754
755         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
756
757         if (file->private_data == NULL) {
758                 rc = -EBADF;
759                 FreeXid(xid);
760                 return rc;
761         }
762         tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
763         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
764
765         if ((tcon->ses->capabilities & CAP_UNIX) &&
766             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
767             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
768                 posix_locking = 1;
769         /* BB add code here to normalize offset and length to
770         account for negative length which we can not accept over the
771         wire */
772         if (IS_GETLK(cmd)) {
773                 if (posix_locking) {
774                         int posix_lock_type;
775                         if (lockType & LOCKING_ANDX_SHARED_LOCK)
776                                 posix_lock_type = CIFS_RDLCK;
777                         else
778                                 posix_lock_type = CIFS_WRLCK;
779                         rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
780                                         length, pfLock,
781                                         posix_lock_type, wait_flag);
782                         FreeXid(xid);
783                         return rc;
784                 }
785
786                 /* BB we could chain these into one lock request BB */
787                 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
788                                  0, 1, lockType, 0 /* wait flag */ );
789                 if (rc == 0) {
790                         rc = CIFSSMBLock(xid, tcon, netfid, length,
791                                          pfLock->fl_start, 1 /* numUnlock */ ,
792                                          0 /* numLock */ , lockType,
793                                          0 /* wait flag */ );
794                         pfLock->fl_type = F_UNLCK;
795                         if (rc != 0)
796                                 cERROR(1, "Error unlocking previously locked "
797                                            "range %d during test of lock", rc);
798                         rc = 0;
799
800                 } else {
801                         /* if rc == ERR_SHARING_VIOLATION ? */
802                         rc = 0;
803
804                         if (lockType & LOCKING_ANDX_SHARED_LOCK) {
805                                 pfLock->fl_type = F_WRLCK;
806                         } else {
807                                 rc = CIFSSMBLock(xid, tcon, netfid, length,
808                                         pfLock->fl_start, 0, 1,
809                                         lockType | LOCKING_ANDX_SHARED_LOCK,
810                                         0 /* wait flag */);
811                                 if (rc == 0) {
812                                         rc = CIFSSMBLock(xid, tcon, netfid,
813                                                 length, pfLock->fl_start, 1, 0,
814                                                 lockType |
815                                                 LOCKING_ANDX_SHARED_LOCK,
816                                                 0 /* wait flag */);
817                                         pfLock->fl_type = F_RDLCK;
818                                         if (rc != 0)
819                                                 cERROR(1, "Error unlocking "
820                                                 "previously locked range %d "
821                                                 "during test of lock", rc);
822                                         rc = 0;
823                                 } else {
824                                         pfLock->fl_type = F_WRLCK;
825                                         rc = 0;
826                                 }
827                         }
828                 }
829
830                 FreeXid(xid);
831                 return rc;
832         }
833
834         if (!numLock && !numUnlock) {
835                 /* if no lock or unlock then nothing
836                 to do since we do not know what it is */
837                 FreeXid(xid);
838                 return -EOPNOTSUPP;
839         }
840
841         if (posix_locking) {
842                 int posix_lock_type;
843                 if (lockType & LOCKING_ANDX_SHARED_LOCK)
844                         posix_lock_type = CIFS_RDLCK;
845                 else
846                         posix_lock_type = CIFS_WRLCK;
847
848                 if (numUnlock == 1)
849                         posix_lock_type = CIFS_UNLCK;
850
851                 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
852                                       length, pfLock,
853                                       posix_lock_type, wait_flag);
854         } else {
855                 struct cifsFileInfo *fid = file->private_data;
856
857                 if (numLock) {
858                         rc = CIFSSMBLock(xid, tcon, netfid, length,
859                                         pfLock->fl_start,
860                                         0, numLock, lockType, wait_flag);
861
862                         if (rc == 0) {
863                                 /* For Windows locks we must store them. */
864                                 rc = store_file_lock(fid, length,
865                                                 pfLock->fl_start, lockType);
866                         }
867                 } else if (numUnlock) {
868                         /* For each stored lock that this unlock overlaps
869                            completely, unlock it. */
870                         int stored_rc = 0;
871                         struct cifsLockInfo *li, *tmp;
872
873                         rc = 0;
874                         mutex_lock(&fid->lock_mutex);
875                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
876                                 if (pfLock->fl_start <= li->offset &&
877                                                 (pfLock->fl_start + length) >=
878                                                 (li->offset + li->length)) {
879                                         stored_rc = CIFSSMBLock(xid, tcon,
880                                                         netfid,
881                                                         li->length, li->offset,
882                                                         1, 0, li->type, false);
883                                         if (stored_rc)
884                                                 rc = stored_rc;
885                                         else {
886                                                 list_del(&li->llist);
887                                                 kfree(li);
888                                         }
889                                 }
890                         }
891                         mutex_unlock(&fid->lock_mutex);
892                 }
893         }
894
895         if (pfLock->fl_flags & FL_POSIX)
896                 posix_lock_file_wait(file, pfLock);
897         FreeXid(xid);
898         return rc;
899 }
900
901 /*
902  * Set the timeout on write requests past EOF. For some servers (Windows)
903  * these calls can be very long.
904  *
905  * If we're writing >10M past the EOF we give a 180s timeout. Anything less
906  * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
907  * The 10M cutoff is totally arbitrary. A better scheme for this would be
908  * welcome if someone wants to suggest one.
909  *
910  * We may be able to do a better job with this if there were some way to
911  * declare that a file should be sparse.
912  */
913 static int
914 cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
915 {
916         if (offset <= cifsi->server_eof)
917                 return CIFS_STD_OP;
918         else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
919                 return CIFS_VLONG_OP;
920         else
921                 return CIFS_LONG_OP;
922 }
923
924 /* update the file size (if needed) after a write */
925 static void
926 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
927                       unsigned int bytes_written)
928 {
929         loff_t end_of_write = offset + bytes_written;
930
931         if (end_of_write > cifsi->server_eof)
932                 cifsi->server_eof = end_of_write;
933 }
934
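/*
 * Write data from a user buffer to the server, retrying on -EAGAIN and
 * reopening an invalidated handle if needed; the cached server EOF and
 * i_size are updated as data is written.
 */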
935 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
936         size_t write_size, loff_t *poffset)
937 {
938         struct inode *inode = file->f_path.dentry->d_inode;
939         int rc = 0;
940         unsigned int bytes_written = 0;
941         unsigned int total_written;
942         struct cifs_sb_info *cifs_sb;
943         struct cifsTconInfo *pTcon;
944         int xid, long_op;
945         struct cifsFileInfo *open_file;
946         struct cifsInodeInfo *cifsi = CIFS_I(inode);
947
948         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
949
950         /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
951            *poffset, file->f_path.dentry->d_name.name); */
952
953         if (file->private_data == NULL)
954                 return -EBADF;
955
956         open_file = file->private_data;
957         pTcon = tlink_tcon(open_file->tlink);
958
959         rc = generic_write_checks(file, poffset, &write_size, 0);
960         if (rc)
961                 return rc;
962
963         xid = GetXid();
964
965         long_op = cifs_write_timeout(cifsi, *poffset);
966         for (total_written = 0; write_size > total_written;
967              total_written += bytes_written) {
968                 rc = -EAGAIN;
969                 while (rc == -EAGAIN) {
970                         if (file->private_data == NULL) {
971                                 /* file has been closed on us */
972                                 FreeXid(xid);
973                         /* if we have gotten here we have written some data
974                            and blocked, and the file has been freed on us while
975                            we blocked so return what we managed to write */
976                                 return total_written;
977                         }
978                         if (open_file->invalidHandle) {
979                                 /* we could deadlock if we called
980                                    filemap_fdatawait from here so tell
981                                    reopen_file not to flush data to server
982                                    now */
983                                 rc = cifs_reopen_file(open_file, false);
984                                 if (rc != 0)
985                                         break;
986                         }
987
988                         rc = CIFSSMBWrite(xid, pTcon,
989                                 open_file->netfid,
990                                 min_t(const int, cifs_sb->wsize,
991                                       write_size - total_written),
992                                 *poffset, &bytes_written,
993                                 NULL, write_data + total_written, long_op);
994                 }
995                 if (rc || (bytes_written == 0)) {
996                         if (total_written)
997                                 break;
998                         else {
999                                 FreeXid(xid);
1000                                 return rc;
1001                         }
1002                 } else {
1003                         cifs_update_eof(cifsi, *poffset, bytes_written);
1004                         *poffset += bytes_written;
1005                 }
1006                 long_op = CIFS_STD_OP; /* subsequent writes fast -
1007                                     15 seconds is plenty */
1008         }
1009
1010         cifs_stats_bytes_written(pTcon, total_written);
1011
1012 /* Do not update local mtime - server will set its actual value on write
1013  *      inode->i_ctime = inode->i_mtime =
1014  *              current_fs_time(inode->i_sb);*/
1015         if (total_written > 0) {
1016                 spin_lock(&inode->i_lock);
1017                 if (*poffset > inode->i_size)
1018                         i_size_write(inode, *poffset);
1019                 spin_unlock(&inode->i_lock);
1020         }
1021         mark_inode_dirty_sync(inode);
1022
1023         FreeXid(xid);
1024         return total_written;
1025 }
1026
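/*
 * Write data from a kernel buffer (used by the page writeback paths); the
 * kvec-based CIFSSMBWrite2 is preferred when the experimental code is
 * enabled or signing is not in use.
 */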
1027 static ssize_t cifs_write(struct cifsFileInfo *open_file,
1028                           const char *write_data, size_t write_size,
1029                           loff_t *poffset)
1030 {
1031         int rc = 0;
1032         unsigned int bytes_written = 0;
1033         unsigned int total_written;
1034         struct cifs_sb_info *cifs_sb;
1035         struct cifsTconInfo *pTcon;
1036         int xid, long_op;
1037         struct dentry *dentry = open_file->dentry;
1038         struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1039
1040         cifs_sb = CIFS_SB(dentry->d_sb);
1041
1042         cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1043            *poffset, dentry->d_name.name);
1044
1045         pTcon = tlink_tcon(open_file->tlink);
1046
1047         xid = GetXid();
1048
1049         long_op = cifs_write_timeout(cifsi, *poffset);
1050         for (total_written = 0; write_size > total_written;
1051              total_written += bytes_written) {
1052                 rc = -EAGAIN;
1053                 while (rc == -EAGAIN) {
1054                         if (open_file->invalidHandle) {
1055                                 /* we could deadlock if we called
1056                                    filemap_fdatawait from here so tell
1057                                    reopen_file not to flush data to
1058                                    server now */
1059                                 rc = cifs_reopen_file(open_file, false);
1060                                 if (rc != 0)
1061                                         break;
1062                         }
1063                         if (experimEnabled || (pTcon->ses->server &&
1064                                 ((pTcon->ses->server->secMode &
1065                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1066                                 == 0))) {
1067                                 struct kvec iov[2];
1068                                 unsigned int len;
1069
1070                                 len = min((size_t)cifs_sb->wsize,
1071                                           write_size - total_written);
1072                                 /* iov[0] is reserved for smb header */
1073                                 iov[1].iov_base = (char *)write_data +
1074                                                   total_written;
1075                                 iov[1].iov_len = len;
1076                                 rc = CIFSSMBWrite2(xid, pTcon,
1077                                                 open_file->netfid, len,
1078                                                 *poffset, &bytes_written,
1079                                                 iov, 1, long_op);
1080                         } else
1081                                 rc = CIFSSMBWrite(xid, pTcon,
1082                                          open_file->netfid,
1083                                          min_t(const int, cifs_sb->wsize,
1084                                                write_size - total_written),
1085                                          *poffset, &bytes_written,
1086                                          write_data + total_written,
1087                                          NULL, long_op);
1088                 }
1089                 if (rc || (bytes_written == 0)) {
1090                         if (total_written)
1091                                 break;
1092                         else {
1093                                 FreeXid(xid);
1094                                 return rc;
1095                         }
1096                 } else {
1097                         cifs_update_eof(cifsi, *poffset, bytes_written);
1098                         *poffset += bytes_written;
1099                 }
1100                 long_op = CIFS_STD_OP; /* subsequent writes fast -
1101                                     15 seconds is plenty */
1102         }
1103
1104         cifs_stats_bytes_written(pTcon, total_written);
1105
1106         if (total_written > 0) {
1107                 spin_lock(&dentry->d_inode->i_lock);
1108                 if (*poffset > dentry->d_inode->i_size)
1109                         i_size_write(dentry->d_inode, *poffset);
1110                 spin_unlock(&dentry->d_inode->i_lock);
1111         }
1112         mark_inode_dirty_sync(dentry->d_inode);
1113         FreeXid(xid);
1114         return total_written;
1115 }
1116
1117 #ifdef CONFIG_CIFS_EXPERIMENTAL
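/*
 * Find an open file instance with read access for this inode; write-only
 * instances are kept at the tail of the list so the scan can stop early.
 */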
1118 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1119                                         bool fsuid_only)
1120 {
1121         struct cifsFileInfo *open_file = NULL;
1122         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1123
1124         /* only filter by fsuid on multiuser mounts */
1125         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1126                 fsuid_only = false;
1127
1128         spin_lock(&cifs_file_list_lock);
1129         /* we could simply get the first_list_entry since write-only entries
1130            are always at the end of the list but since the first entry might
1131            have a close pending, we go through the whole list */
1132         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1133                 if (fsuid_only && open_file->uid != current_fsuid())
1134                         continue;
1135                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1136                         if (!open_file->invalidHandle) {
1137                                 /* found a good file */
1138                                 /* lock it so it will not be closed on us */
1139                                 cifsFileInfo_get(open_file);
1140                                 spin_unlock(&cifs_file_list_lock);
1141                                 return open_file;
1142                         } /* else might as well continue, and look for
1143                              another, or simply have the caller reopen it
1144                              again rather than trying to fix this handle */
1145                 } else /* write only file */
1146                         break; /* write only files are last so must be done */
1147         }
1148         spin_unlock(&cifs_file_list_lock);
1149         return NULL;
1150 }
1151 #endif
1152
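/*
 * Find an open file instance with write access for this inode, preferring
 * one opened by the current task, and try to reopen invalidated handles
 * before giving up.
 */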
1153 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1154                                         bool fsuid_only)
1155 {
1156         struct cifsFileInfo *open_file;
1157         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1158         bool any_available = false;
1159         int rc;
1160
1161         /* Having a null inode here (because mapping->host was set to zero by
1162         the VFS or MM) should not happen, but we had reports of an oops (due to
1163         it being zero) during stress test cases, so we need to check for it */
1164
1165         if (cifs_inode == NULL) {
1166                 cERROR(1, "Null inode passed to cifs_writeable_file");
1167                 dump_stack();
1168                 return NULL;
1169         }
1170
1171         /* only filter by fsuid on multiuser mounts */
1172         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1173                 fsuid_only = false;
1174
1175         spin_lock(&cifs_file_list_lock);
1176 refind_writable:
1177         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1178                 if (!any_available && open_file->pid != current->tgid)
1179                         continue;
1180                 if (fsuid_only && open_file->uid != current_fsuid())
1181                         continue;
1182                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1183                         cifsFileInfo_get(open_file);
1184
1185                         if (!open_file->invalidHandle) {
1186                                 /* found a good writable file */
1187                                 spin_unlock(&cifs_file_list_lock);
1188                                 return open_file;
1189                         }
1190
1191                         spin_unlock(&cifs_file_list_lock);
1192
1193                         /* Had to unlock since following call can block */
1194                         rc = cifs_reopen_file(open_file, false);
1195                         if (!rc)
1196                                 return open_file;
1197
1198                         /* if it fails, try another handle if possible */
1199                         cFYI(1, "wp failed on reopen file");
1200                         cifsFileInfo_put(open_file);
1201
1202                         spin_lock(&cifs_file_list_lock);
1203
1204                         /* else we simply continue to the next entry. Thus
1205                            we do not loop on reopen errors.  If we
1206                            can not reopen the file, for example if we
1207                            reconnected to a server with another client
1208                            racing to delete or lock the file we would not
1209                            make progress if we restarted before the beginning
1210                            of the loop here. */
1211                 }
1212         }
1213         /* couldn't find a usable FH with the same pid, try any available */
1214         if (!any_available) {
1215                 any_available = true;
1216                 goto refind_writable;
1217         }
1218         spin_unlock(&cifs_file_list_lock);
1219         return NULL;
1220 }
1221
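/*
 * Synchronously write the [from, to) range of a page to the server using
 * any available writable file handle for the inode.
 */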
1222 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1223 {
1224         struct address_space *mapping = page->mapping;
1225         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1226         char *write_data;
1227         int rc = -EFAULT;
1228         int bytes_written = 0;
1229         struct cifs_sb_info *cifs_sb;
1230         struct inode *inode;
1231         struct cifsFileInfo *open_file;
1232
1233         if (!mapping || !mapping->host)
1234                 return -EFAULT;
1235
1236         inode = page->mapping->host;
1237         cifs_sb = CIFS_SB(inode->i_sb);
1238
1239         offset += (loff_t)from;
1240         write_data = kmap(page);
1241         write_data += from;
1242
1243         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1244                 kunmap(page);
1245                 return -EIO;
1246         }
1247
1248         /* racing with truncate? */
1249         if (offset > mapping->host->i_size) {
1250                 kunmap(page);
1251                 return 0; /* don't care */
1252         }
1253
1254         /* check to make sure that we are not extending the file */
1255         if (mapping->host->i_size - offset < (loff_t)to)
1256                 to = (unsigned)(mapping->host->i_size - offset);
1257
1258         open_file = find_writable_file(CIFS_I(mapping->host), false);
1259         if (open_file) {
1260                 bytes_written = cifs_write(open_file, write_data,
1261                                            to - from, &offset);
1262                 cifsFileInfo_put(open_file);
1263                 /* Does mm or vfs already set times? */
1264                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1265                 if ((bytes_written > 0) && (offset))
1266                         rc = 0;
1267                 else if (bytes_written < 0)
1268                         rc = bytes_written;
1269         } else {
1270                 cFYI(1, "No writeable filehandles for inode");
1271                 rc = -EIO;
1272         }
1273
1274         kunmap(page);
1275         return rc;
1276 }
1277
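/*
 * Writeback: gather runs of contiguous dirty pages so they can be sent to
 * the server in larger wsize-bounded writes, falling back to
 * generic_writepages() when that is not possible (small wsize, no open
 * file, or signing required without the experimental write path).
 */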
1278 static int cifs_writepages(struct address_space *mapping,
1279                            struct writeback_control *wbc)
1280 {
1281         unsigned int bytes_to_write;
1282         unsigned int bytes_written;
1283         struct cifs_sb_info *cifs_sb;
1284         int done = 0;
1285         pgoff_t end;
1286         pgoff_t index;
1287         int range_whole = 0;
1288         struct kvec *iov;
1289         int len;
1290         int n_iov = 0;
1291         pgoff_t next;
1292         int nr_pages;
1293         __u64 offset = 0;
1294         struct cifsFileInfo *open_file;
1295         struct cifsTconInfo *tcon;
1296         struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
1297         struct page *page;
1298         struct pagevec pvec;
1299         int rc = 0;
1300         int scanned = 0;
1301         int xid, long_op;
1302
1303         cifs_sb = CIFS_SB(mapping->host->i_sb);
1304
1305         /*
1306          * If wsize is smaller than the page cache size, default to writing
1307          * one page at a time via cifs_writepage
1308          */
1309         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1310                 return generic_writepages(mapping, wbc);
1311
1312         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1313         if (iov == NULL)
1314                 return generic_writepages(mapping, wbc);
1315
1316         /*
1317          * if there's no open file, then this is likely to fail too,
1318          * but it'll at least handle the return. Maybe it should be
1319          * a BUG() instead?
1320          */
1321         open_file = find_writable_file(CIFS_I(mapping->host), false);
1322         if (!open_file) {
1323                 kfree(iov);
1324                 return generic_writepages(mapping, wbc);
1325         }
1326
1327         tcon = tlink_tcon(open_file->tlink);
1328         if (!experimEnabled && tcon->ses->server->secMode &
1329                         (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1330                 cifsFileInfo_put(open_file);
1331                 kfree(iov);
1332                 return generic_writepages(mapping, wbc);
1333         }
1334         cifsFileInfo_put(open_file);
1335
1336         xid = GetXid();
1337
1338         pagevec_init(&pvec, 0);
1339         if (wbc->range_cyclic) {
1340                 index = mapping->writeback_index; /* Start from prev offset */
1341                 end = -1;
1342         } else {
1343                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1344                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1345                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1346                         range_whole = 1;
1347                 scanned = 1;
1348         }
1349 retry:
1350         while (!done && (index <= end) &&
1351                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1352                         PAGECACHE_TAG_DIRTY,
1353                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1354                 int first;
1355                 unsigned int i;
1356
1357                 first = -1;
1358                 next = 0;
1359                 n_iov = 0;
1360                 bytes_to_write = 0;
1361
1362                 for (i = 0; i < nr_pages; i++) {
1363                         page = pvec.pages[i];
1364                         /*
1365                          * At this point we hold neither mapping->tree_lock nor
1366                          * lock on the page itself: the page may be truncated or
1367                          * invalidated (changing page->mapping to NULL), or even
1368                          * swizzled back from swapper_space to tmpfs file
1369                          * mapping
1370                          */
1371
1372                         if (first < 0)
1373                                 lock_page(page);
1374                         else if (!trylock_page(page))
1375                                 break;
1376
1377                         if (unlikely(page->mapping != mapping)) {
1378                                 unlock_page(page);
1379                                 break;
1380                         }
1381
1382                         if (!wbc->range_cyclic && page->index > end) {
1383                                 done = 1;
1384                                 unlock_page(page);
1385                                 break;
1386                         }
1387
1388                         if (next && (page->index != next)) {
1389                                 /* Not next consecutive page */
1390                                 unlock_page(page);
1391                                 break;
1392                         }
1393
1394                         if (wbc->sync_mode != WB_SYNC_NONE)
1395                                 wait_on_page_writeback(page);
1396
1397                         if (PageWriteback(page) ||
1398                                         !clear_page_dirty_for_io(page)) {
1399                                 unlock_page(page);
1400                                 break;
1401                         }
1402
1403                         /*
1404                          * This actually clears the dirty bit in the radix tree.
1405                          * See cifs_writepage() for more commentary.
1406                          */
1407                         set_page_writeback(page);
1408
1409                         if (page_offset(page) >= mapping->host->i_size) {
1410                                 done = 1;
1411                                 unlock_page(page);
1412                                 end_page_writeback(page);
1413                                 break;
1414                         }
1415
1416                         /*
1417                          * BB can we get rid of this?  pages are held by pvec
1418                          */
1419                         page_cache_get(page);
1420
1421                         len = min(mapping->host->i_size - page_offset(page),
1422                                   (loff_t)PAGE_CACHE_SIZE);
1423
1424                         /* reserve iov[0] for the smb header */
1425                         n_iov++;
1426                         iov[n_iov].iov_base = kmap(page);
1427                         iov[n_iov].iov_len = len;
1428                         bytes_to_write += len;
1429
1430                         if (first < 0) {
1431                                 first = i;
1432                                 offset = page_offset(page);
1433                         }
1434                         next = page->index + 1;
1435                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1436                                 break;
1437                 }
1438                 if (n_iov) {
1439                         open_file = find_writable_file(CIFS_I(mapping->host),
1440                                                         false);
1441                         if (!open_file) {
1442                                 cERROR(1, "No writable handles for inode");
1443                                 rc = -EBADF;
1444                         } else {
1445                                 long_op = cifs_write_timeout(cifsi, offset);
1446                                 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
1447                                                    bytes_to_write, offset,
1448                                                    &bytes_written, iov, n_iov,
1449                                                    long_op);
1450                                 cifsFileInfo_put(open_file);
1451                                 cifs_update_eof(cifsi, offset, bytes_written);
1452                         }
1453
1454                         if (rc || bytes_written < bytes_to_write) {
1455                                 cERROR(1, "Write2 ret %d, wrote %d",
1456                                           rc, bytes_written);
1457                                 mapping_set_error(mapping, rc);
1458                         } else {
1459                                 cifs_stats_bytes_written(tcon, bytes_written);
1460                         }
1461
1462                         for (i = 0; i < n_iov; i++) {
1463                                 page = pvec.pages[first + i];
1464                                 /* Should we also set page error on
1465                                 success rc but too little data written? */
1466                                 /* BB investigate retry logic on temporary
1467                                 server crash cases and how recovery works
1468                                 when page marked as error */
1469                                 if (rc)
1470                                         SetPageError(page);
1471                                 kunmap(page);
1472                                 unlock_page(page);
1473                                 end_page_writeback(page);
1474                                 page_cache_release(page);
1475                         }
1476                         if ((wbc->nr_to_write -= n_iov) <= 0)
1477                                 done = 1;
1478                         index = next;
1479                 } else
1480                         /* Need to re-find the pages we skipped */
1481                         index = pvec.pages[0]->index + 1;
1482
1483                 pagevec_release(&pvec);
1484         }
1485         if (!scanned && !done) {
1486                 /*
1487                  * We hit the last page and there is more work to be done: wrap
1488                  * back to the start of the file
1489                  */
1490                 scanned = 1;
1491                 index = 0;
1492                 goto retry;
1493         }
1494         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1495                 mapping->writeback_index = index;
1496
1497         FreeXid(xid);
1498         kfree(iov);
1499         return rc;
1500 }
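
/*
 * Rough sketch of the batching done by cifs_writepages() above: iov[0] is
 * reserved for the SMB header, so with a wsize of, say, 57344 bytes
 * (14 * 4096) at most 14 dirty, contiguous 4K pages are kmap()ed into
 * iov[1..14] before the "bytes_to_write + PAGE_CACHE_SIZE > wsize" test
 * stops the inner loop, and a single CIFSSMBWrite2() call then pushes the
 * whole batch at the file offset of the first page in the run.
 */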
1501
1502 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1503 {
1504         int rc = -EFAULT;
1505         int xid;
1506
1507         xid = GetXid();
1508 /* BB add check for wbc flags */
1509         page_cache_get(page);
1510         if (!PageUptodate(page))
1511                 cFYI(1, "ppw - page not up to date");
1512
1513         /*
1514          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1515          *
1516          * A writepage() implementation always needs to do either this,
1517          * or re-dirty the page with "redirty_page_for_writepage()" in
1518          * the case of a failure.
1519          *
1520          * Just unlocking the page will cause the radix tree tag-bits
1521          * to fail to update with the state of the page correctly.
1522          */
1523         set_page_writeback(page);
1524         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1525         SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1526         unlock_page(page);
1527         end_page_writeback(page);
1528         page_cache_release(page);
1529         FreeXid(xid);
1530         return rc;
1531 }
1532
1533 static int cifs_write_end(struct file *file, struct address_space *mapping,
1534                         loff_t pos, unsigned len, unsigned copied,
1535                         struct page *page, void *fsdata)
1536 {
1537         int rc;
1538         struct inode *inode = mapping->host;
1539
1540         cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1541                  page, pos, copied);
1542
1543         if (PageChecked(page)) {
1544                 if (copied == len)
1545                         SetPageUptodate(page);
1546                 ClearPageChecked(page);
1547         } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1548                 SetPageUptodate(page);
1549
1550         if (!PageUptodate(page)) {
1551                 char *page_data;
1552                 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1553                 int xid;
1554
1555                 xid = GetXid();
1556                 /* this is probably better than calling partialpage_write
1557                    directly, since in this function the file handle is
1558                    already known and we might as well use it */
1559                 /* BB check whether anything else from ppw is missing here,
1560                    such as updating the last write time */
1561                 page_data = kmap(page);
1562                 rc = cifs_write(file->private_data, page_data + offset,
1563                                 copied, &pos);
1564                 /* if (rc < 0) should we set writebehind rc? */
1565                 kunmap(page);
1566
1567                 FreeXid(xid);
1568         } else {
1569                 rc = copied;
1570                 pos += copied;
1571                 set_page_dirty(page);
1572         }
1573
1574         if (rc > 0) {
1575                 spin_lock(&inode->i_lock);
1576                 if (pos > inode->i_size)
1577                         i_size_write(inode, pos);
1578                 spin_unlock(&inode->i_lock);
1579         }
1580
1581         unlock_page(page);
1582         page_cache_release(page);
1583
1584         return rc;
1585 }
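
/*
 * Example of the three cases handled in cifs_write_end() above: with a
 * PageChecked page, a copy of the full "len" marks the page uptodate; a
 * full-page copy (copied == PAGE_CACHE_SIZE) also marks it uptodate; in
 * every other case the page is still not uptodate, so the copied bytes
 * are pushed synchronously via cifs_write() at "pos" instead of only
 * being marked dirty for later writeback.
 */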
1586
1587 int cifs_fsync(struct file *file, int datasync)
1588 {
1589         int xid;
1590         int rc = 0;
1591         struct cifsTconInfo *tcon;
1592         struct cifsFileInfo *smbfile = file->private_data;
1593         struct inode *inode = file->f_path.dentry->d_inode;
1594
1595         xid = GetXid();
1596
1597         cFYI(1, "Sync file - name: %s datasync: 0x%x",
1598                 file->f_path.dentry->d_name.name, datasync);
1599
1600         rc = filemap_write_and_wait(inode->i_mapping);
1601         if (rc == 0) {
1602                 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1603
1604                 tcon = tlink_tcon(smbfile->tlink);
1605                 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1606                         rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1607         }
1608
1609         FreeXid(xid);
1610         return rc;
1611 }
1612
1613 /* static void cifs_sync_page(struct page *page)
1614 {
1615         struct address_space *mapping;
1616         struct inode *inode;
1617         unsigned long index = page->index;
1618         unsigned int rpages = 0;
1619         int rc = 0;
1620
1621         cFYI(1, "sync page %p", page);
1622         mapping = page->mapping;
1623         if (!mapping)
1624                 return 0;
1625         inode = mapping->host;
1626         if (!inode)
1627                 return; */
1628
1629 /*      fill in rpages then
1630         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1631
1632 /*      cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1633
1634 #if 0
1635         if (rc < 0)
1636                 return rc;
1637         return 0;
1638 #endif
1639 } */
1640
1641 /*
1642  * As the file closes, flush all cached write data for this inode,
1643  * checking for write-behind errors.
1644  */
1645 int cifs_flush(struct file *file, fl_owner_t id)
1646 {
1647         struct inode *inode = file->f_path.dentry->d_inode;
1648         int rc = 0;
1649
1650         if (file->f_mode & FMODE_WRITE)
1651                 rc = filemap_write_and_wait(inode->i_mapping);
1652
1653         cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1654
1655         return rc;
1656 }
1657
1658 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1659         size_t read_size, loff_t *poffset)
1660 {
1661         int rc = -EACCES;
1662         unsigned int bytes_read = 0;
1663         unsigned int total_read = 0;
1664         unsigned int current_read_size;
1665         struct cifs_sb_info *cifs_sb;
1666         struct cifsTconInfo *pTcon;
1667         int xid;
1668         struct cifsFileInfo *open_file;
1669         char *smb_read_data;
1670         char __user *current_offset;
1671         struct smb_com_read_rsp *pSMBr;
1672
1673         xid = GetXid();
1674         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1675
1676         if (file->private_data == NULL) {
1677                 rc = -EBADF;
1678                 FreeXid(xid);
1679                 return rc;
1680         }
1681         open_file = file->private_data;
1682         pTcon = tlink_tcon(open_file->tlink);
1683
1684         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1685                 cFYI(1, "attempting read on write only file instance");
1686
1687         for (total_read = 0, current_offset = read_data;
1688              read_size > total_read;
1689              total_read += bytes_read, current_offset += bytes_read) {
1690                 current_read_size = min_t(const int, read_size - total_read,
1691                                           cifs_sb->rsize);
1692                 rc = -EAGAIN;
1693                 smb_read_data = NULL;
1694                 while (rc == -EAGAIN) {
1695                         int buf_type = CIFS_NO_BUFFER;
1696                         if (open_file->invalidHandle) {
1697                                 rc = cifs_reopen_file(open_file, true);
1698                                 if (rc != 0)
1699                                         break;
1700                         }
1701                         rc = CIFSSMBRead(xid, pTcon,
1702                                          open_file->netfid,
1703                                          current_read_size, *poffset,
1704                                          &bytes_read, &smb_read_data,
1705                                          &buf_type);
1706                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1707                         if (smb_read_data) {
1708                                 if (copy_to_user(current_offset,
1709                                                 smb_read_data +
1710                                                 4 /* RFC1001 length field */ +
1711                                                 le16_to_cpu(pSMBr->DataOffset),
1712                                                 bytes_read))
1713                                         rc = -EFAULT;
1714
1715                                 if (buf_type == CIFS_SMALL_BUFFER)
1716                                         cifs_small_buf_release(smb_read_data);
1717                                 else if (buf_type == CIFS_LARGE_BUFFER)
1718                                         cifs_buf_release(smb_read_data);
1719                                 smb_read_data = NULL;
1720                         }
1721                 }
1722                 if (rc || (bytes_read == 0)) {
1723                         if (total_read) {
1724                                 break;
1725                         } else {
1726                                 FreeXid(xid);
1727                                 return rc;
1728                         }
1729                 } else {
1730                         cifs_stats_bytes_read(pTcon, bytes_read);
1731                         *poffset += bytes_read;
1732                 }
1733         }
1734         FreeXid(xid);
1735         return total_read;
1736 }
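
/*
 * Buffer layout assumed by the copy_to_user() above, as the code reads
 * it: the buffer returned by CIFSSMBRead() starts with the 4 byte RFC1001
 * length field, followed by the SMB header; pSMBr->DataOffset is the
 * offset of the payload from the start of the SMB header, so the file
 * data begins at smb_read_data + 4 + le16_to_cpu(pSMBr->DataOffset) and
 * is bytes_read bytes long.
 */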
1737
1738
1739 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1740         loff_t *poffset)
1741 {
1742         int rc = -EACCES;
1743         unsigned int bytes_read = 0;
1744         unsigned int total_read;
1745         unsigned int current_read_size;
1746         struct cifs_sb_info *cifs_sb;
1747         struct cifsTconInfo *pTcon;
1748         int xid;
1749         char *current_offset;
1750         struct cifsFileInfo *open_file;
1751         int buf_type = CIFS_NO_BUFFER;
1752
1753         xid = GetXid();
1754         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1755
1756         if (file->private_data == NULL) {
1757                 rc = -EBADF;
1758                 FreeXid(xid);
1759                 return rc;
1760         }
1761         open_file = file->private_data;
1762         pTcon = tlink_tcon(open_file->tlink);
1763
1764         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1765                 cFYI(1, "attempting read on write only file instance");
1766
1767         for (total_read = 0, current_offset = read_data;
1768              read_size > total_read;
1769              total_read += bytes_read, current_offset += bytes_read) {
1770                 current_read_size = min_t(const int, read_size - total_read,
1771                                           cifs_sb->rsize);
1772                 /* For Windows ME and 9x we do not want to request more
1773                 than the server negotiated, since it will refuse the read otherwise */
1774                 if ((pTcon->ses) &&
1775                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1776                         current_read_size = min_t(const int, current_read_size,
1777                                         pTcon->ses->server->maxBuf - 128);
1778                 }
1779                 rc = -EAGAIN;
1780                 while (rc == -EAGAIN) {
1781                         if (open_file->invalidHandle) {
1782                                 rc = cifs_reopen_file(open_file, true);
1783                                 if (rc != 0)
1784                                         break;
1785                         }
1786                         rc = CIFSSMBRead(xid, pTcon,
1787                                          open_file->netfid,
1788                                          current_read_size, *poffset,
1789                                          &bytes_read, &current_offset,
1790                                          &buf_type);
1791                 }
1792                 if (rc || (bytes_read == 0)) {
1793                         if (total_read) {
1794                                 break;
1795                         } else {
1796                                 FreeXid(xid);
1797                                 return rc;
1798                         }
1799                 } else {
1800                         cifs_stats_bytes_read(pTcon, bytes_read);
1801                         *poffset += bytes_read;
1802                 }
1803         }
1804         FreeXid(xid);
1805         return total_read;
1806 }
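
/*
 * Example of the legacy-server clamp in cifs_read() above: if the session
 * does not advertise CAP_LARGE_FILES and the negotiated maxBuf is, say,
 * 16644 bytes, each request is limited to 16644 - 128 = 16516 bytes
 * (leaving room for the response header) rather than the full rsize, so a
 * 64K read is split into several smaller CIFSSMBRead() calls.
 */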
1807
1808 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1809 {
1810         int rc, xid;
1811
1812         xid = GetXid();
1813         rc = cifs_revalidate_file(file);
1814         if (rc) {
1815                 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1816                 FreeXid(xid);
1817                 return rc;
1818         }
1819         rc = generic_file_mmap(file, vma);
1820         FreeXid(xid);
1821         return rc;
1822 }
1823
1824
1825 static void cifs_copy_cache_pages(struct address_space *mapping,
1826         struct list_head *pages, int bytes_read, char *data)
1827 {
1828         struct page *page;
1829         char *target;
1830
1831         while (bytes_read > 0) {
1832                 if (list_empty(pages))
1833                         break;
1834
1835                 page = list_entry(pages->prev, struct page, lru);
1836                 list_del(&page->lru);
1837
1838                 if (add_to_page_cache_lru(page, mapping, page->index,
1839                                       GFP_KERNEL)) {
1840                         page_cache_release(page);
1841                         cFYI(1, "Add page cache failed");
1842                         data += PAGE_CACHE_SIZE;
1843                         bytes_read -= PAGE_CACHE_SIZE;
1844                         continue;
1845                 }
1846                 page_cache_release(page);
1847
1848                 target = kmap_atomic(page, KM_USER0);
1849
1850                 if (PAGE_CACHE_SIZE > bytes_read) {
1851                         memcpy(target, data, bytes_read);
1852                         /* zero the tail end of this partial page */
1853                         memset(target + bytes_read, 0,
1854                                PAGE_CACHE_SIZE - bytes_read);
1855                         bytes_read = 0;
1856                 } else {
1857                         memcpy(target, data, PAGE_CACHE_SIZE);
1858                         bytes_read -= PAGE_CACHE_SIZE;
1859                 }
1860                 kunmap_atomic(target, KM_USER0);
1861
1862                 flush_dcache_page(page);
1863                 SetPageUptodate(page);
1864                 unlock_page(page);
1865                 data += PAGE_CACHE_SIZE;
1866
1867                 /* add page to FS-Cache */
1868                 cifs_readpage_to_fscache(mapping->host, page);
1869         }
1870         return;
1871 }
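
/*
 * Example of the tail handling in cifs_copy_cache_pages() above: if the
 * server returned 10000 bytes for a batch of 4K pages, the first two
 * pages are filled completely and the third receives the remaining
 * 1808 bytes, with the rest of that page memset() to zero before it is
 * marked uptodate.
 */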
1872
1873 static int cifs_readpages(struct file *file, struct address_space *mapping,
1874         struct list_head *page_list, unsigned num_pages)
1875 {
1876         int rc = -EACCES;
1877         int xid;
1878         loff_t offset;
1879         struct page *page;
1880         struct cifs_sb_info *cifs_sb;
1881         struct cifsTconInfo *pTcon;
1882         unsigned int bytes_read = 0;
1883         unsigned int read_size, i;
1884         char *smb_read_data = NULL;
1885         struct smb_com_read_rsp *pSMBr;
1886         struct cifsFileInfo *open_file;
1887         int buf_type = CIFS_NO_BUFFER;
1888
1889         xid = GetXid();
1890         if (file->private_data == NULL) {
1891                 rc = -EBADF;
1892                 FreeXid(xid);
1893                 return rc;
1894         }
1895         open_file = file->private_data;
1896         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1897         pTcon = tlink_tcon(open_file->tlink);
1898
1899         /*
1900          * Reads as many pages as possible from fscache. Returns -ENOBUFS
1901          * immediately if the cookie is negative
1902          */
1903         rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
1904                                          &num_pages);
1905         if (rc == 0)
1906                 goto read_complete;
1907
1908         cFYI(DBG2, "rpages: num pages %d", num_pages);
1909         for (i = 0; i < num_pages; ) {
1910                 unsigned contig_pages;
1911                 struct page *tmp_page;
1912                 unsigned long expected_index;
1913
1914                 if (list_empty(page_list))
1915                         break;
1916
1917                 page = list_entry(page_list->prev, struct page, lru);
1918                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1919
1920                 /* count adjacent pages that we will read into */
1921                 contig_pages = 0;
1922                 expected_index =
1923                         list_entry(page_list->prev, struct page, lru)->index;
1924                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1925                         if (tmp_page->index == expected_index) {
1926                                 contig_pages++;
1927                                 expected_index++;
1928                         } else
1929                                 break;
1930                 }
1931                 if (contig_pages + i >  num_pages)
1932                         contig_pages = num_pages - i;
1933
1934                 /* for reads over a certain size could initiate async
1935                    read ahead */
1936
1937                 read_size = contig_pages * PAGE_CACHE_SIZE;
1938                 /* Read size needs to be a multiple of the page size */
1939                 read_size = min_t(const unsigned int, read_size,
1940                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1941                 cFYI(DBG2, "rpages: read size 0x%x  contiguous pages %d",
1942                                 read_size, contig_pages);
1943                 rc = -EAGAIN;
1944                 while (rc == -EAGAIN) {
1945                         if (open_file->invalidHandle) {
1946                                 rc = cifs_reopen_file(open_file, true);
1947                                 if (rc != 0)
1948                                         break;
1949                         }
1950
1951                         rc = CIFSSMBRead(xid, pTcon,
1952                                          open_file->netfid,
1953                                          read_size, offset,
1954                                          &bytes_read, &smb_read_data,
1955                                          &buf_type);
1956                         /* BB more RC checks ? */
1957                         if (rc == -EAGAIN) {
1958                                 if (smb_read_data) {
1959                                         if (buf_type == CIFS_SMALL_BUFFER)
1960                                                 cifs_small_buf_release(smb_read_data);
1961                                         else if (buf_type == CIFS_LARGE_BUFFER)
1962                                                 cifs_buf_release(smb_read_data);
1963                                         smb_read_data = NULL;
1964                                 }
1965                         }
1966                 }
1967                 if ((rc < 0) || (smb_read_data == NULL)) {
1968                         cFYI(1, "Read error in readpages: %d", rc);
1969                         break;
1970                 } else if (bytes_read > 0) {
1971                         task_io_account_read(bytes_read);
1972                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1973                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
1974                                 smb_read_data + 4 /* RFC1001 hdr */ +
1975                                 le16_to_cpu(pSMBr->DataOffset));
1976
1977                         i +=  bytes_read >> PAGE_CACHE_SHIFT;
1978                         cifs_stats_bytes_read(pTcon, bytes_read);
1979                         if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1980                                 i++; /* account for partial page */
1981
1982                                 /* server copy of file can have smaller size
1983                                    than client */
1984                                 /* BB do we need to verify this common case ?
1985                                    this case is ok - if we are at server EOF
1986                                    we will hit it on next read */
1987
1988                                 /* break; */
1989                         }
1990                 } else {
1991                         cFYI(1, "No bytes read (%d) at offset %lld . "
1992                                 "Cleaning remaining pages from readahead list",
1993                                 bytes_read, offset);
1994                         /* BB turn off caching and do new lookup on
1995                            file size at server? */
1996                         break;
1997                 }
1998                 if (smb_read_data) {
1999                         if (buf_type == CIFS_SMALL_BUFFER)
2000                                 cifs_small_buf_release(smb_read_data);
2001                         else if (buf_type == CIFS_LARGE_BUFFER)
2002                                 cifs_buf_release(smb_read_data);
2003                         smb_read_data = NULL;
2004                 }
2005                 bytes_read = 0;
2006         }
2007
2008 /* need to free smb_read_data buf before exit */
2009         if (smb_read_data) {
2010                 if (buf_type == CIFS_SMALL_BUFFER)
2011                         cifs_small_buf_release(smb_read_data);
2012                 else if (buf_type == CIFS_LARGE_BUFFER)
2013                         cifs_buf_release(smb_read_data);
2014                 smb_read_data = NULL;
2015         }
2016
2017 read_complete:
2018         FreeXid(xid);
2019         return rc;
2020 }
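
/*
 * Worked example for the request sizing in cifs_readpages() above: with
 * 4K pages and, say, rsize = 16644, a run of 6 contiguous pages in the
 * readahead list gives read_size = 6 * 4096 = 24576, which is clamped to
 * rsize & PAGE_CACHE_MASK = 16384 (a whole number of pages), so the six
 * pages end up being fetched in roughly two CIFSSMBRead() calls of 4 and
 * 2 pages, assuming the server returns the full amounts.
 */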
2021
2022 static int cifs_readpage_worker(struct file *file, struct page *page,
2023         loff_t *poffset)
2024 {
2025         char *read_data;
2026         int rc;
2027
2028         /* Is the page cached? */
2029         rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2030         if (rc == 0)
2031                 goto read_complete;
2032
2033         page_cache_get(page);
2034         read_data = kmap(page);
2035         /* for reads over a certain size could initiate async read ahead */
2036
2037         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
2038
2039         if (rc < 0)
2040                 goto io_error;
2041         else
2042                 cFYI(1, "Bytes read %d", rc);
2043
2044         file->f_path.dentry->d_inode->i_atime =
2045                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
2046
2047         if (PAGE_CACHE_SIZE > rc)
2048                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2049
2050         flush_dcache_page(page);
2051         SetPageUptodate(page);
2052
2053         /* send this page to the cache */
2054         cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2055
2056         rc = 0;
2057
2058 io_error:
2059         kunmap(page);
2060         page_cache_release(page);
2061
2062 read_complete:
2063         return rc;
2064 }
2065
2066 static int cifs_readpage(struct file *file, struct page *page)
2067 {
2068         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2069         int rc = -EACCES;
2070         int xid;
2071
2072         xid = GetXid();
2073
2074         if (file->private_data == NULL) {
2075                 rc = -EBADF;
2076                 FreeXid(xid);
2077                 return rc;
2078         }
2079
2080         cFYI(1, "readpage %p at offset %d 0x%x\n",
2081                  page, (int)offset, (int)offset);
2082
2083         rc = cifs_readpage_worker(file, page, &offset);
2084
2085         unlock_page(page);
2086
2087         FreeXid(xid);
2088         return rc;
2089 }
2090
2091 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2092 {
2093         struct cifsFileInfo *open_file;
2094
2095         spin_lock(&cifs_file_list_lock);
2096         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2097                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2098                         spin_unlock(&cifs_file_list_lock);
2099                         return 1;
2100                 }
2101         }
2102         spin_unlock(&cifs_file_list_lock);
2103         return 0;
2104 }
2105
2106 /* We do not want to update the file size from the server for inodes
2107    open for write, to avoid races with writepage extending the file.
2108    In the future we could consider allowing the inode to be refreshed
2109    only on increases in the file size, but this is tricky to do
2110    without racing with writebehind page caching in the current
2111    Linux kernel design */
2112 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2113 {
2114         if (!cifsInode)
2115                 return true;
2116
2117         if (is_inode_writable(cifsInode)) {
2118                 /* This inode is open for write at least once */
2119                 struct cifs_sb_info *cifs_sb;
2120
2121                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2122                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2123                         /* since no page cache to corrupt on directio
2124                         we can change size safely */
2125                         return true;
2126                 }
2127
2128                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2129                         return true;
2130
2131                 return false;
2132         } else
2133                 return true;
2134 }
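
/*
 * Summary of the checks above: the server-reported size is accepted when
 * no handle is open for writing, when the mount uses direct I/O (there is
 * no page cache to corrupt), or when end_of_file is larger than the
 * cached i_size; it is refused only for a write-opened, page-cached inode
 * whose reported size does not exceed the cached one.
 */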
2135
2136 static int cifs_write_begin(struct file *file, struct address_space *mapping,
2137                         loff_t pos, unsigned len, unsigned flags,
2138                         struct page **pagep, void **fsdata)
2139 {
2140         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2141         loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
2142         loff_t page_start = pos & PAGE_MASK;
2143         loff_t i_size;
2144         struct page *page;
2145         int rc = 0;
2146
2147         cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
2148
2149         page = grab_cache_page_write_begin(mapping, index, flags);
2150         if (!page) {
2151                 rc = -ENOMEM;
2152                 goto out;
2153         }
2154
2155         if (PageUptodate(page))
2156                 goto out;
2157
2158         /*
2159          * If we write a full page it will be up to date, no need to read from
2160          * the server. If the write is short, we'll end up doing a sync write
2161          * instead.
2162          */
2163         if (len == PAGE_CACHE_SIZE)
2164                 goto out;
2165
2166         /*
2167          * optimize away the read when we have an oplock, and we're not
2168          * expecting to use any of the data we'd be reading in. That
2169          * is, when the page lies beyond the EOF, or straddles the EOF
2170          * and the write will cover all of the existing data.
2171          */
2172         if (CIFS_I(mapping->host)->clientCanCacheRead) {
2173                 i_size = i_size_read(mapping->host);
2174                 if (page_start >= i_size ||
2175                     (offset == 0 && (pos + len) >= i_size)) {
2176                         zero_user_segments(page, 0, offset,
2177                                            offset + len,
2178                                            PAGE_CACHE_SIZE);
2179                         /*
2180                          * PageChecked means that the parts of the page
2181                          * to which we're not writing are considered up
2182                          * to date. Once the data is copied to the
2183                          * page, it can be set uptodate.
2184                          */
2185                         SetPageChecked(page);
2186                         goto out;
2187                 }
2188         }
2189
2190         if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2191                 /*
2192                  * might as well read a page, it is fast enough. If we get
2193                  * an error, we don't need to return it. cifs_write_end will
2194                  * do a sync write instead since PG_uptodate isn't set.
2195                  */
2196                 cifs_readpage_worker(file, page, &page_start);
2197         } else {
2198                 /* we could try using another file handle if there is one,
2199                    but how would we lock it to prevent a close of that handle
2200                    racing with this read? In any case the data will be
2201                    written out by write_end, so this is fine */
2202         }
2203 out:
2204         *pagep = page;
2205         return rc;
2206 }
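
/*
 * Example of the read-avoidance cases in cifs_write_begin() above: with a
 * read oplock, a write of 200 bytes at pos 12288 into a 10000 byte file
 * starts beyond EOF, so the page is zeroed around the target range and
 * marked PageChecked instead of being read from the server; without an
 * oplock (and with a handle open for reading) the same short write
 * triggers cifs_readpage_worker() so that the rest of the page is valid
 * before it is dirtied.
 */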
2207
2208 static int cifs_release_page(struct page *page, gfp_t gfp)
2209 {
2210         if (PagePrivate(page))
2211                 return 0;
2212
2213         return cifs_fscache_release_page(page, gfp);
2214 }
2215
2216 static void cifs_invalidate_page(struct page *page, unsigned long offset)
2217 {
2218         struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2219
2220         if (offset == 0)
2221                 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2222 }
2223
2224 void cifs_oplock_break(struct work_struct *work)
2225 {
2226         struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2227                                                   oplock_break);
2228         struct inode *inode = cfile->dentry->d_inode;
2229         struct cifsInodeInfo *cinode = CIFS_I(inode);
2230         int rc = 0;
2231
2232         if (inode && S_ISREG(inode->i_mode)) {
2233                 if (cinode->clientCanCacheRead)
2234                         break_lease(inode, O_RDONLY);
2235                 else
2236                         break_lease(inode, O_WRONLY);
2237                 rc = filemap_fdatawrite(inode->i_mapping);
2238                 if (cinode->clientCanCacheRead == 0) {
2239                         rc = filemap_fdatawait(inode->i_mapping);
2240                         mapping_set_error(inode->i_mapping, rc);
2241                         invalidate_remote_inode(inode);
2242                 }
2243                 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
2244         }
2245
2246         /*
2247          * Releasing a stale oplock after a recent reconnect of the smb session,
2248          * using a now incorrect file handle, is not a data integrity issue, but
2249          * do not bother sending an oplock release if the session to the server
2250          * is still disconnected, since the oplock was already released by the server.
2251          */
2252         if (!cfile->oplock_break_cancelled) {
2253                 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
2254                                  0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
2255                 cFYI(1, "Oplock release rc = %d", rc);
2256         }
2257
2258         /*
2259          * We might have kicked in before is_valid_oplock_break()
2260          * finished grabbing reference for us.  Make sure it's done by
2261          * waiting for cifs_file_list_lock.
2262          */
2263         spin_lock(&cifs_file_list_lock);
2264         spin_unlock(&cifs_file_list_lock);
2265
2266         cifs_oplock_break_put(cfile);
2267 }
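
/*
 * Sequence illustrated by cifs_oplock_break() above: for a regular file
 * the matching lease is broken, dirty pages are flushed with
 * filemap_fdatawrite(), and if read caching was lost the writeback is
 * waited on and the cached inode data invalidated; finally, unless the
 * break was cancelled by a reconnect, a LOCKING_ANDX_OPLOCK_RELEASE is
 * sent on the file's netfid to acknowledge the break to the server.
 */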
2268
2269 /* must be called while holding cifs_file_list_lock */
2270 void cifs_oplock_break_get(struct cifsFileInfo *cfile)
2271 {
2272         cifs_sb_active(cfile->dentry->d_sb);
2273         cifsFileInfo_get(cfile);
2274 }
2275
2276 void cifs_oplock_break_put(struct cifsFileInfo *cfile)
2277 {
2278         cifsFileInfo_put(cfile);
2279         cifs_sb_deactive(cfile->dentry->d_sb);
2280 }
2281
2282 const struct address_space_operations cifs_addr_ops = {
2283         .readpage = cifs_readpage,
2284         .readpages = cifs_readpages,
2285         .writepage = cifs_writepage,
2286         .writepages = cifs_writepages,
2287         .write_begin = cifs_write_begin,
2288         .write_end = cifs_write_end,
2289         .set_page_dirty = __set_page_dirty_nobuffers,
2290         .releasepage = cifs_release_page,
2291         .invalidatepage = cifs_invalidate_page,
2292         /* .sync_page = cifs_sync_page, */
2293         /* .direct_IO = */
2294 };
2295
2296 /*
2297  * cifs_readpages requires the server to support a buffer large enough to
2298  * contain the header plus one complete page of data.  Otherwise, we need
2299  * to leave cifs_readpages out of the address space operations.
2300  */
2301 const struct address_space_operations cifs_addr_ops_smallbuf = {
2302         .readpage = cifs_readpage,
2303         .writepage = cifs_writepage,
2304         .writepages = cifs_writepages,
2305         .write_begin = cifs_write_begin,
2306         .write_end = cifs_write_end,
2307         .set_page_dirty = __set_page_dirty_nobuffers,
2308         .releasepage = cifs_release_page,
2309         .invalidatepage = cifs_invalidate_page,
2310         /* .sync_page = cifs_sync_page, */
2311         /* .direct_IO = */
2312 };