4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/xattr.h>
44 #define DECLARE_GLOBALS_HERE
46 #include "cifsproto.h"
47 #include "cifs_debug.h"
48 #include "cifs_fs_sb.h"
50 #include <linux/key-type.h>
51 #include "cifs_spnego.h"
53 #ifdef CONFIG_CIFS_SMB2
59 bool enable_oplocks = true;
60 bool linuxExtEnabled = true;
61 bool lookupCacheEnabled = true;
62 unsigned int global_secflags = CIFSSEC_DEF;
63 /* unsigned int ntlmv2_support = 0; */
64 unsigned int sign_CIFS_PDUs = 1;
65 static const struct super_operations cifs_super_ops;
66 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
67 module_param(CIFSMaxBufSize, uint, 0444);
68 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
69 "Default: 16384 Range: 8192 to 130048");
70 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
71 module_param(cifs_min_rcv, uint, 0444);
72 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
74 unsigned int cifs_min_small = 30;
75 module_param(cifs_min_small, uint, 0444);
76 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
78 unsigned int cifs_max_pending = CIFS_MAX_REQ;
79 module_param(cifs_max_pending, uint, 0444);
80 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
81 "Default: 32767 Range: 2 to 32767.");
82 module_param(enable_oplocks, bool, 0644);
83 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
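/*
 * Usage note (illustrative): the 0444 parameters above are read-only at
 * runtime and must be set at module load time, e.g.
 *   modprobe cifs CIFSMaxBufSize=130048 cifs_min_rcv=8
 * while enable_oplocks (0644) can also be toggled later through
 * /sys/module/cifs/parameters/enable_oplocks.
 */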
85 extern mempool_t *cifs_sm_req_poolp;
86 extern mempool_t *cifs_req_poolp;
87 extern mempool_t *cifs_mid_poolp;
89 struct workqueue_struct *cifsiod_wq;
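/* cifs_lock_secret: random value, seeded with get_random_bytes() in init_cifs() below */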
90 __u32 cifs_lock_secret;
93 * Bumps refcount for cifs super block.
94 * Note that it should only be called if a reference to the VFS super block is
95 * already held, e.g. in open-type syscalls context. Otherwise it can race with
96 * atomic_dec_and_test in deactivate_locked_super.
99 cifs_sb_active(struct super_block *sb)
101 struct cifs_sb_info *server = CIFS_SB(sb);
103 if (atomic_inc_return(&server->active) == 1)
104 atomic_inc(&sb->s_active);
108 cifs_sb_deactive(struct super_block *sb)
110 struct cifs_sb_info *server = CIFS_SB(sb);
112 if (atomic_dec_and_test(&server->active))
113 deactivate_super(sb);
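/*
 * cifs_read_super: fill in the superblock (flags, size limits, time
 * granularity, operations, block size) and look up the root inode and
 * dentry for this mount.
 */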
117 cifs_read_super(struct super_block *sb)
120 struct cifs_sb_info *cifs_sb;
121 struct cifs_tcon *tcon;
124 cifs_sb = CIFS_SB(sb);
125 tcon = cifs_sb_master_tcon(cifs_sb);
127 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
128 sb->s_flags |= MS_POSIXACL;
130 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
131 sb->s_maxbytes = MAX_LFS_FILESIZE;
133 sb->s_maxbytes = MAX_NON_LFS;
135 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
136 sb->s_time_gran = 100;
138 sb->s_magic = CIFS_MAGIC_NUMBER;
139 sb->s_op = &cifs_super_ops;
140 sb->s_xattr = cifs_xattr_handlers;
141 sb->s_bdi = &cifs_sb->bdi;
142 sb->s_blocksize = CIFS_MAX_MSGSIZE;
143 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
144 inode = cifs_root_iget(sb);
152 sb->s_d_op = &cifs_ci_dentry_ops;
154 sb->s_d_op = &cifs_dentry_ops;
156 sb->s_root = d_make_root(inode);
162 #ifdef CONFIG_CIFS_NFSD_EXPORT
163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
164 cifs_dbg(FYI, "export ops supported\n");
165 sb->s_export_op = &cifs_export_ops;
167 #endif /* CONFIG_CIFS_NFSD_EXPORT */
172 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
176 static void cifs_kill_sb(struct super_block *sb)
178 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
180 cifs_umount(cifs_sb);
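/*
 * cifs_statfs: report filesystem statistics.  f_namelen is approximated
 * with PATH_MAX; the remaining fields are filled in by the per-dialect
 * queryfs operation when the server provides one.
 */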
184 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
186 struct super_block *sb = dentry->d_sb;
187 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
188 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
189 struct TCP_Server_Info *server = tcon->ses->server;
196 * PATH_MAX may be too long - it would presumably be the total path,
197 * but note that some servers (including Samba 3) have a shorter
200 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
202 buf->f_namelen = PATH_MAX;
203 buf->f_files = 0; /* undefined */
204 buf->f_ffree = 0; /* unlimited */
206 if (server->ops->queryfs)
207 rc = server->ops->queryfs(xid, tcon, buf);
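/*
 * cifs_fallocate: delegate to the per-dialect fallocate operation when the
 * server supports one.
 */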
213 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
215 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
216 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
217 struct TCP_Server_Info *server = tcon->ses->server;
219 if (server->ops->fallocate)
220 return server->ops->fallocate(file, tcon, mode, off, len);
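/*
 * cifs_permission: with the "noperm" mount flag only the execute bit is
 * checked on the client; otherwise fall back to generic_permission() so
 * mount-time mode restrictions are honored.
 */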
225 static int cifs_permission(struct inode *inode, int mask)
227 struct cifs_sb_info *cifs_sb;
229 cifs_sb = CIFS_SB(inode->i_sb);
231 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
232 if ((mask & MAY_EXEC) && !execute_ok(inode))
236 } else /* file mode might have been restricted at mount time
237 on the client (above and beyond the ACL on the server) for
238 servers which do not support setting and viewing mode bits,
239 so allowing the client to check permissions is useful */
240 return generic_permission(inode, mask);
243 static struct kmem_cache *cifs_inode_cachep;
244 static struct kmem_cache *cifs_req_cachep;
245 static struct kmem_cache *cifs_mid_cachep;
246 static struct kmem_cache *cifs_sm_req_cachep;
247 mempool_t *cifs_sm_req_poolp;
248 mempool_t *cifs_req_poolp;
249 mempool_t *cifs_mid_poolp;
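/*
 * cifs_alloc_inode: allocate a cifsInodeInfo from the inode slab and
 * initialize the per-inode CIFS state (attributes, oplock level, writer
 * tracking, lease key, open-file and lock lists).
 */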
251 static struct inode *
252 cifs_alloc_inode(struct super_block *sb)
254 struct cifsInodeInfo *cifs_inode;
255 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
258 cifs_inode->cifsAttrs = 0x20; /* default */
259 cifs_inode->time = 0;
261 * Until the file is open and we have gotten oplock info back from the
262 * server, we cannot assume caching of file data or metadata.
264 cifs_set_oplock_level(cifs_inode, 0);
265 cifs_inode->flags = 0;
266 spin_lock_init(&cifs_inode->writers_lock);
267 cifs_inode->writers = 0;
268 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
269 cifs_inode->server_eof = 0;
270 cifs_inode->uniqueid = 0;
271 cifs_inode->createtime = 0;
272 cifs_inode->epoch = 0;
273 #ifdef CONFIG_CIFS_SMB2
274 generate_random_uuid(cifs_inode->lease_key);
277 * Can not set i_flags here - they get immediately overwritten to zero
280 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
281 INIT_LIST_HEAD(&cifs_inode->openFileList);
282 INIT_LIST_HEAD(&cifs_inode->llist);
283 return &cifs_inode->vfs_inode;
286 static void cifs_i_callback(struct rcu_head *head)
288 struct inode *inode = container_of(head, struct inode, i_rcu);
289 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
293 cifs_destroy_inode(struct inode *inode)
295 call_rcu(&inode->i_rcu, cifs_i_callback);
299 cifs_evict_inode(struct inode *inode)
301 truncate_inode_pages_final(&inode->i_data);
303 cifs_fscache_release_inode_cookie(inode);
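/*
 * cifs_show_address: append the ",addr=" mount option, formatted as IPv4
 * or IPv6 (with a scope id suffix when present).
 */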
307 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
309 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
310 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
312 seq_puts(s, ",addr=");
314 switch (server->dstaddr.ss_family) {
316 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
319 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
320 if (sa6->sin6_scope_id)
321 seq_printf(s, "%%%u", sa6->sin6_scope_id);
324 seq_puts(s, "(unknown)");
329 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
331 if (ses->sectype == Unspecified) {
332 if (ses->user_name == NULL)
333 seq_puts(s, ",sec=none");
337 seq_puts(s, ",sec=");
339 switch (ses->sectype) {
341 seq_puts(s, "lanman");
344 seq_puts(s, "ntlmv2");
353 seq_puts(s, "ntlmssp");
356 /* shouldn't ever happen */
357 seq_puts(s, "unknown");
366 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
368 seq_puts(s, ",cache=");
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
371 seq_puts(s, "strict");
372 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
375 seq_puts(s, "loose");
379 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
381 struct nls_table *def;
383 /* Display iocharset= option if it's not default charset */
384 def = load_nls_default();
386 seq_printf(s, ",iocharset=%s", cur->charset);
391 * cifs_show_options() is for displaying mount options in /proc/mounts.
392 * Not all settable options are displayed but most of the important
396 cifs_show_options(struct seq_file *s, struct dentry *root)
398 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
399 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
400 struct sockaddr *srcaddr;
401 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
403 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
404 cifs_show_security(s, tcon->ses);
405 cifs_show_cache_flavor(s, cifs_sb);
407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
408 seq_puts(s, ",multiuser");
409 else if (tcon->ses->user_name)
410 seq_show_option(s, "username", tcon->ses->user_name);
412 if (tcon->ses->domainName)
413 seq_show_option(s, "domain", tcon->ses->domainName);
415 if (srcaddr->sa_family != AF_UNSPEC) {
416 struct sockaddr_in *saddr4;
417 struct sockaddr_in6 *saddr6;
418 saddr4 = (struct sockaddr_in *)srcaddr;
419 saddr6 = (struct sockaddr_in6 *)srcaddr;
420 if (srcaddr->sa_family == AF_INET6)
421 seq_printf(s, ",srcaddr=%pI6c",
423 else if (srcaddr->sa_family == AF_INET)
424 seq_printf(s, ",srcaddr=%pI4",
425 &saddr4->sin_addr.s_addr);
427 seq_printf(s, ",srcaddr=BAD-AF:%i",
428 (int)(srcaddr->sa_family));
431 seq_printf(s, ",uid=%u",
432 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
433 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
434 seq_puts(s, ",forceuid");
436 seq_puts(s, ",noforceuid");
438 seq_printf(s, ",gid=%u",
439 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
440 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
441 seq_puts(s, ",forcegid");
443 seq_puts(s, ",noforcegid");
445 cifs_show_address(s, tcon->ses->server);
448 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
449 cifs_sb->mnt_file_mode,
450 cifs_sb->mnt_dir_mode);
452 cifs_show_nls(s, cifs_sb->local_nls);
455 seq_puts(s, ",seal");
457 seq_puts(s, ",nocase");
459 seq_puts(s, ",hard");
460 if (tcon->use_persistent)
461 seq_puts(s, ",persistenthandles");
462 else if (tcon->use_resilient)
463 seq_puts(s, ",resilienthandles");
465 seq_puts(s, ",unix");
467 seq_puts(s, ",nounix");
468 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
469 seq_puts(s, ",posixpaths");
470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
471 seq_puts(s, ",setuids");
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
473 seq_puts(s, ",idsfromsid");
474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
475 seq_puts(s, ",serverino");
476 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
477 seq_puts(s, ",rwpidforward");
478 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
479 seq_puts(s, ",forcemand");
480 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
481 seq_puts(s, ",nouser_xattr");
482 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
483 seq_puts(s, ",mapchars");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
485 seq_puts(s, ",mapposix");
486 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
488 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
489 seq_puts(s, ",nobrl");
490 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
491 seq_puts(s, ",cifsacl");
492 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
493 seq_puts(s, ",dynperm");
494 if (root->d_sb->s_flags & MS_POSIXACL)
496 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
497 seq_puts(s, ",mfsymlinks");
498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
500 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
501 seq_puts(s, ",nostrictsync");
502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
503 seq_puts(s, ",noperm");
504 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
505 seq_printf(s, ",backupuid=%u",
506 from_kuid_munged(&init_user_ns,
507 cifs_sb->mnt_backupuid));
508 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
509 seq_printf(s, ",backupgid=%u",
510 from_kgid_munged(&init_user_ns,
511 cifs_sb->mnt_backupgid));
513 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
514 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
515 seq_printf(s, ",echo_interval=%lu",
516 tcon->ses->server->echo_interval / HZ);
517 /* convert actimeo and display it in seconds */
518 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
523 static void cifs_umount_begin(struct super_block *sb)
525 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
526 struct cifs_tcon *tcon;
531 tcon = cifs_sb_master_tcon(cifs_sb);
533 spin_lock(&cifs_tcp_ses_lock);
534 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
535 /* we have other mounts to the same share, or we have
536 already tried to force-umount this one and woken up
537 all waiting network requests; nothing to do */
538 spin_unlock(&cifs_tcp_ses_lock);
540 } else if (tcon->tc_count == 1)
541 tcon->tidStatus = CifsExiting;
542 spin_unlock(&cifs_tcp_ses_lock);
544 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
545 /* cancel_notify_requests(tcon); */
546 if (tcon->ses && tcon->ses->server) {
547 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
548 wake_up_all(&tcon->ses->server->request_q);
549 wake_up_all(&tcon->ses->server->response_q);
550 msleep(1); /* yield */
551 /* we have to kick the requests once more */
552 wake_up_all(&tcon->ses->server->response_q);
559 #ifdef CONFIG_CIFS_STATS2
560 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
567 static int cifs_remount(struct super_block *sb, int *flags, char *data)
570 *flags |= MS_NODIRATIME;
574 static int cifs_drop_inode(struct inode *inode)
576 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
578 /* no serverino => unconditional eviction */
579 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
580 generic_drop_inode(inode);
583 static const struct super_operations cifs_super_ops = {
584 .statfs = cifs_statfs,
585 .alloc_inode = cifs_alloc_inode,
586 .destroy_inode = cifs_destroy_inode,
587 .drop_inode = cifs_drop_inode,
588 .evict_inode = cifs_evict_inode,
589 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
590 function unless later we add lazy close of inodes or unless the
591 kernel forgets to call us with the same number of releases (closes)
593 .show_options = cifs_show_options,
594 .umount_begin = cifs_umount_begin,
595 .remount_fs = cifs_remount,
596 #ifdef CONFIG_CIFS_STATS2
597 .show_stats = cifs_show_stats,
602 * Get root dentry from superblock according to prefix path mount option.
603 * Return dentry with refcount + 1 on success and an ERR_PTR otherwise.
605 static struct dentry *
606 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
608 struct dentry *dentry;
609 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
610 char *full_path = NULL;
614 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
615 return dget(sb->s_root);
617 full_path = cifs_build_path_to_root(vol, cifs_sb,
618 cifs_sb_master_tcon(cifs_sb), 0);
619 if (full_path == NULL)
620 return ERR_PTR(-ENOMEM);
622 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
624 sep = CIFS_DIR_SEP(cifs_sb);
625 dentry = dget(sb->s_root);
629 struct inode *dir = d_inode(dentry);
630 struct dentry *child;
634 dentry = ERR_PTR(-ENOENT);
637 if (!S_ISDIR(dir->i_mode)) {
639 dentry = ERR_PTR(-ENOTDIR);
643 /* skip separators */
650 while (*s && *s != sep)
653 child = lookup_one_len_unlocked(p, dentry, s - p);
656 } while (!IS_ERR(dentry));
661 static int cifs_set_super(struct super_block *sb, void *data)
663 struct cifs_mnt_data *mnt_data = data;
664 sb->s_fs_info = mnt_data->cifs_sb;
665 return set_anon_super(sb, NULL);
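/*
 * cifs_do_mount: parse the mount options, allocate and set up the
 * cifs_sb_info, connect to the server, then find or create a matching
 * superblock via sget() and return the root dentry (honoring any prefix
 * path).
 */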
668 static struct dentry *
669 cifs_do_mount(struct file_system_type *fs_type,
670 int flags, const char *dev_name, void *data)
673 struct super_block *sb;
674 struct cifs_sb_info *cifs_sb;
675 struct smb_vol *volume_info;
676 struct cifs_mnt_data mnt_data;
679 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
681 volume_info = cifs_get_volume_info((char *)data, dev_name);
682 if (IS_ERR(volume_info))
683 return ERR_CAST(volume_info);
685 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
686 if (cifs_sb == NULL) {
687 root = ERR_PTR(-ENOMEM);
691 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
692 if (cifs_sb->mountdata == NULL) {
693 root = ERR_PTR(-ENOMEM);
697 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
703 rc = cifs_mount(cifs_sb, volume_info);
705 if (!(flags & MS_SILENT))
706 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
712 mnt_data.vol = volume_info;
713 mnt_data.cifs_sb = cifs_sb;
714 mnt_data.flags = flags;
716 /* BB should we make this contingent on mount parm? */
717 flags |= MS_NODIRATIME | MS_NOATIME;
719 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
722 cifs_umount(cifs_sb);
727 cifs_dbg(FYI, "Use existing superblock\n");
728 cifs_umount(cifs_sb);
730 rc = cifs_read_super(sb);
736 sb->s_flags |= MS_ACTIVE;
739 root = cifs_get_root(volume_info, sb);
743 cifs_dbg(FYI, "dentry root is: %p\n", root);
747 deactivate_locked_super(sb);
749 cifs_cleanup_volume_info(volume_info);
753 kfree(cifs_sb->prepath);
754 kfree(cifs_sb->mountdata);
757 unload_nls(volume_info->local_nls);
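/*
 * cifs_loose_read_iter ("cache=loose" read path): O_DIRECT opens bypass
 * the page cache entirely; otherwise revalidate the mapping and use the
 * generic cached read path.
 */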
762 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
765 struct inode *inode = file_inode(iocb->ki_filp);
767 if (iocb->ki_filp->f_flags & O_DIRECT)
768 return cifs_user_readv(iocb, iter);
770 rc = cifs_revalidate_mapping(inode);
774 return generic_file_read_iter(iocb, iter);
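/*
 * cifs_file_write_iter: take a writer reference and write through the page
 * cache; when the inode holds no write (handle) cache, flush the dirty
 * pages right away.  O_DIRECT writes zap the mapping if read caching was
 * granted.
 */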
777 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
779 struct inode *inode = file_inode(iocb->ki_filp);
780 struct cifsInodeInfo *cinode = CIFS_I(inode);
784 if (iocb->ki_filp->f_flags & O_DIRECT) {
785 written = cifs_user_writev(iocb, from);
786 if (written > 0 && CIFS_CACHE_READ(cinode)) {
787 cifs_zap_mapping(inode);
789 "Set no oplock for inode=%p after a write operation\n",
796 written = cifs_get_writer(cinode);
800 written = generic_file_write_iter(iocb, from);
802 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
805 rc = filemap_fdatawrite(inode->i_mapping);
807 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
811 cifs_put_writer(cinode);
815 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
818 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
819 * the cached file length
821 if (whence != SEEK_SET && whence != SEEK_CUR) {
823 struct inode *inode = file_inode(file);
826 * We need to be sure that all dirty pages are written and the
827 * server has the newest file length.
829 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
830 inode->i_mapping->nrpages != 0) {
831 rc = filemap_fdatawait(inode->i_mapping);
833 mapping_set_error(inode->i_mapping, rc);
838 * Some applications poll for the file length in this strange
839 * way so we must seek to end on non-oplocked files by
840 * setting the revalidate time to zero.
842 CIFS_I(inode)->time = 0;
844 rc = cifs_revalidate_file_attr(file);
848 return generic_file_llseek(file, offset, whence);
852 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
855 * Note that this is called by vfs setlease with i_lock held to
856 * protect *lease from going away.
858 struct inode *inode = file_inode(file);
859 struct cifsFileInfo *cfile = file->private_data;
861 if (!(S_ISREG(inode->i_mode)))
864 /* Check if file is oplocked if this is a request for a new lease */
865 if (arg == F_UNLCK ||
866 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
867 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
868 return generic_setlease(file, arg, lease, priv);
869 else if (tlink_tcon(cfile->tlink)->local_lease &&
870 !CIFS_CACHE_READ(CIFS_I(inode)))
872 * If the server claims to support oplock on this file, then we
873 * still need to check oplock even if the local_lease mount
874 * option is set, but there are servers which do not support
875 * oplock for which this mount option may be useful if the user
876 * knows that the file won't be changed on the server by anyone
879 return generic_setlease(file, arg, lease, priv);
884 struct file_system_type cifs_fs_type = {
885 .owner = THIS_MODULE,
887 .mount = cifs_do_mount,
888 .kill_sb = cifs_kill_sb,
891 MODULE_ALIAS_FS("cifs");
892 const struct inode_operations cifs_dir_inode_ops = {
893 .create = cifs_create,
894 .atomic_open = cifs_atomic_open,
895 .lookup = cifs_lookup,
896 .getattr = cifs_getattr,
897 .unlink = cifs_unlink,
898 .link = cifs_hardlink,
901 .rename = cifs_rename2,
902 .permission = cifs_permission,
903 .setattr = cifs_setattr,
904 .symlink = cifs_symlink,
906 .listxattr = cifs_listxattr,
909 const struct inode_operations cifs_file_inode_ops = {
910 .setattr = cifs_setattr,
911 .getattr = cifs_getattr,
912 .permission = cifs_permission,
913 .listxattr = cifs_listxattr,
916 const struct inode_operations cifs_symlink_inode_ops = {
917 .get_link = cifs_get_link,
918 .permission = cifs_permission,
919 .listxattr = cifs_listxattr,
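/*
 * cifs_clone_file_range: reflink-style clone implemented with the
 * per-dialect duplicate_extents operation; the affected target pages are
 * truncated first and the target's cached attributes are invalidated
 * afterwards.
 */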
922 static int cifs_clone_file_range(struct file *src_file, loff_t off,
923 struct file *dst_file, loff_t destoff, u64 len)
925 struct inode *src_inode = file_inode(src_file);
926 struct inode *target_inode = file_inode(dst_file);
927 struct cifsFileInfo *smb_file_src = src_file->private_data;
928 struct cifsFileInfo *smb_file_target = dst_file->private_data;
929 struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
933 cifs_dbg(FYI, "clone range\n");
937 if (!src_file->private_data || !dst_file->private_data) {
939 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
944 * Note: the cifs case is easier than btrfs since the server is responsible
945 * for checking for proper open modes and file type, and if it wants, the
946 * server could even support copying a range where source == target
948 lock_two_nondirectories(target_inode, src_inode);
951 len = src_inode->i_size - off;
953 cifs_dbg(FYI, "about to flush pages\n");
954 /* should we flush the first and last page first? */
955 truncate_inode_pages_range(&target_inode->i_data, destoff,
956 PAGE_ALIGN(destoff + len)-1);
958 if (target_tcon->ses->server->ops->duplicate_extents)
959 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
960 smb_file_src, smb_file_target, off, len, destoff);
964 /* force revalidate of size and timestamps of target file now
965 that target is updated on the server */
966 CIFS_I(target_inode)->time = 0;
967 /* although unlocking in the reverse order from locking is not
968 strictly necessary here it is a little cleaner to be consistent */
969 unlock_two_nondirectories(src_inode, target_inode);
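/*
 * cifs_file_copychunk_range: server-side copy using the per-dialect
 * copychunk_range operation; source and target must share the same SMB
 * session, and the same-inode case is checked up front.
 */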
975 ssize_t cifs_file_copychunk_range(unsigned int xid,
976 struct file *src_file, loff_t off,
977 struct file *dst_file, loff_t destoff,
978 size_t len, unsigned int flags)
980 struct inode *src_inode = file_inode(src_file);
981 struct inode *target_inode = file_inode(dst_file);
982 struct cifsFileInfo *smb_file_src;
983 struct cifsFileInfo *smb_file_target;
984 struct cifs_tcon *src_tcon;
985 struct cifs_tcon *target_tcon;
988 cifs_dbg(FYI, "copychunk range\n");
990 if (src_inode == target_inode) {
995 if (!src_file->private_data || !dst_file->private_data) {
997 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1002 smb_file_target = dst_file->private_data;
1003 smb_file_src = src_file->private_data;
1004 src_tcon = tlink_tcon(smb_file_src->tlink);
1005 target_tcon = tlink_tcon(smb_file_target->tlink);
1007 if (src_tcon->ses != target_tcon->ses) {
1008 cifs_dbg(VFS, "source and target of copy not on same server\n");
1013 * Note: the cifs case is easier than btrfs since the server is responsible
1014 * for checking for proper open modes and file type, and if it wants, the
1015 * server could even support copying a range where source == target
1017 lock_two_nondirectories(target_inode, src_inode);
1019 cifs_dbg(FYI, "about to flush pages\n");
1020 /* should we flush the first and last page first? */
1021 truncate_inode_pages(&target_inode->i_data, 0);
1023 if (target_tcon->ses->server->ops->copychunk_range)
1024 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1025 smb_file_src, smb_file_target, off, len, destoff);
1029 /* force revalidate of size and timestamps of target file now
1030 * that target is updated on the server
1032 CIFS_I(target_inode)->time = 0;
1033 /* although unlocking in the reverse order from locking is not
1034 * strictly necessary here it is a little cleaner to be consistent
1036 unlock_two_nondirectories(src_inode, target_inode);
1042 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1043 struct file *dst_file, loff_t destoff,
1044 size_t len, unsigned int flags)
1046 unsigned int xid = get_xid();
1049 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1055 const struct file_operations cifs_file_ops = {
1056 .read_iter = cifs_loose_read_iter,
1057 .write_iter = cifs_file_write_iter,
1059 .release = cifs_close,
1061 .fsync = cifs_fsync,
1062 .flush = cifs_flush,
1063 .mmap = cifs_file_mmap,
1064 .splice_read = generic_file_splice_read,
1065 .llseek = cifs_llseek,
1066 .unlocked_ioctl = cifs_ioctl,
1067 .copy_file_range = cifs_copy_file_range,
1068 .clone_file_range = cifs_clone_file_range,
1069 .setlease = cifs_setlease,
1070 .fallocate = cifs_fallocate,
1073 const struct file_operations cifs_file_strict_ops = {
1074 .read_iter = cifs_strict_readv,
1075 .write_iter = cifs_strict_writev,
1077 .release = cifs_close,
1079 .fsync = cifs_strict_fsync,
1080 .flush = cifs_flush,
1081 .mmap = cifs_file_strict_mmap,
1082 .splice_read = generic_file_splice_read,
1083 .llseek = cifs_llseek,
1084 .unlocked_ioctl = cifs_ioctl,
1085 .copy_file_range = cifs_copy_file_range,
1086 .clone_file_range = cifs_clone_file_range,
1087 .setlease = cifs_setlease,
1088 .fallocate = cifs_fallocate,
1091 const struct file_operations cifs_file_direct_ops = {
1092 /* BB reevaluate whether they can be done with directio, no cache */
1093 .read_iter = cifs_user_readv,
1094 .write_iter = cifs_user_writev,
1096 .release = cifs_close,
1098 .fsync = cifs_fsync,
1099 .flush = cifs_flush,
1100 .mmap = cifs_file_mmap,
1101 .splice_read = generic_file_splice_read,
1102 .unlocked_ioctl = cifs_ioctl,
1103 .copy_file_range = cifs_copy_file_range,
1104 .clone_file_range = cifs_clone_file_range,
1105 .llseek = cifs_llseek,
1106 .setlease = cifs_setlease,
1107 .fallocate = cifs_fallocate,
1110 const struct file_operations cifs_file_nobrl_ops = {
1111 .read_iter = cifs_loose_read_iter,
1112 .write_iter = cifs_file_write_iter,
1114 .release = cifs_close,
1115 .fsync = cifs_fsync,
1116 .flush = cifs_flush,
1117 .mmap = cifs_file_mmap,
1118 .splice_read = generic_file_splice_read,
1119 .llseek = cifs_llseek,
1120 .unlocked_ioctl = cifs_ioctl,
1121 .copy_file_range = cifs_copy_file_range,
1122 .clone_file_range = cifs_clone_file_range,
1123 .setlease = cifs_setlease,
1124 .fallocate = cifs_fallocate,
1127 const struct file_operations cifs_file_strict_nobrl_ops = {
1128 .read_iter = cifs_strict_readv,
1129 .write_iter = cifs_strict_writev,
1131 .release = cifs_close,
1132 .fsync = cifs_strict_fsync,
1133 .flush = cifs_flush,
1134 .mmap = cifs_file_strict_mmap,
1135 .splice_read = generic_file_splice_read,
1136 .llseek = cifs_llseek,
1137 .unlocked_ioctl = cifs_ioctl,
1138 .copy_file_range = cifs_copy_file_range,
1139 .clone_file_range = cifs_clone_file_range,
1140 .setlease = cifs_setlease,
1141 .fallocate = cifs_fallocate,
1144 const struct file_operations cifs_file_direct_nobrl_ops = {
1145 /* BB reevaluate whether they can be done with directio, no cache */
1146 .read_iter = cifs_user_readv,
1147 .write_iter = cifs_user_writev,
1149 .release = cifs_close,
1150 .fsync = cifs_fsync,
1151 .flush = cifs_flush,
1152 .mmap = cifs_file_mmap,
1153 .splice_read = generic_file_splice_read,
1154 .unlocked_ioctl = cifs_ioctl,
1155 .copy_file_range = cifs_copy_file_range,
1156 .clone_file_range = cifs_clone_file_range,
1157 .llseek = cifs_llseek,
1158 .setlease = cifs_setlease,
1159 .fallocate = cifs_fallocate,
1162 const struct file_operations cifs_dir_ops = {
1163 .iterate_shared = cifs_readdir,
1164 .release = cifs_closedir,
1165 .read = generic_read_dir,
1166 .unlocked_ioctl = cifs_ioctl,
1167 .copy_file_range = cifs_copy_file_range,
1168 .clone_file_range = cifs_clone_file_range,
1169 .llseek = generic_file_llseek,
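/*
 * cifs_init_once: slab constructor for cifsInodeInfo objects; initialize
 * the embedded VFS inode and the byte-range lock semaphore.
 */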
1173 cifs_init_once(void *inode)
1175 struct cifsInodeInfo *cifsi = inode;
1177 inode_init_once(&cifsi->vfs_inode);
1178 init_rwsem(&cifsi->lock_sem);
1182 cifs_init_inodecache(void)
1184 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1185 sizeof(struct cifsInodeInfo),
1186 0, (SLAB_RECLAIM_ACCOUNT|
1187 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1189 if (cifs_inode_cachep == NULL)
1196 cifs_destroy_inodecache(void)
1199 * Make sure all delayed rcu free inodes are flushed before we
1203 kmem_cache_destroy(cifs_inode_cachep);
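/*
 * cifs_init_request_bufs: clamp CIFSMaxBufSize to the 8192..130048 range
 * (rounded down to a 512-byte multiple) and create the slab caches and
 * mempools for large and small request buffers.
 */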
1207 cifs_init_request_bufs(void)
1209 size_t max_hdr_size = MAX_CIFS_HDR_SIZE;
1210 #ifdef CONFIG_CIFS_SMB2
1212 * SMB2 maximum header size is bigger than CIFS one - no problems to
1213 * allocate some more bytes for CIFS.
1215 max_hdr_size = MAX_SMB2_HDR_SIZE;
1217 if (CIFSMaxBufSize < 8192) {
1218 /* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1219 Unicode path name has to fit in any SMB/CIFS path-based frames */
1220 CIFSMaxBufSize = 8192;
1221 } else if (CIFSMaxBufSize > 1024*127) {
1222 CIFSMaxBufSize = 1024 * 127;
1224 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1227 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1228 CIFSMaxBufSize, CIFSMaxBufSize);
1230 cifs_req_cachep = kmem_cache_create("cifs_request",
1231 CIFSMaxBufSize + max_hdr_size, 0,
1232 SLAB_HWCACHE_ALIGN, NULL);
1233 if (cifs_req_cachep == NULL)
1236 if (cifs_min_rcv < 1)
1238 else if (cifs_min_rcv > 64) {
1240 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1243 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1246 if (cifs_req_poolp == NULL) {
1247 kmem_cache_destroy(cifs_req_cachep);
1250 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1251 almost all handle-based requests (but not write responses, nor is it
1252 sufficient for path-based requests). A smaller size would have
1253 been more efficient (compacting multiple slab items on one 4k page)
1254 for the case in which debug was on, but this larger size allows
1255 more SMBs to use the small buffer alloc and is still much more
1256 efficient to alloc one per page off the slab compared to a 17K (5 page)
1257 alloc of large cifs buffers, even when page debugging is on */
1258 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
1259 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1261 if (cifs_sm_req_cachep == NULL) {
1262 mempool_destroy(cifs_req_poolp);
1263 kmem_cache_destroy(cifs_req_cachep);
1267 if (cifs_min_small < 2)
1269 else if (cifs_min_small > 256) {
1270 cifs_min_small = 256;
1271 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1274 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1275 cifs_sm_req_cachep);
1277 if (cifs_sm_req_poolp == NULL) {
1278 mempool_destroy(cifs_req_poolp);
1279 kmem_cache_destroy(cifs_req_cachep);
1280 kmem_cache_destroy(cifs_sm_req_cachep);
1288 cifs_destroy_request_bufs(void)
1290 mempool_destroy(cifs_req_poolp);
1291 kmem_cache_destroy(cifs_req_cachep);
1292 mempool_destroy(cifs_sm_req_poolp);
1293 kmem_cache_destroy(cifs_sm_req_cachep);
1297 cifs_init_mids(void)
1299 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1300 sizeof(struct mid_q_entry), 0,
1301 SLAB_HWCACHE_ALIGN, NULL);
1302 if (cifs_mid_cachep == NULL)
1305 /* 3 is a reasonable minimum number of simultaneous operations */
1306 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1307 if (cifs_mid_poolp == NULL) {
1308 kmem_cache_destroy(cifs_mid_cachep);
1316 cifs_destroy_mids(void)
1318 mempool_destroy(cifs_mid_poolp);
1319 kmem_cache_destroy(cifs_mid_cachep);
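/*
 * Module initialization (init_cifs): set up the global session list,
 * counters and locks, clamp cifs_max_pending, create the cifsiod
 * workqueue, register the caches and key types, and finally register the
 * filesystem.
 */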
1327 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1328 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1329 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1330 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1331 #endif /* was needed for dnotify, and will be needed for inotify once the VFS fix lands */
1333 * Initialize Global counters
1335 atomic_set(&sesInfoAllocCount, 0);
1336 atomic_set(&tconInfoAllocCount, 0);
1337 atomic_set(&tcpSesAllocCount, 0);
1338 atomic_set(&tcpSesReconnectCount, 0);
1339 atomic_set(&tconInfoReconnectCount, 0);
1341 atomic_set(&bufAllocCount, 0);
1342 atomic_set(&smBufAllocCount, 0);
1343 #ifdef CONFIG_CIFS_STATS2
1344 atomic_set(&totBufAllocCount, 0);
1345 atomic_set(&totSmBufAllocCount, 0);
1346 #endif /* CONFIG_CIFS_STATS2 */
1348 atomic_set(&midCount, 0);
1349 GlobalCurrentXid = 0;
1350 GlobalTotalActiveXid = 0;
1351 GlobalMaxActiveXid = 0;
1352 spin_lock_init(&cifs_tcp_ses_lock);
1353 spin_lock_init(&GlobalMid_Lock);
1355 get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
1357 if (cifs_max_pending < 2) {
1358 cifs_max_pending = 2;
1359 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1360 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1361 cifs_max_pending = CIFS_MAX_REQ;
1362 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1366 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1369 goto out_clean_proc;
1372 rc = cifs_fscache_register();
1374 goto out_destroy_wq;
1376 rc = cifs_init_inodecache();
1378 goto out_unreg_fscache;
1380 rc = cifs_init_mids();
1382 goto out_destroy_inodecache;
1384 rc = cifs_init_request_bufs();
1386 goto out_destroy_mids;
1388 #ifdef CONFIG_CIFS_UPCALL
1389 rc = init_cifs_spnego();
1391 goto out_destroy_request_bufs;
1392 #endif /* CONFIG_CIFS_UPCALL */
1394 #ifdef CONFIG_CIFS_ACL
1395 rc = init_cifs_idmap();
1397 goto out_register_key_type;
1398 #endif /* CONFIG_CIFS_ACL */
1400 rc = register_filesystem(&cifs_fs_type);
1402 goto out_init_cifs_idmap;
1406 out_init_cifs_idmap:
1407 #ifdef CONFIG_CIFS_ACL
1409 out_register_key_type:
1411 #ifdef CONFIG_CIFS_UPCALL
1413 out_destroy_request_bufs:
1415 cifs_destroy_request_bufs();
1417 cifs_destroy_mids();
1418 out_destroy_inodecache:
1419 cifs_destroy_inodecache();
1421 cifs_fscache_unregister();
1423 destroy_workqueue(cifsiod_wq);
1432 cifs_dbg(NOISY, "exit_cifs\n");
1433 unregister_filesystem(&cifs_fs_type);
1434 cifs_dfs_release_automount_timer();
1435 #ifdef CONFIG_CIFS_ACL
1438 #ifdef CONFIG_CIFS_UPCALL
1439 unregister_key_type(&cifs_spnego_key_type);
1441 cifs_destroy_request_bufs();
1442 cifs_destroy_mids();
1443 cifs_destroy_inodecache();
1444 cifs_fscache_unregister();
1445 destroy_workqueue(cifsiod_wq);
1449 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1450 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1452 ("VFS to access servers complying with the SNIA CIFS Specification "
1453 "e.g. Samba and Windows");
1454 MODULE_VERSION(CIFS_VERSION);
1455 MODULE_SOFTDEP("pre: arc4");
1456 MODULE_SOFTDEP("pre: des");
1457 MODULE_SOFTDEP("pre: ecb");
1458 MODULE_SOFTDEP("pre: hmac");
1459 MODULE_SOFTDEP("pre: md4");
1460 MODULE_SOFTDEP("pre: md5");
1461 MODULE_SOFTDEP("pre: nls");
1462 #ifdef CONFIG_CIFS_SMB2
1463 MODULE_SOFTDEP("pre: aes");
1464 MODULE_SOFTDEP("pre: cmac");
1465 MODULE_SOFTDEP("pre: sha256");
1466 MODULE_SOFTDEP("pre: aead2");
1467 MODULE_SOFTDEP("pre: ccm");
1468 #endif /* CONFIG_CIFS_SMB2 */
1469 module_init(init_cifs)
1470 module_exit(exit_cifs)