4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
40 #define DECLARE_GLOBALS_HERE
42 #include "cifsproto.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
46 #include <linux/key-type.h>
47 #include "dns_resolve.h"
48 #include "cifs_spnego.h"
49 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
/*
 * Module-scope state and tunables for the CIFS client.
 * NOTE(review): this is an elided listing fragment (embedded line numbers,
 * missing lines such as the #endif for CONFIG_CIFS_QUOTA); code left as-is.
 */
51 #ifdef CONFIG_CIFS_QUOTA
52 static struct quotactl_ops cifs_quotactl_ops;
/* Behavioural switches; consumers live elsewhere in the CIFS module. */
58 unsigned int oplockEnabled = 1;
59 unsigned int experimEnabled = 0;
60 unsigned int linuxExtEnabled = 1;
61 unsigned int lookupCacheEnabled = 1;
62 unsigned int multiuser_mount = 0;
63 unsigned int extended_security = CIFSSEC_DEF;
64 /* unsigned int ntlmv2_support = 0; */
65 unsigned int sign_CIFS_PDUs = 1;
/* Kernel threads started in init_cifs(); extern decl silences sparse. */
66 extern struct task_struct *oplockThread; /* remove sparse warning */
67 struct task_struct *oplockThread = NULL;
68 /* extern struct task_struct * dnotifyThread; remove sparse warning */
69 static struct task_struct *dnotifyThread = NULL;
70 static const struct super_operations cifs_super_ops;
/*
 * Module parameters: buffer sizes and request limits, clamped to the
 * ranges stated in the MODULE_PARM_DESC strings by the init-time code.
 */
71 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
72 module_param(CIFSMaxBufSize, int, 0);
73 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
74 "Default: 16384 Range: 8192 to 130048");
75 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
76 module_param(cifs_min_rcv, int, 0);
77 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
79 unsigned int cifs_min_small = 30;
80 module_param(cifs_min_small, int, 0);
81 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
83 unsigned int cifs_max_pending = CIFS_MAX_REQ;
84 module_param(cifs_max_pending, int, 0);
85 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
86 "Default: 50 Range: 2 to 256");
/* Mempools/caches defined further down this file; externs for early use. */
88 extern mempool_t *cifs_sm_req_poolp;
89 extern mempool_t *cifs_req_poolp;
90 extern mempool_t *cifs_mid_poolp;
92 extern struct kmem_cache *cifs_oplock_cachep;
/*
 * cifs_read_super - fill in a superblock at mount time.
 * Allocates the per-sb cifs_sb_info, optionally copies the raw mount data
 * (DFS submount support), performs the actual cifs_mount(), then sets up
 * super_operations, the root inode and root dentry.
 * NOTE(review): elided fragment — error-path labels and several statements
 * (return codes, braces) are missing from this listing.
 */
95 cifs_read_super(struct super_block *sb, void *data,
96 const char *devname, int silent)
99 struct cifs_sb_info *cifs_sb;
102 /* BB should we make this contingent on mount parm? */
103 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
104 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
105 cifs_sb = CIFS_SB(sb);
109 #ifdef CONFIG_CIFS_DFS_UPCALL
110 /* copy mount params to sb for use in submounts */
111 /* BB: should we move this after the mount so we
112 * do not have to do the copy on failed mounts?
113 * BB: May be it is better to do simple copy before
114 * complex operation (mount), and in case of fail
115 * just exit instead of doing mount and attempting
116 * undo it if this copy fails?*/
118 int len = strlen(data);
119 cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
120 if (cifs_sb->mountdata == NULL) {
/* allocation failed: release the sb_info allocated above */
121 kfree(sb->s_fs_info);
122 sb->s_fs_info = NULL;
125 strncpy(cifs_sb->mountdata, data, len + 1);
126 cifs_sb->mountdata[len] = '\0';
130 rc = cifs_mount(sb, cifs_sb, data, devname);
135 ("cifs_mount failed w/return code = %d", rc));
136 goto out_mount_failed;
/* mount succeeded: wire up sb operations and limits */
139 sb->s_magic = CIFS_MAGIC_NUMBER;
140 sb->s_op = &cifs_super_ops;
141 /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
143 cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
144 #ifdef CONFIG_CIFS_QUOTA
145 sb->s_qcop = &cifs_quotactl_ops;
147 sb->s_blocksize = CIFS_MAX_MSGSIZE;
148 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
149 inode = cifs_iget(sb, ROOT_I);
157 sb->s_root = d_alloc_root(inode);
164 #ifdef CONFIG_CIFS_EXPERIMENTAL
/* NFS export only makes sense with stable server inode numbers */
165 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
166 cFYI(1, ("export ops supported"));
167 sb->s_export_op = &cifs_export_ops;
169 #endif /* EXPERIMENTAL */
/* error paths: undo mount / free mountdata / unload nls table */
174 cERROR(1, ("cifs_read_super: get root inode failed"));
178 cifs_umount(sb, cifs_sb);
182 #ifdef CONFIG_CIFS_DFS_UPCALL
183 if (cifs_sb->mountdata) {
184 kfree(cifs_sb->mountdata);
185 cifs_sb->mountdata = NULL;
188 if (cifs_sb->local_nls)
189 unload_nls(cifs_sb->local_nls);
/*
 * cifs_put_super - tear down a superblock at unmount.
 * Unmounts the share, frees the saved DFS mount data, and unloads the
 * NLS table.  NOTE(review): elided fragment; early-return and closing
 * braces are missing from this listing.
 */
196 cifs_put_super(struct super_block *sb)
199 struct cifs_sb_info *cifs_sb;
201 cFYI(1, ("In cifs_put_super"));
202 cifs_sb = CIFS_SB(sb);
203 if (cifs_sb == NULL) {
204 cFYI(1, ("Empty cifs superblock info passed to unmount"));
207 rc = cifs_umount(sb, cifs_sb);
209 cERROR(1, ("cifs_umount failed with return code %d", rc));
210 #ifdef CONFIG_CIFS_DFS_UPCALL
211 if (cifs_sb->mountdata) {
212 kfree(cifs_sb->mountdata);
213 cifs_sb->mountdata = NULL;
217 unload_nls(cifs_sb->local_nls);
/*
 * cifs_statfs - fill in filesystem statistics for statfs(2).
 * Tries progressively older server info levels: POSIX QFS info first
 * (if the share advertises the POSIX extensions), then NT-level QFSInfo,
 * then the legacy LANMAN level as a last resort.
 */
223 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
225 struct super_block *sb = dentry->d_sb;
226 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
227 struct cifsTconInfo *tcon = cifs_sb->tcon;
228 int rc = -EOPNOTSUPP;
233 buf->f_type = CIFS_MAGIC_NUMBER;
236 * PATH_MAX may be too long - it would presumably be total path,
237 * but note that some servers (including Samba 3) have a shorter
240 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
242 buf->f_namelen = PATH_MAX;
243 buf->f_files = 0; /* undefined */
244 buf->f_ffree = 0; /* unlimited */
247 * We could add a second check for a QFS Unix capability bit
/* 1st attempt: POSIX variant, only if the tcon supports Unix extensions */
249 if ((tcon->ses->capabilities & CAP_UNIX) &&
250 (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
251 rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);
254 * Only need to call the old QFSInfo if failed on newer one,
257 if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
258 rc = CIFSSMBQFSInfo(xid, tcon, buf);
261 * Some old Windows servers also do not support level 103, retry with
262 * older level one if old server failed the previous call or we
263 * bypassed it because we detected that this was an older LANMAN sess
266 rc = SMBOldQFSInfo(xid, tcon, buf);
/*
 * cifs_permission - VFS permission check hook.
 * With the "noperm" mount flag, only execute permission is verified
 * locally (the server enforces the rest); otherwise fall through to
 * generic_permission() against the client-side mode bits.
 */
272 static int cifs_permission(struct inode *inode, int mask)
274 struct cifs_sb_info *cifs_sb;
276 cifs_sb = CIFS_SB(inode->i_sb);
278 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
279 if ((mask & MAY_EXEC) && !execute_ok(inode))
283 } else /* file mode might have been restricted at mount time
284 on the client (above and beyond ACL on servers) for
285 servers which do not support setting and viewing mode bits,
286 so allowing client to check permissions is useful */
287 return generic_permission(inode, mask, NULL);
/* Slab caches and mempools backing inode, request and mid allocations;
   created in the cifs_init_* routines below and torn down on module exit. */
290 static struct kmem_cache *cifs_inode_cachep;
291 static struct kmem_cache *cifs_req_cachep;
292 static struct kmem_cache *cifs_mid_cachep;
293 struct kmem_cache *cifs_oplock_cachep;
294 static struct kmem_cache *cifs_sm_req_cachep;
295 mempool_t *cifs_sm_req_poolp;
296 mempool_t *cifs_req_poolp;
297 mempool_t *cifs_mid_poolp;
/*
 * cifs_alloc_inode - allocate and initialise a cifsInodeInfo.
 * Caching flags start false: no caching may be assumed until the server
 * grants an oplock on open.  Returns the embedded VFS inode.
 */
299 static struct inode *
300 cifs_alloc_inode(struct super_block *sb)
302 struct cifsInodeInfo *cifs_inode;
303 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
306 cifs_inode->cifsAttrs = 0x20; /* default */
307 atomic_set(&cifs_inode->inUse, 0);
308 cifs_inode->time = 0;
309 cifs_inode->write_behind_rc = 0;
310 /* Until the file is open and we have gotten oplock
311 info back from the server, can not assume caching of
312 file data or metadata */
313 cifs_inode->clientCanCacheRead = false;
314 cifs_inode->clientCanCacheAll = false;
315 cifs_inode->delete_pending = false;
316 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
318 /* Can not set i_flags here - they get immediately overwritten
319 to zero by the VFS */
320 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
321 INIT_LIST_HEAD(&cifs_inode->openFileList);
322 return &cifs_inode->vfs_inode;
/* cifs_destroy_inode - return the cifsInodeInfo to its slab cache. */
326 cifs_destroy_inode(struct inode *inode)
328 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
332 * cifs_show_options() is for displaying mount options in /proc/mounts.
333 * Not all settable options are displayed but most of the important
/* Emits one ",key=value" (or bare ",flag") per active mount option. */
337 cifs_show_options(struct seq_file *s, struct vfsmount *m)
339 struct cifs_sb_info *cifs_sb;
340 struct cifsTconInfo *tcon;
341 struct TCP_Server_Info *server;
343 cifs_sb = CIFS_SB(m->mnt_sb);
346 tcon = cifs_sb->tcon;
348 /* BB add prepath to mount options displayed */
349 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
351 if (tcon->ses->userName)
352 seq_printf(s, ",username=%s",
353 tcon->ses->userName);
354 if (tcon->ses->domainName)
355 seq_printf(s, ",domain=%s",
356 tcon->ses->domainName);
/* server address: print IPv6 or IPv4 form depending on socket family */
357 server = tcon->ses->server;
359 seq_printf(s, ",addr=");
360 switch (server->addr.sockAddr6.
363 seq_printf(s, NIP6_FMT,
364 NIP6(server->addr.sockAddr6.sin6_addr));
367 seq_printf(s, NIPQUAD_FMT,
368 NIPQUAD(server->addr.sockAddr.sin_addr.s_addr));
373 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
375 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
376 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
378 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
/* without Unix extensions the client-enforced modes are meaningful */
379 if (!tcon->unix_ext) {
380 seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
381 cifs_sb->mnt_file_mode,
382 cifs_sb->mnt_dir_mode);
385 seq_printf(s, ",seal");
387 seq_printf(s, ",nocase");
389 seq_printf(s, ",hard");
391 if (cifs_sb->prepath)
392 seq_printf(s, ",prepath=%s", cifs_sb->prepath);
/* boolean mount flags, one per CIFS_MOUNT_* bit */
393 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
394 seq_printf(s, ",posixpaths");
395 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
396 seq_printf(s, ",setuids");
397 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
398 seq_printf(s, ",serverino");
399 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
400 seq_printf(s, ",directio");
401 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
402 seq_printf(s, ",nouser_xattr");
403 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
404 seq_printf(s, ",mapchars");
405 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
406 seq_printf(s, ",sfu");
407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
408 seq_printf(s, ",nobrl");
409 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
410 seq_printf(s, ",cifsacl");
411 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
412 seq_printf(s, ",dynperm");
413 if (m->mnt_sb->s_flags & MS_POSIXACL)
414 seq_printf(s, ",acl");
416 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
417 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
/*
 * XFS-style quota hooks.  NOTE(review): in this listing each handler only
 * logs its arguments; the actual quota plumbing is elided/unimplemented.
 */
422 #ifdef CONFIG_CIFS_QUOTA
423 int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
424 struct fs_disk_quota *pdquota)
428 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
429 struct cifsTconInfo *pTcon;
432 pTcon = cifs_sb->tcon;
439 cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
448 int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
449 struct fs_disk_quota *pdquota)
453 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
454 struct cifsTconInfo *pTcon;
457 pTcon = cifs_sb->tcon;
463 cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
472 int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
476 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
477 struct cifsTconInfo *pTcon;
480 pTcon = cifs_sb->tcon;
486 cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
495 int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
499 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
500 struct cifsTconInfo *pTcon;
503 pTcon = cifs_sb->tcon;
509 cFYI(1, ("pqstats %p", qstats));
/* dispatch table installed on sb->s_qcop in cifs_read_super() */
518 static struct quotactl_ops cifs_quotactl_ops = {
519 .set_xquota = cifs_xquota_set,
520 .get_xquota = cifs_xquota_get,
521 .set_xstate = cifs_xstate_set,
522 .get_xstate = cifs_xstate_get,
/*
 * cifs_umount_begin - called on forced unmount (MNT_FORCE).
 * Marks the tcon as exiting when this is its last user, then wakes all
 * waiters on the server request/response queues so blocked callers can
 * error out instead of hanging.
 */
526 static void cifs_umount_begin(struct super_block *sb)
528 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
529 struct cifsTconInfo *tcon;
534 tcon = cifs_sb->tcon;
538 read_lock(&cifs_tcp_ses_lock);
539 if (tcon->tc_count == 1)
540 tcon->tidStatus = CifsExiting;
541 read_unlock(&cifs_tcp_ses_lock);
543 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
544 /* cancel_notify_requests(tcon); */
545 if (tcon->ses && tcon->ses->server) {
546 cFYI(1, ("wake up tasks now - umount begin not complete"));
547 wake_up_all(&tcon->ses->server->request_q);
548 wake_up_all(&tcon->ses->server->response_q);
549 msleep(1); /* yield */
550 /* we have to kick the requests once more */
551 wake_up_all(&tcon->ses->server->response_q);
554 /* BB FIXME - finish add checks for tidStatus BB */
/* Optional per-mount statistics hook (body elided in this listing). */
559 #ifdef CONFIG_CIFS_STATS2
560 static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
/* cifs_remount: only forces nodiratime on; no other option re-parsing here */
567 static int cifs_remount(struct super_block *sb, int *flags, char *data)
569 *flags |= MS_NODIRATIME;
/* VFS super_operations for CIFS superblocks (installed in cifs_read_super). */
573 static const struct super_operations cifs_super_ops = {
574 .put_super = cifs_put_super,
575 .statfs = cifs_statfs,
576 .alloc_inode = cifs_alloc_inode,
577 .destroy_inode = cifs_destroy_inode,
578 /* .drop_inode = generic_delete_inode,
579 .delete_inode = cifs_delete_inode, */ /* Do not need above two
580 functions unless later we add lazy close of inodes or unless the
581 kernel forgets to call us with the same number of releases (closes)
583 .show_options = cifs_show_options,
584 .umount_begin = cifs_umount_begin,
585 .remount_fs = cifs_remount,
586 #ifdef CONFIG_CIFS_STATS2
587 .show_stats = cifs_show_stats,
/*
 * cifs_get_sb - file_system_type .get_sb hook.
 * Always allocates a fresh anonymous superblock (sget with NULL test fn,
 * so no sb sharing), fills it via cifs_read_super(), and on failure
 * drops the half-constructed sb before returning.
 */
592 cifs_get_sb(struct file_system_type *fs_type,
593 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
596 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
598 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
605 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
607 up_write(&sb->s_umount);
608 deactivate_super(sb);
611 sb->s_flags |= MS_ACTIVE;
612 return simple_set_mnt(mnt, sb);
/*
 * cifs_file_aio_write - generic aio write, plus an immediate writeback
 * kick when we do not hold an exclusive (CacheAll) oplock, since cached
 * dirty data cannot be trusted to stay valid without one.
 */
615 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
616 unsigned long nr_segs, loff_t pos)
618 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
621 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
622 if (!CIFS_I(inode)->clientCanCacheAll)
623 filemap_fdatawrite(inode->i_mapping);
/*
 * cifs_llseek - llseek that revalidates the cached file size first when
 * seeking relative to EOF, so SEEK_END reflects the server's length.
 */
627 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
629 /* origin == SEEK_END => we must revalidate the cached file length */
630 if (origin == SEEK_END) {
633 /* some applications poll for the file length in this strange
634 way so we must seek to end on non-oplocked files by
635 setting the revalidate time to zero */
636 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
638 retval = cifs_revalidate(file->f_path.dentry);
640 return (loff_t)retval;
642 return generic_file_llseek_unlocked(file, offset, origin);
645 #ifdef CONFIG_CIFS_EXPERIMENTAL
/*
 * cifs_setlease - grant a local lease only when it is safe:
 * either the matching oplock is held (read lease needs CacheRead,
 * write lease needs CacheAll), or the "local_lease" mount option says
 * the user accepts the risk on servers without oplock support.
 */
646 static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
648 /* note that this is called by vfs setlease with the BKL held
649 although I doubt that BKL is needed here in cifs */
650 struct inode *inode = file->f_path.dentry->d_inode;
652 if (!(S_ISREG(inode->i_mode)))
655 /* check if file is oplocked */
656 if (((arg == F_RDLCK) &&
657 (CIFS_I(inode)->clientCanCacheRead)) ||
659 (CIFS_I(inode)->clientCanCacheAll)))
660 return generic_setlease(file, arg, lease);
661 else if (CIFS_SB(inode->i_sb)->tcon->local_lease &&
662 !CIFS_I(inode)->clientCanCacheRead)
663 /* If the server claims to support oplock on this
664 file, then we still need to check oplock even
665 if the local_lease mount option is set, but there
666 are servers which do not support oplock for which
667 this mount option may be useful if the user
668 knows that the file won't be changed on the server
670 return generic_setlease(file, arg, lease);
/* Filesystem registration record; registered from init_cifs(). */
676 struct file_system_type cifs_fs_type = {
677 .owner = THIS_MODULE,
679 .get_sb = cifs_get_sb,
680 .kill_sb = kill_anon_super,
/* inode_operations for directories. */
683 const struct inode_operations cifs_dir_inode_ops = {
684 .create = cifs_create,
685 .lookup = cifs_lookup,
686 .getattr = cifs_getattr,
687 .unlink = cifs_unlink,
688 .link = cifs_hardlink,
691 .rename = cifs_rename,
692 .permission = cifs_permission,
693 /* revalidate:cifs_revalidate, */
694 .setattr = cifs_setattr,
695 .symlink = cifs_symlink,
697 #ifdef CONFIG_CIFS_XATTR
698 .setxattr = cifs_setxattr,
699 .getxattr = cifs_getxattr,
700 .listxattr = cifs_listxattr,
701 .removexattr = cifs_removexattr,
/* inode_operations for regular files. */
705 const struct inode_operations cifs_file_inode_ops = {
706 /* revalidate:cifs_revalidate, */
707 .setattr = cifs_setattr,
708 .getattr = cifs_getattr, /* do we need this anymore? */
709 .rename = cifs_rename,
710 .permission = cifs_permission,
711 #ifdef CONFIG_CIFS_XATTR
712 .setxattr = cifs_setxattr,
713 .getxattr = cifs_getxattr,
714 .listxattr = cifs_listxattr,
715 .removexattr = cifs_removexattr,
/* inode_operations for symlinks. */
719 const struct inode_operations cifs_symlink_inode_ops = {
720 .readlink = generic_readlink,
721 .follow_link = cifs_follow_link,
722 .put_link = cifs_put_link,
723 .permission = cifs_permission,
724 /* BB add the following two eventually */
725 /* revalidate: cifs_revalidate,
726 setattr: cifs_notify_change, *//* BB do we need notify change */
727 #ifdef CONFIG_CIFS_XATTR
728 .setxattr = cifs_setxattr,
729 .getxattr = cifs_getxattr,
730 .listxattr = cifs_listxattr,
731 .removexattr = cifs_removexattr,
/* file_operations: default (cached, byte-range-lock capable) files. */
735 const struct file_operations cifs_file_ops = {
736 .read = do_sync_read,
737 .write = do_sync_write,
738 .aio_read = generic_file_aio_read,
739 .aio_write = cifs_file_aio_write,
741 .release = cifs_close,
745 .mmap = cifs_file_mmap,
746 .splice_read = generic_file_splice_read,
747 .llseek = cifs_llseek,
748 #ifdef CONFIG_CIFS_POSIX
749 .unlocked_ioctl = cifs_ioctl,
750 #endif /* CONFIG_CIFS_POSIX */
752 #ifdef CONFIG_CIFS_EXPERIMENTAL
753 .dir_notify = cifs_dir_notify,
754 .setlease = cifs_setlease,
755 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* file_operations: direct-I/O mounts (uncached reads/writes). */
758 const struct file_operations cifs_file_direct_ops = {
759 /* no mmap, no aio, no readv -
760 BB reevaluate whether they can be done with directio, no cache */
761 .read = cifs_user_read,
762 .write = cifs_user_write,
764 .release = cifs_close,
768 .splice_read = generic_file_splice_read,
769 #ifdef CONFIG_CIFS_POSIX
770 .unlocked_ioctl = cifs_ioctl,
771 #endif /* CONFIG_CIFS_POSIX */
772 .llseek = cifs_llseek,
773 #ifdef CONFIG_CIFS_EXPERIMENTAL
774 .dir_notify = cifs_dir_notify,
775 .setlease = cifs_setlease,
776 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* file_operations: "nobrl" mounts (no byte-range locks sent to server). */
778 const struct file_operations cifs_file_nobrl_ops = {
779 .read = do_sync_read,
780 .write = do_sync_write,
781 .aio_read = generic_file_aio_read,
782 .aio_write = cifs_file_aio_write,
784 .release = cifs_close,
787 .mmap = cifs_file_mmap,
788 .splice_read = generic_file_splice_read,
789 .llseek = cifs_llseek,
790 #ifdef CONFIG_CIFS_POSIX
791 .unlocked_ioctl = cifs_ioctl,
792 #endif /* CONFIG_CIFS_POSIX */
794 #ifdef CONFIG_CIFS_EXPERIMENTAL
795 .dir_notify = cifs_dir_notify,
796 .setlease = cifs_setlease,
797 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* file_operations: direct I/O combined with nobrl. */
800 const struct file_operations cifs_file_direct_nobrl_ops = {
801 /* no mmap, no aio, no readv -
802 BB reevaluate whether they can be done with directio, no cache */
803 .read = cifs_user_read,
804 .write = cifs_user_write,
806 .release = cifs_close,
809 .splice_read = generic_file_splice_read,
810 #ifdef CONFIG_CIFS_POSIX
811 .unlocked_ioctl = cifs_ioctl,
812 #endif /* CONFIG_CIFS_POSIX */
813 .llseek = cifs_llseek,
814 #ifdef CONFIG_CIFS_EXPERIMENTAL
815 .dir_notify = cifs_dir_notify,
816 .setlease = cifs_setlease,
817 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* file_operations: directories. */
820 const struct file_operations cifs_dir_ops = {
821 .readdir = cifs_readdir,
822 .release = cifs_closedir,
823 .read = generic_read_dir,
824 #ifdef CONFIG_CIFS_EXPERIMENTAL
825 .dir_notify = cifs_dir_notify,
826 #endif /* CONFIG_CIFS_EXPERIMENTAL */
827 .unlocked_ioctl = cifs_ioctl,
828 .llseek = generic_file_llseek,
/* Slab constructor: runs once per object, initialises VFS inode + lock list. */
832 cifs_init_once(void *inode)
834 struct cifsInodeInfo *cifsi = inode;
836 inode_init_once(&cifsi->vfs_inode);
837 INIT_LIST_HEAD(&cifsi->lockList);
/* Create the cifsInodeInfo slab cache (reclaimable, uses ctor above). */
841 cifs_init_inodecache(void)
843 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
844 sizeof(struct cifsInodeInfo),
845 0, (SLAB_RECLAIM_ACCOUNT|
848 if (cifs_inode_cachep == NULL)
/* Tear down the inode cache on module exit. */
855 cifs_destroy_inodecache(void)
857 kmem_cache_destroy(cifs_inode_cachep);
/*
 * cifs_init_request_bufs - create request-buffer slab caches and mempools.
 * Clamps CIFSMaxBufSize to [8192, 1024*127] rounded to a 512-byte multiple,
 * then builds the large and small request pools, unwinding prior
 * allocations on any failure.
 */
861 cifs_init_request_bufs(void)
863 if (CIFSMaxBufSize < 8192) {
864 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
865 Unicode path name has to fit in any SMB/CIFS path based frames */
866 CIFSMaxBufSize = 8192;
867 } else if (CIFSMaxBufSize > 1024*127) {
868 CIFSMaxBufSize = 1024 * 127;
870 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
872 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
873 cifs_req_cachep = kmem_cache_create("cifs_request",
875 MAX_CIFS_HDR_SIZE, 0,
876 SLAB_HWCACHE_ALIGN, NULL);
877 if (cifs_req_cachep == NULL)
/* clamp cifs_min_rcv to [1, 64] before sizing the mempool */
880 if (cifs_min_rcv < 1)
882 else if (cifs_min_rcv > 64) {
884 cERROR(1, ("cifs_min_rcv set to maximum (64)"));
887 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
890 if (cifs_req_poolp == NULL) {
891 kmem_cache_destroy(cifs_req_cachep);
894 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
895 almost all handle based requests (but not write response, nor is it
896 sufficient for path based requests). A smaller size would have
897 been more efficient (compacting multiple slab items on one 4k page)
898 for the case in which debug was on, but this larger size allows
899 more SMBs to use small buffer alloc and is still much more
900 efficient to alloc 1 per page off the slab compared to 17K (5page)
901 alloc of large cifs buffers even when page debugging is on */
902 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
903 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
905 if (cifs_sm_req_cachep == NULL) {
906 mempool_destroy(cifs_req_poolp);
907 kmem_cache_destroy(cifs_req_cachep);
/* clamp cifs_min_small to [2, 256] */
911 if (cifs_min_small < 2)
913 else if (cifs_min_small > 256) {
914 cifs_min_small = 256;
915 cFYI(1, ("cifs_min_small set to maximum (256)"));
918 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
921 if (cifs_sm_req_poolp == NULL) {
/* unwind everything allocated so far */
922 mempool_destroy(cifs_req_poolp);
923 kmem_cache_destroy(cifs_req_cachep);
924 kmem_cache_destroy(cifs_sm_req_cachep);
/* cifs_destroy_request_bufs - release pools/caches created above. */
932 cifs_destroy_request_bufs(void)
934 mempool_destroy(cifs_req_poolp);
935 kmem_cache_destroy(cifs_req_cachep);
936 mempool_destroy(cifs_sm_req_poolp);
937 kmem_cache_destroy(cifs_sm_req_cachep);
/*
 * (cifs_init_mids, header line elided in this listing)
 * Creates the mid (multiplex-id) cache + pool and the oplock-entry cache,
 * unwinding on failure; cifs_destroy_mids() below releases all three.
 */
943 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
944 sizeof(struct mid_q_entry), 0,
945 SLAB_HWCACHE_ALIGN, NULL);
946 if (cifs_mid_cachep == NULL)
949 /* 3 is a reasonable minimum number of simultaneous operations */
950 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
951 if (cifs_mid_poolp == NULL) {
952 kmem_cache_destroy(cifs_mid_cachep);
956 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
957 sizeof(struct oplock_q_entry), 0,
958 SLAB_HWCACHE_ALIGN, NULL);
959 if (cifs_oplock_cachep == NULL) {
960 mempool_destroy(cifs_mid_poolp);
961 kmem_cache_destroy(cifs_mid_cachep);
/* cifs_destroy_mids - module-exit cleanup of the mid/oplock caches. */
969 cifs_destroy_mids(void)
971 mempool_destroy(cifs_mid_poolp);
972 kmem_cache_destroy(cifs_mid_cachep);
973 kmem_cache_destroy(cifs_oplock_cachep);
/*
 * cifs_oplock_thread - kernel thread servicing GlobalOplock_Q.
 * For each queued oplock break: flush (and if losing read caching,
 * wait for and invalidate) cached pages, record any write-behind error
 * on the inode, then acknowledge the break to the server with a
 * LOCKING_ANDX_OPLOCK_RELEASE unless the session needs reconnect.
 * Sleeps interruptibly when the queue is empty; exits on kthread_stop().
 */
976 static int cifs_oplock_thread(void *dummyarg)
978 struct oplock_q_entry *oplock_item;
979 struct cifsTconInfo *pTcon;
989 spin_lock(&GlobalMid_Lock);
990 if (list_empty(&GlobalOplock_Q)) {
991 spin_unlock(&GlobalMid_Lock);
992 set_current_state(TASK_INTERRUPTIBLE);
993 schedule_timeout(39*HZ);
995 oplock_item = list_entry(GlobalOplock_Q.next,
996 struct oplock_q_entry, qhead);
997 cFYI(1, ("found oplock item to write out"));
/* snapshot the entry's fields before dropping the lock and freeing it */
998 pTcon = oplock_item->tcon;
999 inode = oplock_item->pinode;
1000 netfid = oplock_item->netfid;
1001 spin_unlock(&GlobalMid_Lock);
1002 DeleteOplockQEntry(oplock_item);
1003 /* can not grab inode sem here since it would
1004 deadlock when oplock received on delete
1005 since vfs_unlink holds the i_mutex across
1007 /* mutex_lock(&inode->i_mutex);*/
1008 if (S_ISREG(inode->i_mode)) {
1009 #ifdef CONFIG_CIFS_EXPERIMENTAL
/* break any local lease that conflicts with the lost oplock level */
1010 if (CIFS_I(inode)->clientCanCacheAll == 0)
1011 break_lease(inode, FMODE_READ);
1012 else if (CIFS_I(inode)->clientCanCacheRead == 0)
1013 break_lease(inode, FMODE_WRITE);
1015 rc = filemap_fdatawrite(inode->i_mapping);
1016 if (CIFS_I(inode)->clientCanCacheRead == 0) {
1017 waitrc = filemap_fdatawait(
1019 invalidate_remote_inode(inode);
1025 /* mutex_unlock(&inode->i_mutex);*/
1027 CIFS_I(inode)->write_behind_rc = rc;
1028 cFYI(1, ("Oplock flush inode %p rc %d",
1031 /* releasing stale oplock after recent reconnect
1032 of smb session using a now incorrect file
1033 handle is not a data integrity issue but do
1034 not bother sending an oplock release if session
1035 to server still is disconnected since oplock
1036 already released by the server in that case */
1037 if (!pTcon->need_reconnect) {
1038 rc = CIFSSMBLock(0, pTcon, netfid,
1039 0 /* len */ , 0 /* offset */, 0,
1040 0, LOCKING_ANDX_OPLOCK_RELEASE,
1041 false /* wait flag */);
1042 cFYI(1, ("Oplock release rc = %d", rc));
1044 set_current_state(TASK_INTERRUPTIBLE);
1045 schedule_timeout(1); /* yield in case q were corrupt */
1047 } while (!kthread_should_stop());
/*
 * cifs_dnotify_thread - watchdog kernel thread.
 * Every ~15s walks cifs_tcp_ses_list and wakes response_q waiters on any
 * server with requests in flight, so stuck callers can notice errors.
 * Freezer-aware; exits on kthread_stop().
 */
1052 static int cifs_dnotify_thread(void *dummyarg)
1054 struct list_head *tmp;
1055 struct TCP_Server_Info *server;
1058 if (try_to_freeze())
1060 set_current_state(TASK_INTERRUPTIBLE);
1061 schedule_timeout(15*HZ);
1062 /* check if any stuck requests that need
1063 to be woken up and wakeq so the
1064 thread can wake up and error out */
1065 read_lock(&cifs_tcp_ses_lock);
1066 list_for_each(tmp, &cifs_tcp_ses_list) {
1067 server = list_entry(tmp, struct TCP_Server_Info,
1069 if (atomic_read(&server->inFlight))
1070 wake_up_all(&server->response_q);
1072 read_unlock(&cifs_tcp_ses_lock);
1073 } while (!kthread_should_stop());
/*
 * (init_cifs, header line elided in this listing)
 * Module init: initialise global lists/counters/locks, clamp
 * cifs_max_pending to [2, 256], create caches and pools, register the
 * filesystem and upcall key types, and start the oplock and dnotify
 * threads.  Failures unwind in reverse order via the goto labels.
 */
1083 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1084 INIT_LIST_HEAD(&GlobalOplock_Q);
1085 #ifdef CONFIG_CIFS_EXPERIMENTAL
1086 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1087 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1090 * Initialize Global counters
1092 atomic_set(&sesInfoAllocCount, 0);
1093 atomic_set(&tconInfoAllocCount, 0);
1094 atomic_set(&tcpSesAllocCount, 0);
1095 atomic_set(&tcpSesReconnectCount, 0);
1096 atomic_set(&tconInfoReconnectCount, 0);
1098 atomic_set(&bufAllocCount, 0);
1099 atomic_set(&smBufAllocCount, 0);
1100 #ifdef CONFIG_CIFS_STATS2
1101 atomic_set(&totBufAllocCount, 0);
1102 atomic_set(&totSmBufAllocCount, 0);
1103 #endif /* CONFIG_CIFS_STATS2 */
1105 atomic_set(&midCount, 0);
1106 GlobalCurrentXid = 0;
1107 GlobalTotalActiveXid = 0;
1108 GlobalMaxActiveXid = 0;
1109 memset(Local_System_Name, 0, 15);
1110 rwlock_init(&GlobalSMBSeslock);
1111 rwlock_init(&cifs_tcp_ses_lock);
1112 spin_lock_init(&GlobalMid_Lock);
/* enforce module-parameter range before first use */
1114 if (cifs_max_pending < 2) {
1115 cifs_max_pending = 2;
1116 cFYI(1, ("cifs_max_pending set to min of 2"));
1117 } else if (cifs_max_pending > 256) {
1118 cifs_max_pending = 256;
1119 cFYI(1, ("cifs_max_pending set to max of 256"));
1122 rc = cifs_init_inodecache();
1124 goto out_clean_proc;
1126 rc = cifs_init_mids();
1128 goto out_destroy_inodecache;
1130 rc = cifs_init_request_bufs();
1132 goto out_destroy_mids;
1134 rc = register_filesystem(&cifs_fs_type);
1136 goto out_destroy_request_bufs;
1137 #ifdef CONFIG_CIFS_UPCALL
1138 rc = register_key_type(&cifs_spnego_key_type);
1140 goto out_unregister_filesystem;
1142 #ifdef CONFIG_CIFS_DFS_UPCALL
1143 rc = register_key_type(&key_type_dns_resolver);
1145 goto out_unregister_key_type;
1147 oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
1148 if (IS_ERR(oplockThread)) {
1149 rc = PTR_ERR(oplockThread);
1150 cERROR(1, ("error %d create oplock thread", rc));
1151 goto out_unregister_dfs_key_type;
1154 dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
1155 if (IS_ERR(dnotifyThread)) {
1156 rc = PTR_ERR(dnotifyThread);
1157 cERROR(1, ("error %d create dnotify thread", rc));
1158 goto out_stop_oplock_thread;
/* error unwind: reverse order of the setup steps above */
1163 out_stop_oplock_thread:
1164 kthread_stop(oplockThread);
1165 out_unregister_dfs_key_type:
1166 #ifdef CONFIG_CIFS_DFS_UPCALL
1167 unregister_key_type(&key_type_dns_resolver);
1168 out_unregister_key_type:
1170 #ifdef CONFIG_CIFS_UPCALL
1171 unregister_key_type(&cifs_spnego_key_type);
1172 out_unregister_filesystem:
1174 unregister_filesystem(&cifs_fs_type);
1175 out_destroy_request_bufs:
1176 cifs_destroy_request_bufs();
1178 cifs_destroy_mids();
1179 out_destroy_inodecache:
1180 cifs_destroy_inodecache();
/*
 * (exit_cifs, header line elided)
 * Module exit: unregister key types and filesystem, destroy caches,
 * and stop both kernel threads.
 */
1189 cFYI(DBG2, ("exit_cifs"));
1191 #ifdef CONFIG_CIFS_DFS_UPCALL
1192 cifs_dfs_release_automount_timer();
1193 unregister_key_type(&key_type_dns_resolver);
1195 #ifdef CONFIG_CIFS_UPCALL
1196 unregister_key_type(&cifs_spnego_key_type);
1198 unregister_filesystem(&cifs_fs_type);
1199 cifs_destroy_inodecache();
1200 cifs_destroy_mids();
1201 cifs_destroy_request_bufs();
1202 kthread_stop(oplockThread);
1203 kthread_stop(dnotifyThread);
/* Module metadata and entry points. */
1206 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1207 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1209 ("VFS to access servers complying with the SNIA CIFS Specification "
1210 "e.g. Samba and Windows");
1211 MODULE_VERSION(CIFS_VERSION);
1212 module_init(init_cifs)
1213 module_exit(exit_cifs)