/*
 * fs/sysfs/file.c - sysfs regular (text) file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 *
 * Please see Documentation/filesystems/sysfs.txt for more information.
 */
13 #include <linux/module.h>
14 #include <linux/kobject.h>
15 #include <linux/kallsyms.h>
16 #include <linux/slab.h>
17 #include <linux/fsnotify.h>
18 #include <linux/namei.h>
19 #include <linux/poll.h>
20 #include <linux/list.h>
21 #include <linux/mutex.h>
22 #include <linux/limits.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
/*
 * There's one sysfs_open_file for each open file and one sysfs_open_dirent
 * for each sysfs_dirent with one or more open files.
 *
 * sysfs_dirent->s_attr.open points to sysfs_open_dirent.  s_attr.open is
 * protected by sysfs_open_dirent_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * sysfs_open_file.  sysfs_open_files are chained at
 * sysfs_open_dirent->files, which is protected by sysfs_open_file_mutex.
 */
/*
 * Locks guarding the open-file bookkeeping: the spinlock protects
 * sd->s_attr.open, the mutex protects the sysfs_open_dirent->files list
 * (see the locking comment above).
 * NOTE(review): this extract elides some member declarations of
 * sysfs_open_dirent (e.g. a refcnt/event pair is referenced elsewhere in
 * this file) and the struct's closing brace — confirm against the full file.
 */
40 static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
41 static DEFINE_MUTEX(sysfs_open_file_mutex);
43 struct sysfs_open_dirent {
46 wait_queue_head_t poll; /* waitqueue woken on sysfs_notify_dirent() */
47 struct list_head files; /* goes through sysfs_open_file.list */
/*
 * sysfs_of - map a struct file to its sysfs_open_file.
 * file->private_data is always a seq_file (instantiated at open time even
 * for non-seq access); its ->private points at the sysfs_open_file.
 */
50 static struct sysfs_open_file *sysfs_of(struct file *file)
52 	return ((struct seq_file *)file->private_data)->private;
/*
 * Determine the kernfs_ops for the given sysfs_dirent. This function must
 * be called while holding an active reference.
 * The lockdep assertion documents the active-reference requirement unless
 * the dirent opted out via sysfs_ignore_lockdep().
 */
59 static const struct kernfs_ops *kernfs_ops(struct sysfs_dirent *sd)
61 	if (!sysfs_ignore_lockdep(sd))
62 		lockdep_assert_held(sd);
63 	return sd->s_attr.ops;
/*
 * Determine ktype->sysfs_ops for the given sysfs_dirent. This function
 * must be called while holding an active reference.
 * The owning kobject is the parent dirent's ->priv; returns NULL when the
 * kobject has no ktype.
 */
70 static const struct sysfs_ops *sysfs_file_ops(struct sysfs_dirent *sd)
72 	struct kobject *kobj = sd->s_parent->priv;
74 	if (!sysfs_ignore_lockdep(sd))
75 		lockdep_assert_held(sd);
76 	return kobj->ktype ? kobj->ktype->sysfs_ops : NULL;
/*
 * Reads on sysfs are handled through seq_file, which takes care of hairy
 * details like buffering and seeking. The following function pipes
 * sysfs_ops->show() result through seq_file.
 *
 * NOTE(review): several lines are elided in this extract (the local
 * declarations of count/buf, the body of the short-buffer branch, the
 * guard around ops->show(), and the final return) — verify against the
 * full file before relying on details below.
 */
84 static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
86 	struct sysfs_open_file *of = sf->private;
87 	struct kobject *kobj = of->sd->s_parent->priv;
88 	const struct sysfs_ops *ops = sysfs_file_ops(of->sd);
92 	/* acquire buffer and ensure that it's >= PAGE_SIZE */
93 	count = seq_get_buf(sf, &buf);
94 	if (count < PAGE_SIZE) {
100 	 * Invoke show(). Control may reach here via seq file lseek even
101 	 * if @ops->show() isn't implemented.
104 	count = ops->show(kobj, of->sd->priv, buf);
110 	 * The code works fine with PAGE_SIZE return but it's likely to
111 	 * indicate truncated result or overflow in normal use cases.
113 	if (count >= (ssize_t)PAGE_SIZE) {
114 	print_symbol("fill_read_buffer: %s returned bad count\n",
115 	(unsigned long)ops->show);
116 	/* Try to struggle along */
117 	count = PAGE_SIZE - 1;
119 	seq_commit(sf, count);
/*
 * kernfs read callback for bin sysfs files: clamp the request against the
 * inode size and forward it to the bin_attribute's ->read() method.
 * NOTE(review): the clamping body and the zero-size/no-read early returns
 * are elided in this extract.
 */
123 static ssize_t sysfs_kf_bin_read(struct sysfs_open_file *of, char *buf,
124 size_t count, loff_t pos)
126 	struct bin_attribute *battr = of->sd->priv;
127 	struct kobject *kobj = of->sd->s_parent->priv;
128 	loff_t size = file_inode(of->file)->i_size;
136 	if (pos + count > size)
143 	return battr->read(of->file, kobj, battr, buf, pos, count);
/*
 * seq_file ->start(): take @of->mutex and an active reference, then either
 * delegate to the kernfs op's seq_start or emulate single_open() (one
 * record at pos 0).  The lock/ref are dropped in kernfs_seq_stop().
 */
146 static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
148 	struct sysfs_open_file *of = sf->private;
149 	const struct kernfs_ops *ops;
152 	 * @of->mutex nests outside active ref and is just to ensure that
153 	 * the ops aren't called concurrently for the same open file.
155 	mutex_lock(&of->mutex);
156 	if (!sysfs_get_active(of->sd))
157 		return ERR_PTR(-ENODEV);
159 	ops = kernfs_ops(of->sd);
160 	if (ops->seq_start) {
161 		return ops->seq_start(sf, ppos);
164 	 * The same behavior and code as single_open(). Returns
165 	 * !NULL if pos is at the beginning; otherwise, NULL.
167 	return NULL + !*ppos;
/*
 * seq_file ->next(): delegate to the kernfs op's seq_next when present;
 * otherwise behave like single_open() and terminate after the first read.
 * NOTE(review): the branch structure and single_open-style fallback body
 * are elided in this extract.
 */
171 static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
173 	struct sysfs_open_file *of = sf->private;
174 	const struct kernfs_ops *ops = kernfs_ops(of->sd);
177 	return ops->seq_next(sf, v, ppos);
180 	 * The same behavior and code as single_open(), always
181 	 * terminate after the initial read.
/*
 * seq_file ->stop(): mirror of kernfs_seq_start() — optionally call the
 * op's seq_stop, then drop the active reference and @of->mutex taken in
 * start.
 */
188 static void kernfs_seq_stop(struct seq_file *sf, void *v)
190 	struct sysfs_open_file *of = sf->private;
191 	const struct kernfs_ops *ops = kernfs_ops(of->sd);
194 	ops->seq_stop(sf, v);
196 	sysfs_put_active(of->sd);
197 	mutex_unlock(&of->mutex);
/*
 * seq_file ->show(): latch the current notification event count (used by
 * kernfs_file_poll() to detect changes) and forward to the op's seq_show.
 */
200 static int kernfs_seq_show(struct seq_file *sf, void *v)
202 	struct sysfs_open_file *of = sf->private;
204 	of->event = atomic_read(&of->sd->s_attr.open->event);
206 	return of->sd->s_attr.ops->seq_show(sf, v);
/* seq_file operations used for every kernfs/sysfs regular file open. */
209 static const struct seq_operations kernfs_seq_ops = {
210 	.start = kernfs_seq_start,
211 	.next = kernfs_seq_next,
212 	.stop = kernfs_seq_stop,
213 	.show = kernfs_seq_show,
/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file. Implement simplistic custom buffering for
 * bin files: kmalloc a bounce buffer (<= PAGE_SIZE), call ops->read()
 * under @of->mutex + active ref, then copy_to_user().
 * NOTE(review): error paths (alloc failure, read error, *ppos update,
 * kfree, return) are elided in this extract.
 */
222 static ssize_t kernfs_file_direct_read(struct sysfs_open_file *of,
223 char __user *user_buf, size_t count,
226 	ssize_t len = min_t(size_t, count, PAGE_SIZE);
227 	const struct kernfs_ops *ops;
230 	buf = kmalloc(len, GFP_KERNEL);
235 	 * @of->mutex nests outside active ref and is just to ensure that
236 	 * the ops aren't called concurrently for the same open file.
238 	mutex_lock(&of->mutex);
239 	if (!sysfs_get_active(of->sd)) {
241 		mutex_unlock(&of->mutex);
245 	ops = kernfs_ops(of->sd);
247 	len = ops->read(of, buf, len, *ppos);
251 	sysfs_put_active(of->sd);
252 	mutex_unlock(&of->mutex);
257 	if (copy_to_user(user_buf, buf, len)) {
/*
 * kernfs_file_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Dispatch: files with a seq_show op go through seq_read(); everything
 * else (bin-style ->read) uses the direct bounce-buffer path.
 */
276 static ssize_t kernfs_file_read(struct file *file, char __user *user_buf,
277 size_t count, loff_t *ppos)
279 	struct sysfs_open_file *of = sysfs_of(file);
281 	if (of->sd->s_flags & SYSFS_FLAG_HAS_SEQ_SHOW)
282 		return seq_read(file, user_buf, count, ppos);
284 	return kernfs_file_direct_read(of, user_buf, count, ppos);
/* kernfs write callback for regular sysfs files */
/*
 * Forwards the already-copied kernel buffer to sysfs_ops->store() on the
 * owning kobject.  NOTE(review): a guard for a missing ->store and/or
 * zero count appears to be elided in this extract.
 */
288 static ssize_t sysfs_kf_write(struct sysfs_open_file *of, char *buf,
289 size_t count, loff_t pos)
291 	const struct sysfs_ops *ops = sysfs_file_ops(of->sd);
292 	struct kobject *kobj = of->sd->s_parent->priv;
297 	return ops->store(kobj, of->sd->priv, buf, count);
/* kernfs write callback for bin sysfs files */
/*
 * Clamp the write against the inode size, then forward to the
 * bin_attribute's ->write() method.
 * NOTE(review): the size/offset validation branches between the clamp and
 * the call are elided in this extract.
 */
301 static ssize_t sysfs_kf_bin_write(struct sysfs_open_file *of, char *buf,
302 size_t count, loff_t pos)
304 	struct bin_attribute *battr = of->sd->priv;
305 	struct kobject *kobj = of->sd->s_parent->priv;
306 	loff_t size = file_inode(of->file)->i_size;
311 	count = min_t(ssize_t, count, size - pos);
319 	return battr->write(of->file, kobj, battr, buf, pos, count);
/*
 * kernfs_file_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them. We expect the entire buffer to come on
 * the first write. Hint: if you're writing a value, first read the file,
 * modify only the the value you're changing, then write entire buffer
 * back.
 *
 * NOTE(review): alloc-failure/copy-failure error paths, *ppos update and
 * kfree are elided in this extract.
 */
338 static ssize_t kernfs_file_write(struct file *file, const char __user *user_buf,
339 size_t count, loff_t *ppos)
341 	struct sysfs_open_file *of = sysfs_of(file);
342 	ssize_t len = min_t(size_t, count, PAGE_SIZE);
343 	const struct kernfs_ops *ops;
346 	buf = kmalloc(len + 1, GFP_KERNEL);
350 	if (copy_from_user(buf, user_buf, len)) {
354 	buf[len] = '\0'; /* guarantee string termination */
357 	 * @of->mutex nests outside active ref and is just to ensure that
358 	 * the ops aren't called concurrently for the same open file.
360 	mutex_lock(&of->mutex);
361 	if (!sysfs_get_active(of->sd)) {
362 		mutex_unlock(&of->mutex);
367 	ops = kernfs_ops(of->sd);
369 	len = ops->write(of, buf, len, *ppos);
373 	sysfs_put_active(of->sd);
374 	mutex_unlock(&of->mutex);
/*
 * kernfs mmap callback for bin sysfs files: forward to the
 * bin_attribute's ->mmap() method on the owning kobject.
 */
383 static int sysfs_kf_bin_mmap(struct sysfs_open_file *of,
384 struct vm_area_struct *vma)
386 	struct bin_attribute *battr = of->sd->priv;
387 	struct kobject *kobj = of->sd->s_parent->priv;
392 	return battr->mmap(of->file, kobj, battr, vma);
/*
 * VMA ->open() wrapper: forward to the underlying vm_ops->open (saved at
 * mmap time in of->vm_ops) while holding an active reference so the
 * attribute can't be removed mid-call.  Silently skips if the ref can't
 * be obtained.
 */
395 static void kernfs_vma_open(struct vm_area_struct *vma)
397 	struct file *file = vma->vm_file;
398 	struct sysfs_open_file *of = sysfs_of(file);
403 	if (!sysfs_get_active(of->sd))
406 	if (of->vm_ops->open)
407 		of->vm_ops->open(vma);
409 	sysfs_put_active(of->sd);
/*
 * VMA ->fault() wrapper: forward to the wrapped vm_ops->fault under an
 * active reference; SIGBUS if the attribute is gone or no fault handler
 * exists.  NOTE(review): an early of->vm_ops NULL check and the final
 * return appear to be elided in this extract.
 */
412 static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
414 	struct file *file = vma->vm_file;
415 	struct sysfs_open_file *of = sysfs_of(file);
419 	return VM_FAULT_SIGBUS;
421 	if (!sysfs_get_active(of->sd))
422 		return VM_FAULT_SIGBUS;
424 	ret = VM_FAULT_SIGBUS;
425 	if (of->vm_ops->fault)
426 		ret = of->vm_ops->fault(vma, vmf);
428 	sysfs_put_active(of->sd);
/*
 * VMA ->page_mkwrite() wrapper: forward under an active reference and
 * update the file's timestamps afterwards (the mapping was written to).
 * NOTE(review): the default ret value when no page_mkwrite handler exists
 * is elided in this extract.
 */
432 static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
433 struct vm_fault *vmf)
435 	struct file *file = vma->vm_file;
436 	struct sysfs_open_file *of = sysfs_of(file);
440 	return VM_FAULT_SIGBUS;
442 	if (!sysfs_get_active(of->sd))
443 		return VM_FAULT_SIGBUS;
446 	if (of->vm_ops->page_mkwrite)
447 		ret = of->vm_ops->page_mkwrite(vma, vmf);
449 	file_update_time(file);
451 	sysfs_put_active(of->sd);
/*
 * VMA ->access() wrapper (used by e.g. ptrace peeking at the mapping):
 * forward under an active reference.
 * NOTE(review): the failure return values and final return are elided in
 * this extract.
 */
455 static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
456 void *buf, int len, int write)
458 	struct file *file = vma->vm_file;
459 	struct sysfs_open_file *of = sysfs_of(file);
465 	if (!sysfs_get_active(of->sd))
469 	if (of->vm_ops->access)
470 		ret = of->vm_ops->access(vma, addr, buf, len, write);
472 	sysfs_put_active(of->sd);
/*
 * NUMA VMA ->set_policy() wrapper: forward under an active reference.
 * Presumably compiled only under CONFIG_NUMA — the #ifdef is not visible
 * in this extract; confirm against the full file.
 */
477 static int kernfs_vma_set_policy(struct vm_area_struct *vma,
478 struct mempolicy *new)
480 	struct file *file = vma->vm_file;
481 	struct sysfs_open_file *of = sysfs_of(file);
487 	if (!sysfs_get_active(of->sd))
491 	if (of->vm_ops->set_policy)
492 		ret = of->vm_ops->set_policy(vma, new);
494 	sysfs_put_active(of->sd);
/*
 * NUMA VMA ->get_policy() wrapper: return the wrapped handler's policy,
 * falling back to vma->vm_policy when no active reference can be taken or
 * no handler exists.
 */
498 static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
501 	struct file *file = vma->vm_file;
502 	struct sysfs_open_file *of = sysfs_of(file);
503 	struct mempolicy *pol;
506 	return vma->vm_policy;
508 	if (!sysfs_get_active(of->sd))
509 		return vma->vm_policy;
511 	pol = vma->vm_policy;
512 	if (of->vm_ops->get_policy)
513 		pol = of->vm_ops->get_policy(vma, addr);
515 	sysfs_put_active(of->sd);
/*
 * NUMA VMA ->migrate() wrapper: forward under an active reference.
 * NOTE(review): the default ret value and final return are elided in this
 * extract.
 */
519 static int kernfs_vma_migrate(struct vm_area_struct *vma,
520 const nodemask_t *from, const nodemask_t *to,
523 	struct file *file = vma->vm_file;
524 	struct sysfs_open_file *of = sysfs_of(file);
530 	if (!sysfs_get_active(of->sd))
534 	if (of->vm_ops->migrate)
535 		ret = of->vm_ops->migrate(vma, from, to, flags);
537 	sysfs_put_active(of->sd);
/*
 * vm_operations installed on every kernfs-backed mapping; each entry
 * wraps the real handler saved in of->vm_ops with active-ref protection.
 * The NUMA entries are likely guarded by CONFIG_NUMA in the full file.
 */
542 static const struct vm_operations_struct kernfs_vm_ops = {
543 	.open = kernfs_vma_open,
544 	.fault = kernfs_vma_fault,
545 	.page_mkwrite = kernfs_vma_page_mkwrite,
546 	.access = kernfs_vma_access,
548 	.set_policy = kernfs_vma_set_policy,
549 	.get_policy = kernfs_vma_get_policy,
550 	.migrate = kernfs_vma_migrate,
/*
 * mmap(2) entry point: call the kernfs op's ->mmap under @of->mutex and
 * an active ref, then (unless the op substituted a different vm_file)
 * save the vma's vm_ops in of->vm_ops and install the wrapping
 * kernfs_vm_ops.  Refuses ops that implement ->close, since close cannot
 * be wrapped safely.
 * NOTE(review): rc checks, goto labels, of->mmapped update and the final
 * return are elided in this extract.
 */
554 static int kernfs_file_mmap(struct file *file, struct vm_area_struct *vma)
556 	struct sysfs_open_file *of = sysfs_of(file);
557 	const struct kernfs_ops *ops;
560 	mutex_lock(&of->mutex);
563 	if (!sysfs_get_active(of->sd))
566 	ops = kernfs_ops(of->sd);
568 	rc = ops->mmap(of, vma);
573 	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
574 	 * to satisfy versions of X which crash if the mmap fails: that
575 	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
577 	if (vma->vm_file != file)
581 	if (of->mmapped && of->vm_ops != vma->vm_ops)
585 	 * It is not possible to successfully wrap close.
586 	 * So error if someone is trying to use close.
589 	if (vma->vm_ops && vma->vm_ops->close)
594 	of->vm_ops = vma->vm_ops;
595 	vma->vm_ops = &kernfs_vm_ops;
597 	sysfs_put_active(of->sd);
599 	mutex_unlock(&of->mutex);
/*
 * sysfs_get_open_dirent - get or create sysfs_open_dirent
 * @sd: target sysfs_dirent
 * @of: sysfs_open_file for this instance of open
 *
 * If @sd->s_attr.open exists, increment its reference count;
 * otherwise, create one. @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 *
 * Implementation: classic optimistic alloc-and-retry — take both locks,
 * install new_od if nobody beat us to it, otherwise loop back after
 * allocating; the unused new_od is presumably freed on the elided path.
 */
618 static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
619 struct sysfs_open_file *of)
621 	struct sysfs_open_dirent *od, *new_od = NULL;
624 	mutex_lock(&sysfs_open_file_mutex);
625 	spin_lock_irq(&sysfs_open_dirent_lock);
627 	if (!sd->s_attr.open && new_od) {
628 		sd->s_attr.open = new_od;
632 	od = sd->s_attr.open;
634 	atomic_inc(&od->refcnt);
635 	list_add_tail(&of->list, &od->files);
638 	spin_unlock_irq(&sysfs_open_dirent_lock);
639 	mutex_unlock(&sysfs_open_file_mutex);
646 	/* not there, initialize a new one and retry */
647 	new_od = kmalloc(sizeof(*new_od), GFP_KERNEL);
651 	atomic_set(&new_od->refcnt, 0);
652 	atomic_set(&new_od->event, 1);
653 	init_waitqueue_head(&new_od->poll);
654 	INIT_LIST_HEAD(&new_od->files);
/*
 * sysfs_put_open_dirent - put sysfs_open_dirent
 * @sd: target sysfs_dirent
 * @of: associated sysfs_open_file
 *
 * Put @sd->s_attr.open and unlink @of from the files list. If
 * reference count reaches zero, disassociate and free it.
 *
 * NOTE(review): the list_del of @of (when non-NULL) and the kfree of the
 * detached od are elided in this extract.
 */
669 static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
670 struct sysfs_open_file *of)
672 	struct sysfs_open_dirent *od = sd->s_attr.open;
675 	mutex_lock(&sysfs_open_file_mutex);
676 	spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
681 	if (atomic_dec_and_test(&od->refcnt))
682 		sd->s_attr.open = NULL;
686 	spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
687 	mutex_unlock(&sysfs_open_file_mutex);
/*
 * open(2) entry point: validate requested access against both the file
 * mode bits and the ops actually implemented, allocate the
 * sysfs_open_file, instantiate a seq_file for unified private-data
 * handling, and register with the open dirent.
 * NOTE(review): this extract elides the error labels, some conditional
 * structure (e.g. the has_mmap branch selecting the lockdep class) and
 * the success return — treat control flow below as indicative only.
 */
692 static int kernfs_file_open(struct inode *inode, struct file *file)
694 	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
695 	const struct kernfs_ops *ops;
696 	struct sysfs_open_file *of;
697 	bool has_read, has_write, has_mmap;
700 	if (!sysfs_get_active(attr_sd))
703 	ops = kernfs_ops(attr_sd);
705 	has_read = ops->seq_show || ops->read || ops->mmap;
706 	has_write = ops->write || ops->mmap;
707 	has_mmap = ops->mmap;
709 	/* check perms and supported operations */
710 	if ((file->f_mode & FMODE_WRITE) &&
711 	(!(inode->i_mode & S_IWUGO) || !has_write))
714 	if ((file->f_mode & FMODE_READ) &&
715 	(!(inode->i_mode & S_IRUGO) || !has_read))
718 	/* allocate a sysfs_open_file for the file */
720 	of = kzalloc(sizeof(struct sysfs_open_file), GFP_KERNEL);
725 	 * The following is done to give a different lockdep key to
726 	 * @of->mutex for files which implement mmap. This is a rather
727 	 * crude way to avoid false positive lockdep warning around
728 	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
729 	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
730 	 * which mm->mmap_sem nests, while holding @of->mutex. As each
731 	 * open file has a separate mutex, it's okay as long as those don't
732 	 * happen on the same file. At this point, we can't easily give
733 	 * each file a separate locking class. Let's differentiate on
734 	 * whether the file has mmap or not for now.
737 	mutex_init(&of->mutex);
739 	mutex_init(&of->mutex);
745 	 * Always instantiate seq_file even if read access doesn't use
746 	 * seq_file or is not requested. This unifies private data access
747 	 * and readable regular files are the vast majority anyway.
750 	error = seq_open(file, &kernfs_seq_ops);
752 	error = seq_open(file, NULL);
756 	((struct seq_file *)file->private_data)->private = of;
758 	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
759 	if (file->f_mode & FMODE_WRITE)
760 		file->f_mode |= FMODE_PWRITE;
762 	/* make sure we have open dirent struct */
763 	error = sysfs_get_open_dirent(attr_sd, of);
767 	/* open succeeded, put active references */
768 	sysfs_put_active(attr_sd);
772 	seq_release(inode, file);
776 	sysfs_put_active(attr_sd);
/*
 * release(2) entry point: drop the open-dirent registration and tear down
 * the seq_file; presumably also frees the sysfs_open_file on an elided
 * line (kzalloc'd in kernfs_file_open).
 */
780 static int kernfs_file_release(struct inode *inode, struct file *filp)
782 	struct sysfs_dirent *sd = filp->f_path.dentry->d_fsdata;
783 	struct sysfs_open_file *of = sysfs_of(filp);
785 	sysfs_put_open_dirent(sd, of);
786 	seq_release(inode, filp);
/*
 * Unmap all userspace mappings of an mmap-capable attribute that is being
 * removed: pin the open dirent under the spinlock, then walk its open
 * files under the mutex calling unmap_mapping_range(), and finally drop
 * the pinning ref (of == NULL: only the refcount is dropped, no list
 * unlink).
 */
792 void sysfs_unmap_bin_file(struct sysfs_dirent *sd)
794 	struct sysfs_open_dirent *od;
795 	struct sysfs_open_file *of;
797 	if (!(sd->s_flags & SYSFS_FLAG_HAS_MMAP))
800 	spin_lock_irq(&sysfs_open_dirent_lock);
801 	od = sd->s_attr.open;
803 	atomic_inc(&od->refcnt);
804 	spin_unlock_irq(&sysfs_open_dirent_lock);
808 	mutex_lock(&sysfs_open_file_mutex);
809 	list_for_each_entry(of, &od->files, list) {
810 		struct inode *inode = file_inode(of->file);
811 		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
813 	mutex_unlock(&sysfs_open_file_mutex);
815 	sysfs_put_open_dirent(sd, NULL);
/* Sysfs attribute files are pollable. The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change. When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (Neither 'poll' nor 'select' return
 * an appropriate error code). When in doubt, set a suitable timeout value.
 */
/*
 * Change detection: compare of->event (latched at read/show time) with
 * the dirent's current event counter bumped by sysfs_notify_dirent().
 */
831 static unsigned int kernfs_file_poll(struct file *filp, poll_table *wait)
833 	struct sysfs_open_file *of = sysfs_of(filp);
834 	struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
835 	struct sysfs_open_dirent *od = attr_sd->s_attr.open;
837 	/* need parent for the kobj, grab both */
838 	if (!sysfs_get_active(attr_sd))
841 	poll_wait(filp, &od->poll, wait);
843 	sysfs_put_active(attr_sd);
845 	if (of->event != atomic_read(&od->event))
848 	return DEFAULT_POLLMASK;
851 	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
/*
 * Notify pollers of an attribute dirent: bump the event counter and wake
 * the waitqueue, under the open-dirent spinlock.  WARNs (and does
 * nothing) if called on a non-attribute dirent.
 */
854 void sysfs_notify_dirent(struct sysfs_dirent *sd)
856 	struct sysfs_open_dirent *od;
859 	spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
861 	if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
862 		od = sd->s_attr.open;
864 		atomic_inc(&od->event);
865 		wake_up_interruptible(&od->poll);
869 	spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
871 EXPORT_SYMBOL_GPL(sysfs_notify_dirent);
/*
 * kobject-level notify: resolve optional @dir then @attr under
 * sysfs_mutex and forward to sysfs_notify_dirent().  The NULL-checks
 * between lookups are elided in this extract.
 */
873 void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
875 	struct sysfs_dirent *sd = k->sd;
877 	mutex_lock(&sysfs_mutex);
880 	sd = sysfs_find_dirent(sd, dir, NULL);
882 	sd = sysfs_find_dirent(sd, attr, NULL);
884 	sysfs_notify_dirent(sd);
886 	mutex_unlock(&sysfs_mutex);
888 EXPORT_SYMBOL_GPL(sysfs_notify);
/* file_operations installed on every kernfs/sysfs regular file. */
890 const struct file_operations kernfs_file_operations = {
891 	.read = kernfs_file_read,
892 	.write = kernfs_file_write,
893 	.llseek = generic_file_llseek,
894 	.mmap = kernfs_file_mmap,
895 	.open = kernfs_file_open,
896 	.release = kernfs_file_release,
897 	.poll = kernfs_file_poll,
/*
 * kernfs_ops tables selected in sysfs_add_file_mode_ns() based on which
 * of show/store (text attrs) or read/write/mmap (bin attrs) the
 * attribute implements.  "empty" supports neither read nor write.
 */
900 static const struct kernfs_ops sysfs_file_kfops_empty = {
903 static const struct kernfs_ops sysfs_file_kfops_ro = {
904 	.seq_show = sysfs_kf_seq_show,
907 static const struct kernfs_ops sysfs_file_kfops_wo = {
908 	.write = sysfs_kf_write,
911 static const struct kernfs_ops sysfs_file_kfops_rw = {
912 	.seq_show = sysfs_kf_seq_show,
913 	.write = sysfs_kf_write,
916 static const struct kernfs_ops sysfs_bin_kfops_ro = {
917 	.read = sysfs_kf_bin_read,
920 static const struct kernfs_ops sysfs_bin_kfops_wo = {
921 	.write = sysfs_kf_bin_write,
924 static const struct kernfs_ops sysfs_bin_kfops_rw = {
925 	.read = sysfs_kf_bin_read,
926 	.write = sysfs_kf_bin_write,
927 	.mmap = sysfs_kf_bin_mmap,
/*
 * Add an attribute file under @dir_sd: pick the kernfs_ops table matching
 * the attribute's implemented methods (text show/store vs bin
 * read/write/mmap), then create the node via kernfs_create_file_ns(),
 * warning on duplicates.
 * NOTE(review): the is_bin branch structure, the size computation and the
 * final return are elided in this extract.
 */
930 int sysfs_add_file_mode_ns(struct sysfs_dirent *dir_sd,
931 const struct attribute *attr, bool is_bin,
932 umode_t mode, const void *ns)
934 	const struct kernfs_ops *ops;
935 	struct sysfs_dirent *sd;
939 	struct kobject *kobj = dir_sd->priv;
940 	const struct sysfs_ops *sysfs_ops = kobj->ktype->sysfs_ops;
942 	/* every kobject with an attribute needs a ktype assigned */
943 	if (WARN(!sysfs_ops, KERN_ERR
944 	"missing sysfs attribute operations for kobject: %s\n",
948 	if (sysfs_ops->show && sysfs_ops->store)
949 		ops = &sysfs_file_kfops_rw;
950 	else if (sysfs_ops->show)
951 		ops = &sysfs_file_kfops_ro;
952 	else if (sysfs_ops->store)
953 		ops = &sysfs_file_kfops_wo;
955 	ops = &sysfs_file_kfops_empty;
959 	struct bin_attribute *battr = (void *)attr;
961 	if ((battr->read && battr->write) || battr->mmap)
962 		ops = &sysfs_bin_kfops_rw;
963 	else if (battr->read)
964 		ops = &sysfs_bin_kfops_ro;
965 	else if (battr->write)
966 		ops = &sysfs_bin_kfops_wo;
968 	ops = &sysfs_file_kfops_empty;
973 	sd = kernfs_create_file_ns(dir_sd, attr->name, mode, size,
974 	ops, (void *)attr, ns);
976 	if (PTR_ERR(sd) == -EEXIST)
977 		sysfs_warn_dup(dir_sd, attr->name);
/*
 * kernfs_create_file_ns - create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 *
 * Returns the created node on success, ERR_PTR() value on error.
 *
 * NOTE(review): the flag-setting conditions (checking ops->seq_show and
 * ops->mmap) and the post-add error handling are elided in this extract.
 */
995 struct sysfs_dirent *kernfs_create_file_ns(struct sysfs_dirent *parent,
997 umode_t mode, loff_t size,
998 const struct kernfs_ops *ops,
999 void *priv, const void *ns)
1001 	struct sysfs_addrm_cxt acxt;
1002 	struct sysfs_dirent *sd;
1005 	sd = sysfs_new_dirent(name, (mode & S_IALLUGO) | S_IFREG,
1008 	return ERR_PTR(-ENOMEM);
1010 	sd->s_attr.ops = ops;
1011 	sd->s_attr.size = size;
1014 	sysfs_dirent_init_lockdep(sd);
1017 	 * sd->s_attr.ops is accesible only while holding active ref. We
1018 	 * need to know whether some ops are implemented outside active
1019 	 * ref. Cache their existence in flags.
1022 	sd->s_flags |= SYSFS_FLAG_HAS_SEQ_SHOW;
1024 	sd->s_flags |= SYSFS_FLAG_HAS_MMAP;
1026 	sysfs_addrm_start(&acxt);
1027 	rc = sysfs_add_one(&acxt, sd, parent);
1028 	sysfs_addrm_finish(&acxt);
/* Convenience wrapper: add @attr with its own mode and no namespace tag. */
1037 int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
1040 	return sysfs_add_file_mode_ns(dir_sd, attr, is_bin, attr->mode, NULL);
/*
 * sysfs_create_file_ns - create an attribute file for an object with custom ns
 * @kobj: object we're creating for
 * @attr: attribute descriptor
 * @ns: namespace the new file should belong to
 */
1049 int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
1052 	BUG_ON(!kobj || !kobj->sd || !attr);
1054 	return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, ns);
1057 EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
/*
 * Create a NULL-terminated array of attribute files; on the first failure
 * the already-created files are removed again (rollback loop).
 */
1059 int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
1064 	for (i = 0; ptr[i] && !err; i++)
1065 		err = sysfs_create_file(kobj, ptr[i]);
1068 	sysfs_remove_file(kobj, ptr[i]);
1071 EXPORT_SYMBOL_GPL(sysfs_create_files);
/*
 * sysfs_add_file_to_group - add an attribute file to a pre-existing group.
 * @kobj: object we're acting for.
 * @attr: attribute descriptor.
 * @group: group name.
 *
 * A NULL/empty @group presumably targets the kobject's own directory (the
 * sysfs_get() fallback path); the reference obtained here is dropped on a
 * line elided from this extract.
 */
1079 int sysfs_add_file_to_group(struct kobject *kobj,
1080 const struct attribute *attr, const char *group)
1082 	struct sysfs_dirent *dir_sd;
1086 	dir_sd = sysfs_get_dirent(kobj->sd, group);
1088 	dir_sd = sysfs_get(kobj->sd);
1093 	error = sysfs_add_file(dir_sd, attr, false);
1098 EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
/*
 * sysfs_chmod_file - update the modified mode value on an object attribute.
 * @kobj: object we're acting for.
 * @attr: attribute descriptor.
 * @mode: file permissions.
 *
 * Only the permission bits (S_IALLUGO) of @mode are applied; the file
 * type bits are preserved from the existing dirent.
 */
1107 int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
1110 	struct sysfs_dirent *sd;
1111 	struct iattr newattrs;
1114 	sd = sysfs_get_dirent(kobj->sd, attr->name);
1118 	newattrs.ia_mode = (mode & S_IALLUGO) | (sd->s_mode & ~S_IALLUGO);
1119 	newattrs.ia_valid = ATTR_MODE;
1121 	rc = kernfs_setattr(sd, &newattrs);
1126 EXPORT_SYMBOL_GPL(sysfs_chmod_file);
/*
 * sysfs_remove_file_ns - remove an object attribute with a custom ns tag
 * @kobj: object we're acting for
 * @attr: attribute descriptor
 * @ns: namespace tag of the file to remove
 *
 * Hash the attribute name and namespace tag and kill the victim.
 */
1136 void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
1139 	struct sysfs_dirent *dir_sd = kobj->sd;
1141 	kernfs_remove_by_name_ns(dir_sd, attr->name, ns);
1143 EXPORT_SYMBOL_GPL(sysfs_remove_file_ns);
/* Remove a NULL-terminated array of attribute files. */
1145 void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
1148 	for (i = 0; ptr[i]; i++)
1149 		sysfs_remove_file(kobj, ptr[i]);
1151 EXPORT_SYMBOL_GPL(sysfs_remove_files);
/*
 * sysfs_remove_file_from_group - remove an attribute file from a group.
 * @kobj: object we're acting for.
 * @attr: attribute descriptor.
 * @group: group name.
 *
 * Mirror of sysfs_add_file_to_group(); the dirent reference obtained here
 * is dropped on a line elided from this extract.
 */
1159 void sysfs_remove_file_from_group(struct kobject *kobj,
1160 const struct attribute *attr, const char *group)
1162 	struct sysfs_dirent *dir_sd;
1165 	dir_sd = sysfs_get_dirent(kobj->sd, group);
1167 	dir_sd = sysfs_get(kobj->sd);
1169 	kernfs_remove_by_name(dir_sd, attr->name);
1173 EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
/*
 * sysfs_create_bin_file - create binary file for object.
 * @kobj: object we're acting for.
 * @attr: attribute descriptor.
 */
1180 int sysfs_create_bin_file(struct kobject *kobj,
1181 const struct bin_attribute *attr)
1183 	BUG_ON(!kobj || !kobj->sd || !attr);
1185 	return sysfs_add_file(kobj->sd, &attr->attr, true);
1187 EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
/*
 * sysfs_remove_bin_file - remove binary file for object.
 * @kobj: object we're acting for.
 * @attr: attribute descriptor.
 */
1194 void sysfs_remove_bin_file(struct kobject *kobj,
1195 const struct bin_attribute *attr)
1197 	kernfs_remove_by_name(kobj->sd, attr->attr.name);
1199 EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
/*
 * Bookkeeping for sysfs_schedule_callback(): one entry per pending
 * deferred callback, chained on sysfs_workq (protected by
 * sysfs_workq_mutex) and executed on the lazily-created "sysfsd"
 * singlethread workqueue.
 * NOTE(review): the @data member referenced by the worker is elided in
 * this extract, as is the struct's closing brace.
 */
1201 struct sysfs_schedule_callback_struct {
1202 	struct list_head workq_list;
1203 	struct kobject *kobj;	/* pinned until the callback returns */
1204 	void (*func)(void *);
1206 	struct module *owner;	/* module ref held while queued */
1207 	struct work_struct work;
1210 static struct workqueue_struct *sysfs_workqueue;
1211 static DEFINE_MUTEX(sysfs_workq_mutex);
1212 static LIST_HEAD(sysfs_workq);
/*
 * Workqueue handler: invoke the deferred callback, release the kobject
 * and module references taken at schedule time, then unlink the entry
 * from sysfs_workq (the kfree is presumably on an elided line).
 */
1213 static void sysfs_schedule_callback_work(struct work_struct *work)
1215 	struct sysfs_schedule_callback_struct *ss = container_of(work,
1216 	struct sysfs_schedule_callback_struct, work);
1218 	(ss->func)(ss->data);
1219 	kobject_put(ss->kobj);
1220 	module_put(ss->owner);
1221 	mutex_lock(&sysfs_workq_mutex);
1222 	list_del(&ss->workq_list);
1223 	mutex_unlock(&sysfs_workq_mutex);
/*
 * sysfs_schedule_callback - helper to schedule a callback for a kobject
 * @kobj: object we're acting for.
 * @func: callback function to invoke later.
 * @data: argument to pass to @func.
 * @owner: module owning the callback code
 *
 * sysfs attribute methods must not unregister themselves or their parent
 * kobject (which would amount to the same thing). Attempts to do so will
 * deadlock, since unregistration is mutually exclusive with driver
 * callbacks.
 *
 * Instead methods can call this routine, which will attempt to allocate
 * and schedule a workqueue request to call back @func with @data as its
 * argument in the workqueue's process context. @kobj will be pinned
 * until @func returns.
 *
 * Returns 0 if the request was submitted, -ENOMEM if storage could not
 * be allocated, -ENODEV if a reference to @owner isn't available,
 * -EAGAIN if a callback has already been scheduled for @kobj.
 *
 * NOTE(review): error paths (module_put on failure, kobject_get, the
 * ss field initialization between kmalloc and INIT_WORK, and the final
 * return 0) are elided in this extract.
 */
1248 int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
1249 void *data, struct module *owner)
1251 	struct sysfs_schedule_callback_struct *ss, *tmp;
1253 	if (!try_module_get(owner))
1256 	mutex_lock(&sysfs_workq_mutex);
1257 	list_for_each_entry_safe(ss, tmp, &sysfs_workq, workq_list)
1258 	if (ss->kobj == kobj) {
1260 		mutex_unlock(&sysfs_workq_mutex);
1263 	mutex_unlock(&sysfs_workq_mutex);
1265 	if (sysfs_workqueue == NULL) {
1266 		sysfs_workqueue = create_singlethread_workqueue("sysfsd");
1267 		if (sysfs_workqueue == NULL) {
1273 	ss = kmalloc(sizeof(*ss), GFP_KERNEL);
1283 	INIT_WORK(&ss->work, sysfs_schedule_callback_work);
1284 	INIT_LIST_HEAD(&ss->workq_list);
1285 	mutex_lock(&sysfs_workq_mutex);
1286 	list_add_tail(&ss->workq_list, &sysfs_workq);
1287 	mutex_unlock(&sysfs_workq_mutex);
1288 	queue_work(sysfs_workqueue, &ss->work);
1291 EXPORT_SYMBOL_GPL(sysfs_schedule_callback);