/*
 * fs/sysfs/file.c - sysfs regular (text) file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 *
 * Please see Documentation/filesystems/sysfs.txt for more information.
 */
13 #include <linux/module.h>
14 #include <linux/kobject.h>
15 #include <linux/kallsyms.h>
16 #include <linux/slab.h>
17 #include <linux/fsnotify.h>
18 #include <linux/namei.h>
19 #include <linux/poll.h>
20 #include <linux/list.h>
21 #include <linux/mutex.h>
22 #include <linux/limits.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
/*
 * There's one sysfs_open_file for each open file and one sysfs_open_dirent
 * for each sysfs_dirent with one or more open files.
 *
 * sysfs_dirent->s_attr.open points to sysfs_open_dirent.  s_attr.open is
 * protected by sysfs_open_dirent_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * sysfs_open_file.  sysfs_open_files are chained at
 * sysfs_open_dirent->files, which is protected by sysfs_open_file_mutex.
 */
/* protects sysfs_dirent->s_attr.open (see comment block above) */
static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
/* protects sysfs_open_dirent->files lists (see comment block above) */
static DEFINE_MUTEX(sysfs_open_file_mutex);
43 struct sysfs_open_dirent {
46 wait_queue_head_t poll;
47 struct list_head files; /* goes through sysfs_open_file.list */
50 static struct sysfs_open_file *sysfs_of(struct file *file)
52 return ((struct seq_file *)file->private_data)->private;
56 * Determine the kernfs_ops for the given sysfs_dirent. This function must
57 * be called while holding an active reference.
59 static const struct kernfs_ops *kernfs_ops(struct sysfs_dirent *sd)
61 if (sd->s_flags & SYSFS_FLAG_LOCKDEP)
62 lockdep_assert_held(sd);
63 return sd->s_attr.ops;
67 * Determine ktype->sysfs_ops for the given sysfs_dirent. This function
68 * must be called while holding an active reference.
70 static const struct sysfs_ops *sysfs_file_ops(struct sysfs_dirent *sd)
72 struct kobject *kobj = sd->s_parent->priv;
74 if (sd->s_flags & SYSFS_FLAG_LOCKDEP)
75 lockdep_assert_held(sd);
76 return kobj->ktype ? kobj->ktype->sysfs_ops : NULL;
80 * Reads on sysfs are handled through seq_file, which takes care of hairy
81 * details like buffering and seeking. The following function pipes
82 * sysfs_ops->show() result through seq_file.
84 static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
86 struct sysfs_open_file *of = sf->private;
87 struct kobject *kobj = of->sd->s_parent->priv;
88 const struct sysfs_ops *ops = sysfs_file_ops(of->sd);
92 /* acquire buffer and ensure that it's >= PAGE_SIZE */
93 count = seq_get_buf(sf, &buf);
94 if (count < PAGE_SIZE) {
100 * Invoke show(). Control may reach here via seq file lseek even
101 * if @ops->show() isn't implemented.
104 count = ops->show(kobj, of->sd->priv, buf);
110 * The code works fine with PAGE_SIZE return but it's likely to
111 * indicate truncated result or overflow in normal use cases.
113 if (count >= (ssize_t)PAGE_SIZE) {
114 print_symbol("fill_read_buffer: %s returned bad count\n",
115 (unsigned long)ops->show);
116 /* Try to struggle along */
117 count = PAGE_SIZE - 1;
119 seq_commit(sf, count);
123 static ssize_t sysfs_kf_bin_read(struct sysfs_open_file *of, char *buf,
124 size_t count, loff_t pos)
126 struct bin_attribute *battr = of->sd->priv;
127 struct kobject *kobj = of->sd->s_parent->priv;
128 loff_t size = file_inode(of->file)->i_size;
136 if (pos + count > size)
143 return battr->read(of->file, kobj, battr, buf, pos, count);
146 static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
148 struct sysfs_open_file *of = sf->private;
149 const struct kernfs_ops *ops;
152 * @of->mutex nests outside active ref and is just to ensure that
153 * the ops aren't called concurrently for the same open file.
155 mutex_lock(&of->mutex);
156 if (!sysfs_get_active(of->sd))
157 return ERR_PTR(-ENODEV);
159 ops = kernfs_ops(of->sd);
160 if (ops->seq_start) {
161 return ops->seq_start(sf, ppos);
164 * The same behavior and code as single_open(). Returns
165 * !NULL if pos is at the beginning; otherwise, NULL.
167 return NULL + !*ppos;
171 static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
173 struct sysfs_open_file *of = sf->private;
174 const struct kernfs_ops *ops = kernfs_ops(of->sd);
177 return ops->seq_next(sf, v, ppos);
180 * The same behavior and code as single_open(), always
181 * terminate after the initial read.
188 static void kernfs_seq_stop(struct seq_file *sf, void *v)
190 struct sysfs_open_file *of = sf->private;
191 const struct kernfs_ops *ops = kernfs_ops(of->sd);
194 ops->seq_stop(sf, v);
196 sysfs_put_active(of->sd);
197 mutex_unlock(&of->mutex);
200 static int kernfs_seq_show(struct seq_file *sf, void *v)
202 struct sysfs_open_file *of = sf->private;
204 of->event = atomic_read(&of->sd->s_attr.open->event);
206 return of->sd->s_attr.ops->seq_show(sf, v);
209 static const struct seq_operations kernfs_seq_ops = {
210 .start = kernfs_seq_start,
211 .next = kernfs_seq_next,
212 .stop = kernfs_seq_stop,
213 .show = kernfs_seq_show,
217 * As reading a bin file can have side-effects, the exact offset and bytes
218 * specified in read(2) call should be passed to the read callback making
219 * it difficult to use seq_file. Implement simplistic custom buffering for
222 static ssize_t kernfs_file_direct_read(struct sysfs_open_file *of,
223 char __user *user_buf, size_t count,
226 ssize_t len = min_t(size_t, count, PAGE_SIZE);
227 const struct kernfs_ops *ops;
230 buf = kmalloc(len, GFP_KERNEL);
235 * @of->mutex nests outside active ref and is just to ensure that
236 * the ops aren't called concurrently for the same open file.
238 mutex_lock(&of->mutex);
239 if (!sysfs_get_active(of->sd)) {
241 mutex_unlock(&of->mutex);
245 ops = kernfs_ops(of->sd);
247 len = ops->read(of, buf, len, *ppos);
251 sysfs_put_active(of->sd);
252 mutex_unlock(&of->mutex);
257 if (copy_to_user(user_buf, buf, len)) {
270 * kernfs_file_read - kernfs vfs read callback
271 * @file: file pointer
272 * @user_buf: data to write
273 * @count: number of bytes
274 * @ppos: starting offset
276 static ssize_t kernfs_file_read(struct file *file, char __user *user_buf,
277 size_t count, loff_t *ppos)
279 struct sysfs_open_file *of = sysfs_of(file);
281 if (of->sd->s_flags & SYSFS_FLAG_HAS_SEQ_SHOW)
282 return seq_read(file, user_buf, count, ppos);
284 return kernfs_file_direct_read(of, user_buf, count, ppos);
287 /* kernfs write callback for regular sysfs files */
288 static ssize_t sysfs_kf_write(struct sysfs_open_file *of, char *buf,
289 size_t count, loff_t pos)
291 const struct sysfs_ops *ops = sysfs_file_ops(of->sd);
292 struct kobject *kobj = of->sd->s_parent->priv;
297 return ops->store(kobj, of->sd->priv, buf, count);
300 /* kernfs write callback for bin sysfs files */
301 static ssize_t sysfs_kf_bin_write(struct sysfs_open_file *of, char *buf,
302 size_t count, loff_t pos)
304 struct bin_attribute *battr = of->sd->priv;
305 struct kobject *kobj = of->sd->s_parent->priv;
306 loff_t size = file_inode(of->file)->i_size;
311 count = min_t(ssize_t, count, size - pos);
319 return battr->write(of->file, kobj, battr, buf, pos, count);
323 * kernfs_file_write - kernfs vfs write callback
324 * @file: file pointer
325 * @user_buf: data to write
326 * @count: number of bytes
327 * @ppos: starting offset
329 * Copy data in from userland and pass it to the matching kernfs write
332 * There is no easy way for us to know if userspace is only doing a partial
333 * write, so we don't support them. We expect the entire buffer to come on
334 * the first write. Hint: if you're writing a value, first read the file,
335 * modify only the the value you're changing, then write entire buffer
338 static ssize_t kernfs_file_write(struct file *file, const char __user *user_buf,
339 size_t count, loff_t *ppos)
341 struct sysfs_open_file *of = sysfs_of(file);
342 ssize_t len = min_t(size_t, count, PAGE_SIZE);
343 const struct kernfs_ops *ops;
346 buf = kmalloc(len + 1, GFP_KERNEL);
350 if (copy_from_user(buf, user_buf, len)) {
354 buf[len] = '\0'; /* guarantee string termination */
357 * @of->mutex nests outside active ref and is just to ensure that
358 * the ops aren't called concurrently for the same open file.
360 mutex_lock(&of->mutex);
361 if (!sysfs_get_active(of->sd)) {
362 mutex_unlock(&of->mutex);
367 ops = kernfs_ops(of->sd);
369 len = ops->write(of, buf, len, *ppos);
373 sysfs_put_active(of->sd);
374 mutex_unlock(&of->mutex);
383 static int sysfs_kf_bin_mmap(struct sysfs_open_file *of,
384 struct vm_area_struct *vma)
386 struct bin_attribute *battr = of->sd->priv;
387 struct kobject *kobj = of->sd->s_parent->priv;
392 return battr->mmap(of->file, kobj, battr, vma);
395 static void kernfs_vma_open(struct vm_area_struct *vma)
397 struct file *file = vma->vm_file;
398 struct sysfs_open_file *of = sysfs_of(file);
403 if (!sysfs_get_active(of->sd))
406 if (of->vm_ops->open)
407 of->vm_ops->open(vma);
409 sysfs_put_active(of->sd);
412 static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
414 struct file *file = vma->vm_file;
415 struct sysfs_open_file *of = sysfs_of(file);
419 return VM_FAULT_SIGBUS;
421 if (!sysfs_get_active(of->sd))
422 return VM_FAULT_SIGBUS;
424 ret = VM_FAULT_SIGBUS;
425 if (of->vm_ops->fault)
426 ret = of->vm_ops->fault(vma, vmf);
428 sysfs_put_active(of->sd);
432 static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
433 struct vm_fault *vmf)
435 struct file *file = vma->vm_file;
436 struct sysfs_open_file *of = sysfs_of(file);
440 return VM_FAULT_SIGBUS;
442 if (!sysfs_get_active(of->sd))
443 return VM_FAULT_SIGBUS;
446 if (of->vm_ops->page_mkwrite)
447 ret = of->vm_ops->page_mkwrite(vma, vmf);
449 file_update_time(file);
451 sysfs_put_active(of->sd);
455 static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
456 void *buf, int len, int write)
458 struct file *file = vma->vm_file;
459 struct sysfs_open_file *of = sysfs_of(file);
465 if (!sysfs_get_active(of->sd))
469 if (of->vm_ops->access)
470 ret = of->vm_ops->access(vma, addr, buf, len, write);
472 sysfs_put_active(of->sd);
#ifdef CONFIG_NUMA
/* forward NUMA set_policy to the underlying vm_ops under an active ref */
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct sysfs_open_file *of = sysfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!sysfs_get_active(of->sd))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	sysfs_put_active(of->sd);
	return ret;
}
#endif	/* CONFIG_NUMA */
#ifdef CONFIG_NUMA
/* forward NUMA get_policy to the underlying vm_ops under an active ref */
static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct sysfs_open_file *of = sysfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!sysfs_get_active(of->sd))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	sysfs_put_active(of->sd);
	return pol;
}
#endif	/* CONFIG_NUMA */
#ifdef CONFIG_NUMA
/* forward page migration to the underlying vm_ops under an active ref */
static int kernfs_vma_migrate(struct vm_area_struct *vma,
			      const nodemask_t *from, const nodemask_t *to,
			      unsigned long flags)
{
	struct file *file = vma->vm_file;
	struct sysfs_open_file *of = sysfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!sysfs_get_active(of->sd))
		return 0;

	ret = 0;
	if (of->vm_ops->migrate)
		ret = of->vm_ops->migrate(vma, from, to, flags);

	sysfs_put_active(of->sd);
	return ret;
}
#endif	/* CONFIG_NUMA */
542 static const struct vm_operations_struct kernfs_vm_ops = {
543 .open = kernfs_vma_open,
544 .fault = kernfs_vma_fault,
545 .page_mkwrite = kernfs_vma_page_mkwrite,
546 .access = kernfs_vma_access,
548 .set_policy = kernfs_vma_set_policy,
549 .get_policy = kernfs_vma_get_policy,
550 .migrate = kernfs_vma_migrate,
554 static int kernfs_file_mmap(struct file *file, struct vm_area_struct *vma)
556 struct sysfs_open_file *of = sysfs_of(file);
557 const struct kernfs_ops *ops;
560 mutex_lock(&of->mutex);
563 if (!sysfs_get_active(of->sd))
566 ops = kernfs_ops(of->sd);
568 rc = ops->mmap(of, vma);
573 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
574 * to satisfy versions of X which crash if the mmap fails: that
575 * substitutes a new vm_file, and we don't then want bin_vm_ops.
577 if (vma->vm_file != file)
581 if (of->mmapped && of->vm_ops != vma->vm_ops)
585 * It is not possible to successfully wrap close.
586 * So error if someone is trying to use close.
589 if (vma->vm_ops && vma->vm_ops->close)
594 of->vm_ops = vma->vm_ops;
595 vma->vm_ops = &kernfs_vm_ops;
597 sysfs_put_active(of->sd);
599 mutex_unlock(&of->mutex);
605 * sysfs_get_open_dirent - get or create sysfs_open_dirent
606 * @sd: target sysfs_dirent
607 * @of: sysfs_open_file for this instance of open
609 * If @sd->s_attr.open exists, increment its reference count;
610 * otherwise, create one. @of is chained to the files list.
613 * Kernel thread context (may sleep).
616 * 0 on success, -errno on failure.
618 static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
619 struct sysfs_open_file *of)
621 struct sysfs_open_dirent *od, *new_od = NULL;
624 mutex_lock(&sysfs_open_file_mutex);
625 spin_lock_irq(&sysfs_open_dirent_lock);
627 if (!sd->s_attr.open && new_od) {
628 sd->s_attr.open = new_od;
632 od = sd->s_attr.open;
634 atomic_inc(&od->refcnt);
635 list_add_tail(&of->list, &od->files);
638 spin_unlock_irq(&sysfs_open_dirent_lock);
639 mutex_unlock(&sysfs_open_file_mutex);
646 /* not there, initialize a new one and retry */
647 new_od = kmalloc(sizeof(*new_od), GFP_KERNEL);
651 atomic_set(&new_od->refcnt, 0);
652 atomic_set(&new_od->event, 1);
653 init_waitqueue_head(&new_od->poll);
654 INIT_LIST_HEAD(&new_od->files);
659 * sysfs_put_open_dirent - put sysfs_open_dirent
660 * @sd: target sysfs_dirent
661 * @of: associated sysfs_open_file
663 * Put @sd->s_attr.open and unlink @of from the files list. If
664 * reference count reaches zero, disassociate and free it.
669 static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
670 struct sysfs_open_file *of)
672 struct sysfs_open_dirent *od = sd->s_attr.open;
675 mutex_lock(&sysfs_open_file_mutex);
676 spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
681 if (atomic_dec_and_test(&od->refcnt))
682 sd->s_attr.open = NULL;
686 spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
687 mutex_unlock(&sysfs_open_file_mutex);
692 static int kernfs_file_open(struct inode *inode, struct file *file)
694 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
695 const struct kernfs_ops *ops;
696 struct sysfs_open_file *of;
697 bool has_read, has_write, has_mmap;
700 if (!sysfs_get_active(attr_sd))
703 ops = kernfs_ops(attr_sd);
705 has_read = ops->seq_show || ops->read || ops->mmap;
706 has_write = ops->write || ops->mmap;
707 has_mmap = ops->mmap;
709 /* check perms and supported operations */
710 if ((file->f_mode & FMODE_WRITE) &&
711 (!(inode->i_mode & S_IWUGO) || !has_write))
714 if ((file->f_mode & FMODE_READ) &&
715 (!(inode->i_mode & S_IRUGO) || !has_read))
718 /* allocate a sysfs_open_file for the file */
720 of = kzalloc(sizeof(struct sysfs_open_file), GFP_KERNEL);
725 * The following is done to give a different lockdep key to
726 * @of->mutex for files which implement mmap. This is a rather
727 * crude way to avoid false positive lockdep warning around
728 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
729 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
730 * which mm->mmap_sem nests, while holding @of->mutex. As each
731 * open file has a separate mutex, it's okay as long as those don't
732 * happen on the same file. At this point, we can't easily give
733 * each file a separate locking class. Let's differentiate on
734 * whether the file has mmap or not for now.
737 mutex_init(&of->mutex);
739 mutex_init(&of->mutex);
745 * Always instantiate seq_file even if read access doesn't use
746 * seq_file or is not requested. This unifies private data access
747 * and readable regular files are the vast majority anyway.
750 error = seq_open(file, &kernfs_seq_ops);
752 error = seq_open(file, NULL);
756 ((struct seq_file *)file->private_data)->private = of;
758 /* seq_file clears PWRITE unconditionally, restore it if WRITE */
759 if (file->f_mode & FMODE_WRITE)
760 file->f_mode |= FMODE_PWRITE;
762 /* make sure we have open dirent struct */
763 error = sysfs_get_open_dirent(attr_sd, of);
767 /* open succeeded, put active references */
768 sysfs_put_active(attr_sd);
772 seq_release(inode, file);
776 sysfs_put_active(attr_sd);
780 static int kernfs_file_release(struct inode *inode, struct file *filp)
782 struct sysfs_dirent *sd = filp->f_path.dentry->d_fsdata;
783 struct sysfs_open_file *of = sysfs_of(filp);
785 sysfs_put_open_dirent(sd, of);
786 seq_release(inode, filp);
792 void sysfs_unmap_bin_file(struct sysfs_dirent *sd)
794 struct sysfs_open_dirent *od;
795 struct sysfs_open_file *of;
797 if (!(sd->s_flags & SYSFS_FLAG_HAS_MMAP))
800 spin_lock_irq(&sysfs_open_dirent_lock);
801 od = sd->s_attr.open;
803 atomic_inc(&od->refcnt);
804 spin_unlock_irq(&sysfs_open_dirent_lock);
808 mutex_lock(&sysfs_open_file_mutex);
809 list_for_each_entry(of, &od->files, list) {
810 struct inode *inode = file_inode(of->file);
811 unmap_mapping_range(inode->i_mapping, 0, 0, 1);
813 mutex_unlock(&sysfs_open_file_mutex);
815 sysfs_put_open_dirent(sd, NULL);
818 /* Sysfs attribute files are pollable. The idea is that you read
819 * the content and then you use 'poll' or 'select' to wait for
820 * the content to change. When the content changes (assuming the
821 * manager for the kobject supports notification), poll will
822 * return POLLERR|POLLPRI, and select will return the fd whether
823 * it is waiting for read, write, or exceptions.
824 * Once poll/select indicates that the value has changed, you
825 * need to close and re-open the file, or seek to 0 and read again.
826 * Reminder: this only works for attributes which actively support
827 * it, and it is not possible to test an attribute from userspace
828 * to see if it supports poll (Neither 'poll' nor 'select' return
829 * an appropriate error code). When in doubt, set a suitable timeout value.
831 static unsigned int kernfs_file_poll(struct file *filp, poll_table *wait)
833 struct sysfs_open_file *of = sysfs_of(filp);
834 struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
835 struct sysfs_open_dirent *od = attr_sd->s_attr.open;
837 /* need parent for the kobj, grab both */
838 if (!sysfs_get_active(attr_sd))
841 poll_wait(filp, &od->poll, wait);
843 sysfs_put_active(attr_sd);
845 if (of->event != atomic_read(&od->event))
848 return DEFAULT_POLLMASK;
851 return DEFAULT_POLLMASK|POLLERR|POLLPRI;
855 * kernfs_notify - notify a kernfs file
856 * @sd: file to notify
858 * Notify @sd such that poll(2) on @sd wakes up.
860 void kernfs_notify(struct sysfs_dirent *sd)
862 struct sysfs_open_dirent *od;
865 spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
867 if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
868 od = sd->s_attr.open;
870 atomic_inc(&od->event);
871 wake_up_interruptible(&od->poll);
875 spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
877 EXPORT_SYMBOL_GPL(kernfs_notify);
879 void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
881 struct sysfs_dirent *sd = k->sd, *tmp;
884 sd = sysfs_get_dirent(sd, dir);
889 tmp = sysfs_get_dirent(sd, attr);
899 EXPORT_SYMBOL_GPL(sysfs_notify);
901 const struct file_operations kernfs_file_operations = {
902 .read = kernfs_file_read,
903 .write = kernfs_file_write,
904 .llseek = generic_file_llseek,
905 .mmap = kernfs_file_mmap,
906 .open = kernfs_file_open,
907 .release = kernfs_file_release,
908 .poll = kernfs_file_poll,
911 static const struct kernfs_ops sysfs_file_kfops_empty = {
914 static const struct kernfs_ops sysfs_file_kfops_ro = {
915 .seq_show = sysfs_kf_seq_show,
918 static const struct kernfs_ops sysfs_file_kfops_wo = {
919 .write = sysfs_kf_write,
922 static const struct kernfs_ops sysfs_file_kfops_rw = {
923 .seq_show = sysfs_kf_seq_show,
924 .write = sysfs_kf_write,
927 static const struct kernfs_ops sysfs_bin_kfops_ro = {
928 .read = sysfs_kf_bin_read,
931 static const struct kernfs_ops sysfs_bin_kfops_wo = {
932 .write = sysfs_kf_bin_write,
935 static const struct kernfs_ops sysfs_bin_kfops_rw = {
936 .read = sysfs_kf_bin_read,
937 .write = sysfs_kf_bin_write,
938 .mmap = sysfs_kf_bin_mmap,
941 int sysfs_add_file_mode_ns(struct sysfs_dirent *dir_sd,
942 const struct attribute *attr, bool is_bin,
943 umode_t mode, const void *ns)
945 struct lock_class_key *key = NULL;
946 const struct kernfs_ops *ops;
947 struct sysfs_dirent *sd;
951 struct kobject *kobj = dir_sd->priv;
952 const struct sysfs_ops *sysfs_ops = kobj->ktype->sysfs_ops;
954 /* every kobject with an attribute needs a ktype assigned */
955 if (WARN(!sysfs_ops, KERN_ERR
956 "missing sysfs attribute operations for kobject: %s\n",
960 if (sysfs_ops->show && sysfs_ops->store)
961 ops = &sysfs_file_kfops_rw;
962 else if (sysfs_ops->show)
963 ops = &sysfs_file_kfops_ro;
964 else if (sysfs_ops->store)
965 ops = &sysfs_file_kfops_wo;
967 ops = &sysfs_file_kfops_empty;
971 struct bin_attribute *battr = (void *)attr;
973 if ((battr->read && battr->write) || battr->mmap)
974 ops = &sysfs_bin_kfops_rw;
975 else if (battr->read)
976 ops = &sysfs_bin_kfops_ro;
977 else if (battr->write)
978 ops = &sysfs_bin_kfops_wo;
980 ops = &sysfs_file_kfops_empty;
985 #ifdef CONFIG_DEBUG_LOCK_ALLOC
986 if (!attr->ignore_lockdep)
987 key = attr->key ?: (struct lock_class_key *)&attr->skey;
989 sd = kernfs_create_file_ns_key(dir_sd, attr->name, mode, size,
990 ops, (void *)attr, ns, key);
992 if (PTR_ERR(sd) == -EEXIST)
993 sysfs_warn_dup(dir_sd, attr->name);
1000 * kernfs_create_file_ns_key - create a file
1001 * @parent: directory to create the file in
1002 * @name: name of the file
1003 * @mode: mode of the file
1004 * @size: size of the file
1005 * @ops: kernfs operations for the file
1006 * @priv: private data for the file
1007 * @ns: optional namespace tag of the file
1008 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
1010 * Returns the created node on success, ERR_PTR() value on error.
1012 struct sysfs_dirent *kernfs_create_file_ns_key(struct sysfs_dirent *parent,
1014 umode_t mode, loff_t size,
1015 const struct kernfs_ops *ops,
1016 void *priv, const void *ns,
1017 struct lock_class_key *key)
1019 struct sysfs_addrm_cxt acxt;
1020 struct sysfs_dirent *sd;
1023 sd = sysfs_new_dirent(name, (mode & S_IALLUGO) | S_IFREG,
1026 return ERR_PTR(-ENOMEM);
1028 sd->s_attr.ops = ops;
1029 sd->s_attr.size = size;
1033 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1035 lockdep_init_map(&sd->dep_map, "s_active", key, 0);
1036 sd->s_flags |= SYSFS_FLAG_LOCKDEP;
1041 * sd->s_attr.ops is accesible only while holding active ref. We
1042 * need to know whether some ops are implemented outside active
1043 * ref. Cache their existence in flags.
1046 sd->s_flags |= SYSFS_FLAG_HAS_SEQ_SHOW;
1048 sd->s_flags |= SYSFS_FLAG_HAS_MMAP;
1050 sysfs_addrm_start(&acxt);
1051 rc = sysfs_add_one(&acxt, sd, parent);
1052 sysfs_addrm_finish(&acxt);
1061 int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
1064 return sysfs_add_file_mode_ns(dir_sd, attr, is_bin, attr->mode, NULL);
1068 * sysfs_create_file_ns - create an attribute file for an object with custom ns
1069 * @kobj: object we're creating for
1070 * @attr: attribute descriptor
1071 * @ns: namespace the new file should belong to
1073 int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
1076 BUG_ON(!kobj || !kobj->sd || !attr);
1078 return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, ns);
1081 EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
/* create @ptr[] attributes; on failure, remove the ones already created */
int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
{
	int err = 0;
	int i;

	for (i = 0; ptr[i] && !err; i++)
		err = sysfs_create_file(kobj, ptr[i]);
	if (err)
		while (--i >= 0)
			sysfs_remove_file(kobj, ptr[i]);
	return err;
}
EXPORT_SYMBOL_GPL(sysfs_create_files);
1098 * sysfs_add_file_to_group - add an attribute file to a pre-existing group.
1099 * @kobj: object we're acting for.
1100 * @attr: attribute descriptor.
1101 * @group: group name.
1103 int sysfs_add_file_to_group(struct kobject *kobj,
1104 const struct attribute *attr, const char *group)
1106 struct sysfs_dirent *dir_sd;
1110 dir_sd = sysfs_get_dirent(kobj->sd, group);
1112 dir_sd = sysfs_get(kobj->sd);
1117 error = sysfs_add_file(dir_sd, attr, false);
1122 EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
1125 * sysfs_chmod_file - update the modified mode value on an object attribute.
1126 * @kobj: object we're acting for.
1127 * @attr: attribute descriptor.
1128 * @mode: file permissions.
1131 int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
1134 struct sysfs_dirent *sd;
1135 struct iattr newattrs;
1138 sd = sysfs_get_dirent(kobj->sd, attr->name);
1142 newattrs.ia_mode = (mode & S_IALLUGO) | (sd->s_mode & ~S_IALLUGO);
1143 newattrs.ia_valid = ATTR_MODE;
1145 rc = kernfs_setattr(sd, &newattrs);
1150 EXPORT_SYMBOL_GPL(sysfs_chmod_file);
1153 * sysfs_remove_file_ns - remove an object attribute with a custom ns tag
1154 * @kobj: object we're acting for
1155 * @attr: attribute descriptor
1156 * @ns: namespace tag of the file to remove
1158 * Hash the attribute name and namespace tag and kill the victim.
1160 void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
1163 struct sysfs_dirent *dir_sd = kobj->sd;
1165 kernfs_remove_by_name_ns(dir_sd, attr->name, ns);
1167 EXPORT_SYMBOL_GPL(sysfs_remove_file_ns);
/* remove every attribute in the NULL-terminated @ptr[] array */
void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
{
	int i;

	for (i = 0; ptr[i]; i++)
		sysfs_remove_file(kobj, ptr[i]);
}
EXPORT_SYMBOL_GPL(sysfs_remove_files);
1178 * sysfs_remove_file_from_group - remove an attribute file from a group.
1179 * @kobj: object we're acting for.
1180 * @attr: attribute descriptor.
1181 * @group: group name.
1183 void sysfs_remove_file_from_group(struct kobject *kobj,
1184 const struct attribute *attr, const char *group)
1186 struct sysfs_dirent *dir_sd;
1189 dir_sd = sysfs_get_dirent(kobj->sd, group);
1191 dir_sd = sysfs_get(kobj->sd);
1193 kernfs_remove_by_name(dir_sd, attr->name);
1197 EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
1200 * sysfs_create_bin_file - create binary file for object.
1202 * @attr: attribute descriptor.
1204 int sysfs_create_bin_file(struct kobject *kobj,
1205 const struct bin_attribute *attr)
1207 BUG_ON(!kobj || !kobj->sd || !attr);
1209 return sysfs_add_file(kobj->sd, &attr->attr, true);
1211 EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
1214 * sysfs_remove_bin_file - remove binary file for object.
1216 * @attr: attribute descriptor.
1218 void sysfs_remove_bin_file(struct kobject *kobj,
1219 const struct bin_attribute *attr)
1221 kernfs_remove_by_name(kobj->sd, attr->attr.name);
1223 EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
1225 struct sysfs_schedule_callback_struct {
1226 struct list_head workq_list;
1227 struct kobject *kobj;
1228 void (*func)(void *);
1230 struct module *owner;
1231 struct work_struct work;
/* lazily-created singlethread workqueue for sysfs_schedule_callback() */
static struct workqueue_struct *sysfs_workqueue;
/* protects sysfs_workq */
static DEFINE_MUTEX(sysfs_workq_mutex);
/* list of pending sysfs_schedule_callback_structs */
static LIST_HEAD(sysfs_workq);
1237 static void sysfs_schedule_callback_work(struct work_struct *work)
1239 struct sysfs_schedule_callback_struct *ss = container_of(work,
1240 struct sysfs_schedule_callback_struct, work);
1242 (ss->func)(ss->data);
1243 kobject_put(ss->kobj);
1244 module_put(ss->owner);
1245 mutex_lock(&sysfs_workq_mutex);
1246 list_del(&ss->workq_list);
1247 mutex_unlock(&sysfs_workq_mutex);
1252 * sysfs_schedule_callback - helper to schedule a callback for a kobject
1253 * @kobj: object we're acting for.
1254 * @func: callback function to invoke later.
1255 * @data: argument to pass to @func.
1256 * @owner: module owning the callback code
1258 * sysfs attribute methods must not unregister themselves or their parent
1259 * kobject (which would amount to the same thing). Attempts to do so will
1260 * deadlock, since unregistration is mutually exclusive with driver
1263 * Instead methods can call this routine, which will attempt to allocate
1264 * and schedule a workqueue request to call back @func with @data as its
1265 * argument in the workqueue's process context. @kobj will be pinned
1266 * until @func returns.
1268 * Returns 0 if the request was submitted, -ENOMEM if storage could not
1269 * be allocated, -ENODEV if a reference to @owner isn't available,
1270 * -EAGAIN if a callback has already been scheduled for @kobj.
1272 int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
1273 void *data, struct module *owner)
1275 struct sysfs_schedule_callback_struct *ss, *tmp;
1277 if (!try_module_get(owner))
1280 mutex_lock(&sysfs_workq_mutex);
1281 list_for_each_entry_safe(ss, tmp, &sysfs_workq, workq_list)
1282 if (ss->kobj == kobj) {
1284 mutex_unlock(&sysfs_workq_mutex);
1287 mutex_unlock(&sysfs_workq_mutex);
1289 if (sysfs_workqueue == NULL) {
1290 sysfs_workqueue = create_singlethread_workqueue("sysfsd");
1291 if (sysfs_workqueue == NULL) {
1297 ss = kmalloc(sizeof(*ss), GFP_KERNEL);
1307 INIT_WORK(&ss->work, sysfs_schedule_callback_work);
1308 INIT_LIST_HEAD(&ss->workq_list);
1309 mutex_lock(&sysfs_workq_mutex);
1310 list_add_tail(&ss->workq_list, &sysfs_workq);
1311 mutex_unlock(&sysfs_workq_mutex);
1312 queue_work(sysfs_workqueue, &ss->work);
1315 EXPORT_SYMBOL_GPL(sysfs_schedule_callback);