/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
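/*
 * For example, a region named "foo" via ASHMEM_SET_NAME ends up with the
 * full name "dev/ashmem/foo", which is what appears in /proc/<pid>/maps
 * once the region is mapped; the prefix is prepended in ashmem_open()
 * below.
 */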
/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
	struct list_head unpinned_list;	 /* this area's unpinned ranges */
	struct file *file;		 /* the shmem-based backing file */
	size_t size;			 /* size of the mapping, in bytes */
	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */
};
/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;		/* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;
/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))
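/*
 * A worked example of the interval predicates above: for a range covering
 * pages [2, 5] (so range_size() == 4), page_range_subsumes_range() holds
 * for a request of [1, 6], page_range_subsumed_by_range() for [3, 4],
 * page_in_range() for page 3, page_range_in_range() for [5, 9], and
 * range_before_page() for page 6.
 */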
#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}
/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}
/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update backing file pos, since f_ops->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
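/*
 * E.g. calc_vm_may_flags(PROT_READ | PROT_WRITE) yields
 * VM_MAYREAD | VM_MAYWRITE. ashmem_mmap() below passes the complement of
 * the allowed prot mask, so the VM_MAY* bits a mapping must never gain
 * get cleared from vma->vm_flags.
 */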
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;
	vma->vm_flags |= VM_CAN_NONLINEAR;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		do_fallocate(range->asma->file,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}
static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}
static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}
/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			/* grow the request to cover the overlapping range */
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
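	/*
	 * For example, with 4096-byte pages, pin.offset == 8192 and
	 * pin.len == 12288 yield pgstart == 2 and pgend == 4, i.e. pages
	 * 2 through 4 inclusive.
	 */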
	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			/* first query the total, then scan that many pages */
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};
static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};
static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					  sizeof(struct ashmem_area),
					  0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
					  sizeof(struct ashmem_range),
					  0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "ashmem: failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	printk(KERN_INFO "ashmem: initialized\n");

	return 0;
}
static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	printk(KERN_INFO "ashmem: unloaded\n");
}
module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");
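
/*
 * Example userspace usage, as a rough sketch (illustrative only, not part
 * of this driver): it assumes the ASHMEM_* ioctls and struct ashmem_pin
 * from the ashmem UAPI header and elides error handling.
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *
 *	ioctl(fd, ASHMEM_SET_NAME, "example");	(only valid before mmap)
 *	ioctl(fd, ASHMEM_SET_SIZE, 4 * 4096);	(likewise)
 *
 *	char *p = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 *	struct ashmem_pin pin = { .offset = 4096, .len = 2 * 4096 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	(pages 1-2 become evictable)
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		handle_purged_contents();	(hypothetical helper)
 *
 * SET_NAME and SET_SIZE must precede the first mmap() because both fail
 * once asma->file has been set up; see set_name() and ashmem_ioctl()
 * above.
 */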