diff --git a/mm/shmem.c b/mm/shmem.c
index a0126c4371054edf7120aaea97fd4d2785e4fdbf..85bed948fafc8aecb22daf1f68023b91080a30f4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -833,6 +833,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
+       if (!info->swapped) {
+               list_del_init(&info->swaplist);
+               goto lost2;
+       }
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
@@ -894,19 +898,34 @@ found:
        inode = igrab(&info->vfs_inode);
        spin_unlock(&info->lock);
 
-       /* move head to start search for next from here */
-       list_move_tail(&shmem_swaplist, &info->swaplist);
+       /*
+        * Move _head_ to start search for next from here.
+        * But be careful: shmem_delete_inode checks list_empty without taking
+        * mutex, and there's an instant in list_move_tail when info->swaplist
+        * would appear empty, if it were the only one on shmem_swaplist.  We
+        * could avoid doing it if inode NULL; or use this minor optimization.
+        */
+       if (shmem_swaplist.next != &info->swaplist)
+               list_move_tail(&shmem_swaplist, &info->swaplist);
        mutex_unlock(&shmem_swaplist_mutex);
 
        error = 1;
        if (!inode)
                goto out;
+       /* Precharge page while we can wait, compensate afterwards */
+       error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+       if (error)
+               goto out;
+       error = radix_tree_preload(GFP_KERNEL);
+       if (error)
+               goto uncharge;
+       error = 1;
 
        spin_lock(&info->lock);
        ptr = shmem_swp_entry(info, idx, NULL);
        if (ptr && ptr->val == entry.val)
                error = add_to_page_cache(page, inode->i_mapping,
-                                               idx, GFP_ATOMIC);
+                                               idx, GFP_NOWAIT);
        if (error == -EEXIST) {
                struct page *filepage = find_get_page(inode->i_mapping, idx);
                error = 1;
@@ -931,6 +950,9 @@ found:
        if (ptr)
                shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
+       radix_tree_preload_end();
+uncharge:
+       mem_cgroup_uncharge_page(page);
 out:
        unlock_page(page);
        page_cache_release(page);
@@ -950,10 +972,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
-               if (info->swapped)
-                       found = shmem_unuse_inode(info, entry, page);
-               else
-                       list_del_init(&info->swaplist);
+               found = shmem_unuse_inode(info, entry, page);
                cond_resched();
                if (found)
                        goto out;
@@ -1016,18 +1035,23 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                remove_from_page_cache(page);
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
+               if (list_empty(&info->swaplist))
+                       inode = igrab(inode);
+               else
+                       inode = NULL;
                spin_unlock(&info->lock);
-               if (list_empty(&info->swaplist)) {
-                       mutex_lock(&shmem_swaplist_mutex);
-                       /* move instead of add in case we're racing */
-                       list_move_tail(&info->swaplist, &shmem_swaplist);
-                       mutex_unlock(&shmem_swaplist_mutex);
-               }
                swap_duplicate(swap);
                BUG_ON(page_mapped(page));
                page_cache_release(page);       /* pagecache ref */
                set_page_dirty(page);
                unlock_page(page);
+               if (inode) {
+                       mutex_lock(&shmem_swaplist_mutex);
+                       /* move instead of add in case we're racing */
+                       list_move_tail(&info->swaplist, &shmem_swaplist);
+                       mutex_unlock(&shmem_swaplist_mutex);
+                       iput(inode);
+               }
                return 0;
        }
 
@@ -1185,6 +1209,16 @@ repeat:
                goto done;
        error = 0;
        gfp = mapping_gfp_mask(mapping);
+       if (!filepage) {
+               /*
+                * Try to preload while we can wait, to not make a habit of
+                * draining atomic reserves; but don't latch on to this cpu.
+                */
+               error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+               if (error)
+                       goto failed;
+               radix_tree_preload_end();
+       }
 
        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
@@ -1266,7 +1300,7 @@ repeat:
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else if (!(error = add_to_page_cache(
-                               swappage, mapping, idx, GFP_ATOMIC))) {
+                               swappage, mapping, idx, GFP_NOWAIT))) {
                        info->flags |= SHMEM_PAGEIN;
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
@@ -1281,8 +1315,11 @@ repeat:
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        if (error == -ENOMEM) {
-                               /* let kswapd refresh zone for GFP_ATOMICs */
-                               congestion_wait(WRITE, HZ/50);
+                               /* allow reclaim from this memory cgroup */
+                               error = mem_cgroup_cache_charge(NULL,
+                                       current->mm, gfp & ~__GFP_HIGHMEM);
+                               if (error)
+                                       goto failed;
                        }
                        goto repeat;
                }
@@ -1329,6 +1366,17 @@ repeat:
                                goto failed;
                        }
 
+                       /* Precharge page while we can wait, compensate after */
+                       error = mem_cgroup_cache_charge(filepage, current->mm,
+                                                       gfp & ~__GFP_HIGHMEM);
+                       if (error) {
+                               page_cache_release(filepage);
+                               shmem_unacct_blocks(info->flags, 1);
+                               shmem_free_blocks(inode, 1);
+                               filepage = NULL;
+                               goto failed;
+                       }
+
                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
@@ -1338,8 +1386,9 @@ repeat:
                                shmem_swp_unmap(entry);
                        }
                        if (error || swap.val || 0 != add_to_page_cache_lru(
-                                       filepage, mapping, idx, GFP_ATOMIC)) {
+                                       filepage, mapping, idx, GFP_NOWAIT)) {
                                spin_unlock(&info->lock);
+                               mem_cgroup_uncharge_page(filepage);
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
@@ -1348,6 +1397,7 @@ repeat:
                                        goto failed;
                                goto repeat;
                        }
+                       mem_cgroup_uncharge_page(filepage);
                        info->flags |= SHMEM_PAGEIN;
                }
 
@@ -1931,8 +1981,7 @@ static int shmem_xattr_security_get(struct inode *inode, const char *name,
 {
        if (strcmp(name, "") == 0)
                return -EINVAL;
-       return security_inode_getsecurity(inode, name, buffer, size,
-                                         -EOPNOTSUPP);
+       return xattr_getsecurity(inode, name, buffer, size);
 }
 
 static int shmem_xattr_security_set(struct inode *inode, const char *name,