mm: remove the second argument of k[un]map_atomic()
author Cong Wang <amwang@redhat.com>
Fri, 25 Nov 2011 15:14:39 +0000 (23:14 +0800)
committer Cong Wang <amwang@redhat.com>
Wed, 7 Dec 2011 09:05:33 +0000 (17:05 +0800)
Signed-off-by: Cong Wang <amwang@redhat.com>
mm/bounce.c
mm/filemap.c
mm/ksm.c
mm/memory.c
mm/shmem.c
mm/swapfile.c
mm/vmalloc.c
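
Before the per-file hunks, a minimal before/after sketch of the calling convention being converted. The copy_into_page() helper below is hypothetical and exists only to illustrate the pattern; the real changes are the diffs that follow.

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper, for illustration only. */
	static void copy_into_page(struct page *dst, const void *src, size_t len)
	{
		void *kaddr;

		/*
		 * Old calling convention, removed by this commit:
		 *
		 *	kaddr = kmap_atomic(dst, KM_USER0);
		 *	memcpy(kaddr, src, len);
		 *	kunmap_atomic(kaddr, KM_USER0);
		 */

		/* New calling convention: the KM_* slot argument is gone. */
		kaddr = kmap_atomic(dst);
		memcpy(kaddr, src, len);
		kunmap_atomic(kaddr);
	}

With the stack-based kmap_atomic() implementation the caller no longer picks a KM_* slot; nesting is handled implicitly, which is why the paired kunmap_atomic() calls in the hunks below simply take the mapped address.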

index 4e9ae722af83eb3e9a113fba806356d44121a340..d1be02ca18898df63e705504395463b733639ddb 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -50,9 +50,9 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
        unsigned char *vto;
 
        local_irq_save(flags);
-       vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+       vto = kmap_atomic(to->bv_page);
        memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-       kunmap_atomic(vto, KM_BOUNCE_READ);
+       kunmap_atomic(vto);
        local_irq_restore(flags);
 }
 
index c0018f2d50e04e2ea03045989b742254be0a8489..1c5f8f7cf28ae19da425da792e31772f8b7bf77a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1332,10 +1332,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
         * taking the kmap.
         */
        if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                left = __copy_to_user_inatomic(desc->arg.buf,
                                                kaddr + offset, size);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                if (left == 0)
                        goto success;
        }
@@ -2062,7 +2062,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        size_t copied;
 
        BUG_ON(!in_atomic());
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
@@ -2072,7 +2072,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        return copied;
 }
index 310544a379ae9c7b886b3b50815e5f3d5a991ba8..a6d3fb7e6c10576eb6f075a61ebf43535da2ce53 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -672,9 +672,9 @@ error:
 static u32 calc_checksum(struct page *page)
 {
        u32 checksum;
-       void *addr = kmap_atomic(page, KM_USER0);
+       void *addr = kmap_atomic(page);
        checksum = jhash2(addr, PAGE_SIZE / 4, 17);
-       kunmap_atomic(addr, KM_USER0);
+       kunmap_atomic(addr);
        return checksum;
 }
 
@@ -683,11 +683,11 @@ static int memcmp_pages(struct page *page1, struct page *page2)
        char *addr1, *addr2;
        int ret;
 
-       addr1 = kmap_atomic(page1, KM_USER0);
-       addr2 = kmap_atomic(page2, KM_USER1);
+       addr1 = kmap_atomic(page1);
+       addr2 = kmap_atomic(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);
-       kunmap_atomic(addr2, KM_USER1);
-       kunmap_atomic(addr1, KM_USER0);
+       kunmap_atomic(addr2);
+       kunmap_atomic(addr1);
        return ret;
 }
 
index 829d437354022959eeca37082f695e2ee73025ed..07ace8725c837c1b9233dc2f5bc26d8a5c9699ae 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2428,7 +2428,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
         * fails, we just zero-fill it. Live with it.
         */
        if (unlikely(!src)) {
-               void *kaddr = kmap_atomic(dst, KM_USER0);
+               void *kaddr = kmap_atomic(dst);
                void __user *uaddr = (void __user *)(va & PAGE_MASK);
 
                /*
@@ -2439,7 +2439,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                 */
                if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
                        clear_page(kaddr);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                flush_dcache_page(dst);
        } else
                copy_user_highpage(dst, src, va, vma);
index d6722506d2da72d738aa5b7040501be64437f01a..5bc679dcec4d05e37f857aff874ebbd8e4f9d23d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1631,9 +1631,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                }
                inode->i_mapping->a_ops = &shmem_aops;
                inode->i_op = &shmem_symlink_inode_operations;
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                memcpy(kaddr, symname, len);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                set_page_dirty(page);
                unlock_page(page);
                page_cache_release(page);
index b1cd120607230b0770c35e0823d6389782ae734a..94d8a62cbc425161c8f8d2d784a33d5a4e54de31 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2426,9 +2426,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
                if (!(count & COUNT_CONTINUED))
                        goto out;
 
-               map = kmap_atomic(list_page, KM_USER0) + offset;
+               map = kmap_atomic(list_page) + offset;
                count = *map;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
 
                /*
                 * If this continuation count now has some space in it,
@@ -2471,7 +2471,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 
        offset &= ~PAGE_MASK;
        page = list_entry(head->lru.next, struct page, lru);
-       map = kmap_atomic(page, KM_USER0) + offset;
+       map = kmap_atomic(page) + offset;
 
        if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
                goto init_map;          /* jump over SWAP_CONT_MAX checks */
@@ -2481,26 +2481,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
                 * Think of how you add 1 to 999
                 */
                while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        BUG_ON(page == head);
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                }
                if (*map == SWAP_CONT_MAX) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        if (page == head)
                                return false;   /* add count continuation */
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
 init_map:              *map = 0;               /* we didn't zero the page */
                }
                *map += 1;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
                page = list_entry(page->lru.prev, struct page, lru);
                while (page != head) {
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                        *map = COUNT_CONTINUED;
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.prev, struct page, lru);
                }
                return true;                    /* incremented */
@@ -2511,22 +2511,22 @@ init_map:               *map = 0;               /* we didn't zero the page */
                 */
                BUG_ON(count != COUNT_CONTINUED);
                while (*map == COUNT_CONTINUED) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        BUG_ON(page == head);
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                }
                BUG_ON(*map == 0);
                *map -= 1;
                if (*map == 0)
                        count = 0;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
                page = list_entry(page->lru.prev, struct page, lru);
                while (page != head) {
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                        *map = SWAP_CONT_MAX | count;
                        count = COUNT_CONTINUED;
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.prev, struct page, lru);
                }
                return count == COUNT_CONTINUED;
index 3231bf3328781b023c9e3b0b772aa1685c940d31..1a39dcabe7d2598458fbf10d505d0cfa59acd709 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1879,9 +1879,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
                         * we can expect USER0 is not used (see vread/vwrite's
                         * function description)
                         */
-                       void *map = kmap_atomic(p, KM_USER0);
+                       void *map = kmap_atomic(p);
                        memcpy(buf, map + offset, length);
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                } else
                        memset(buf, 0, length);
 
@@ -1918,9 +1918,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
                         * we can expect USER0 is not used (see vread/vwrite's
                         * function description)
                         */
-                       void *map = kmap_atomic(p, KM_USER0);
+                       void *map = kmap_atomic(p);
                        memcpy(map + offset, buf, length);
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                }
                addr += length;
                buf += length;