X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=mm%2Ffilemap.c;h=d1060b8d3cd65ba696afce181c09fd1c2585f196;hb=1004879f25c5719517b3d8a3180a517e190e2f5b;hp=cb26e33fd0ff15cc9932ec44592c7ae7174ac0b7;hpb=71fa0a849b384f066dea6a2351c722c19846f4ac;p=mv-sheeva.git

diff --git a/mm/filemap.c b/mm/filemap.c
index cb26e33fd0f..d1060b8d3cd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -327,7 +327,7 @@ EXPORT_SYMBOL(sync_page_range);
  * @pos:	beginning offset in pages to write
  * @count:	number of bytes to write
  *
- * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
+ * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
  */
@@ -467,25 +467,15 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 }
 
 #ifdef CONFIG_NUMA
-struct page *page_cache_alloc(struct address_space *x)
+struct page *__page_cache_alloc(gfp_t gfp)
 {
 	if (cpuset_do_page_mem_spread()) {
 		int n = cpuset_mem_spread_node();
-		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
+		return alloc_pages_node(n, gfp, 0);
 	}
-	return alloc_pages(mapping_gfp_mask(x), 0);
+	return alloc_pages(gfp, 0);
 }
-EXPORT_SYMBOL(page_cache_alloc);
-
-struct page *page_cache_alloc_cold(struct address_space *x)
-{
-	if (cpuset_do_page_mem_spread()) {
-		int n = cpuset_mem_spread_node();
-		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
-	}
-	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
-}
-EXPORT_SYMBOL(page_cache_alloc_cold);
+EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
 static int __sleep_on_page_lock(void *word)
@@ -615,26 +605,6 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset)
 }
 EXPORT_SYMBOL(find_get_page);
 
-/**
- * find_trylock_page - find and lock a page
- * @mapping: the address_space to search
- * @offset: the page index
- *
- * Same as find_get_page(), but trylock it instead of incrementing the count.
- */
-struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
-{
-	struct page *page;
-
-	read_lock_irq(&mapping->tree_lock);
-	page = radix_tree_lookup(&mapping->page_tree, offset);
-	if (page && TestSetPageLocked(page))
-		page = NULL;
-	read_unlock_irq(&mapping->tree_lock);
-	return page;
-}
-EXPORT_SYMBOL(find_trylock_page);
-
 /**
  * find_lock_page - locate, pin and lock a pagecache page
  * @mapping: the address_space to search
@@ -814,7 +784,7 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
  * @mapping: target address_space
  * @index: the page index
  *
- * Same as grab_cache_page, but do not wait if the page is unavailable.
+ * Same as grab_cache_page(), but do not wait if the page is unavailable.
  * This is intended for speculative data generators, where the data can
  * be regenerated if the page couldn't be grabbed.  This routine should
  * be safe to call while holding the lock for another page.
@@ -826,7 +796,6 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
 	struct page *page = find_get_page(mapping, index);
-	gfp_t gfp_mask;
 
 	if (page) {
 		if (!TestSetPageLocked(page))
@@ -834,9 +803,8 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 		page_cache_release(page);
 		return NULL;
 	}
-	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
-	page = alloc_pages(gfp_mask, 0);
-	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
+	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
+	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
 		page_cache_release(page);
 		page = NULL;
 	}
@@ -1193,8 +1161,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		if (pos < size) {
 			retval = generic_file_direct_IO(READ, iocb,
 						iov, pos, nr_segs);
-			if (retval > 0 && !is_sync_kiocb(iocb))
-				retval = -EIOCBQUEUED;
 			if (retval > 0)
 				*ppos = pos + retval;
 		}
@@ -1457,7 +1423,6 @@ no_cached_page:
 	 * effect.
 	 */
 	error = page_cache_read(file, pgoff);
-	grab_swap_token();
 
 	/*
 	 * The page we want has now been added to the page cache.
@@ -1905,6 +1870,7 @@ int should_remove_suid(struct dentry *dentry)
 
 	return 0;
 }
+EXPORT_SYMBOL(should_remove_suid);
 
 int __remove_suid(struct dentry *dentry, int kill)
 {
@@ -2059,15 +2025,14 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * Sync the fs metadata but not the minor inode changes and
 	 * of course not the data as we did direct DMA for the IO.
 	 * i_mutex is held, which protects generic_osync_inode() from
-	 * livelocking.
+	 * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
 	 */
-	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+	if ((written >= 0 || written == -EIOCBQUEUED) &&
+	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
 		if (err < 0)
 			written = err;
 	}
-	if (written == count && !is_sync_kiocb(iocb))
-		written = -EIOCBQUEUED;
 	return written;
 }
 EXPORT_SYMBOL(generic_file_direct_write);
@@ -2114,21 +2079,27 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		/* Limit the size of the copy to the caller's write size */
 		bytes = min(bytes, count);
 
-		/*
-		 * Limit the size of the copy to that of the current segment,
-		 * because fault_in_pages_readable() doesn't know how to walk
-		 * segments.
+		/* We only need to worry about prefaulting when writes are from
+		 * user-space.  NFSd uses vfs_writev with several non-aligned
+		 * segments in the vector, and limiting to one segment a time is
+		 * a noticeable performance for re-write
 		 */
-		bytes = min(bytes, cur_iov->iov_len - iov_base);
-
-		/*
-		 * Bring in the user page that we will copy from _first_.
-		 * Otherwise there's a nasty deadlock on copying from the
-		 * same page as we're writing to, without it being marked
-		 * up-to-date.
-		 */
-		fault_in_pages_readable(buf, bytes);
+		if (!segment_eq(get_fs(), KERNEL_DS)) {
+			/*
+			 * Limit the size of the copy to that of the current
+			 * segment, because fault_in_pages_readable() doesn't
+			 * know how to walk segments.
+			 */
+			bytes = min(bytes, cur_iov->iov_len - iov_base);
+			/*
+			 * Bring in the user page that we will copy from
+			 * _first_.  Otherwise there's a nasty deadlock on
+			 * copying from the same page as we're writing to,
+			 * without it being marked up-to-date.
+			 */
+			fault_in_pages_readable(buf, bytes);
+		}
 
 		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
 		if (!page) {
 			status = -ENOMEM;
@@ -2281,7 +2252,7 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 	if (count == 0)
 		goto out;
 
-	err = remove_suid(file->f_dentry);
+	err = remove_suid(file->f_path.dentry);
 	if (err)
 		goto out;
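
Note on the allocator change above: this mm/filemap.c diff removes the exported
page_cache_alloc() and page_cache_alloc_cold() definitions and exports only
__page_cache_alloc(gfp_t). That only makes sense if the old entry points survive
as wrappers in the header; the companion include/linux/pagemap.h change is not
shown in this diff, but it presumably reduces to inline wrappers along these
lines (a sketch under that assumption, not the verbatim header change):

/* include/linux/pagemap.h -- sketch of the assumed companion change */

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	/* same behaviour as the removed filemap.c version */
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	/* the cache-cold variant folds down to a gfp flag */
	return __page_cache_alloc(mapping_gfp_mask(x) | __GFP_COLD);
}

Callers keep using page_cache_alloc(mapping) unchanged, while callers that
already know the gfp mask they want, like grab_cache_page_nowait() above
(which masks off __GFP_FS), can call __page_cache_alloc() directly.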
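
Note on the find_trylock_page() removal: any caller that still wants try-lock
semantics has to open-code them. A minimal illustrative sketch (the helper name
is hypothetical) built on find_get_page(); unlike the removed helper, which
returned the page locked but without a reference, this version takes a page
reference that the caller must drop later:

/*
 * Illustrative only: an open-coded stand-in for the removed
 * find_trylock_page().  On success the page is locked *and* referenced,
 * so the caller must unlock_page() and page_cache_release() it when done.
 */
static struct page *try_lock_cached_page(struct address_space *mapping,
					 unsigned long offset)
{
	struct page *page = find_get_page(mapping, offset);	/* takes a ref */

	if (page && TestSetPageLocked(page)) {
		/* already locked by someone else: drop our ref and back off */
		page_cache_release(page);
		page = NULL;
	}
	return page;
}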