From: Andrew Morton
Date: Thu, 29 Nov 2012 03:18:10 +0000 (+1100)
Subject: mm-introduce-new-field-managed_pages-to-struct-zone-fix
X-Git-Tag: next-20121205~1^2~228
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=07fd8379cca760a4d4288e033cd607e91b4d5d55;p=karo-tx-linux.git

mm-introduce-new-field-managed_pages-to-struct-zone-fix

small comment tweaks

Cc: Jiang Liu
Cc: Jiang Liu
Signed-off-by: Andrew Morton
---

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 099963337762..32bc955a8d7d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -451,7 +451,7 @@ struct zone {
 	/*
 	 * spanned_pages is the total pages spanned by the zone, including
-	 * holes, which is calcualted as:
+	 * holes, which is calculated as:
 	 * 	spanned_pages = zone_end_pfn - zone_start_pfn;
 	 *
 	 * present_pages is physical pages existing within the zone, which
@@ -469,9 +469,9 @@ struct zone {
 	 * by page allocator and vm scanner to calculate all kinds of watermarks
 	 * and thresholds.
 	 *
-	 * Lock Rules:
+	 * Locking rules:
 	 *
-	 * zone_start_pfn, spanned_pages are protected by span_seqlock.
+	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
 	 * It is a seqlock because it has to be read outside of zone->lock,
 	 * and it is done in the main allocator path. But, it is written
 	 * quite infrequently.
@@ -480,7 +480,7 @@ struct zone {
 	 * frequently read in proximity to zone->lock. It's good to
 	 * give them a chance of being in the same cacheline.
 	 *
-	 * Writing access to present_pages and managed_pages at runtime should
+	 * Write access to present_pages and managed_pages at runtime should
 	 * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
 	 * Any reader who can't tolerant drift of present_pages and
 	 * managed_pages should hold memory hotplug lock to get a stable value.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f736d4e77630..0f945af223d2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -738,7 +738,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
  * Read access to zone->managed_pages is safe because it's unsigned long,
  * but we still need to serialize writers. Currently all callers of
  * __free_pages_bootmem() except put_page_bootmem() should only be used
- * at boot time. So for shorter boot time, we have shift the burden to
+ * at boot time. So for shorter boot time, we shift the burden to
  * put_page_bootmem() to serialize writers.
  */
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
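
For context on the comment being reworded above, a minimal standalone sketch of how the three page counts it describes relate to one another. The struct name, the example values, and the asserted invariants are illustrative assumptions made for this note, not the kernel's actual struct zone definition or real numbers:

/*
 * Illustrative sketch (not kernel code): the relationship among the three
 * page counts described in the mmzone.h comment, using hypothetical values
 * for a zone that contains holes and some bootmem-reserved pages.
 */
#include <assert.h>

struct zone_counts {
	unsigned long zone_start_pfn;
	unsigned long zone_end_pfn;	/* first pfn past the end of the zone */
	unsigned long spanned_pages;	/* total span, including holes */
	unsigned long present_pages;	/* physical pages, holes excluded */
	unsigned long managed_pages;	/* pages handed to the buddy allocator */
};

int main(void)
{
	struct zone_counts z = {
		.zone_start_pfn = 0x1000,
		.zone_end_pfn   = 0x9000,
		.spanned_pages  = 0x8000,	/* zone_end_pfn - zone_start_pfn */
		.present_pages  = 0x7c00,	/* 0x400 pfns fall in holes */
		.managed_pages  = 0x7a00,	/* 0x200 pages kept back by bootmem */
	};

	/* Invariants implied by the comment this patch is rewording. */
	assert(z.spanned_pages == z.zone_end_pfn - z.zone_start_pfn);
	assert(z.present_pages <= z.spanned_pages);
	assert(z.managed_pages <= z.present_pages);
	return 0;
}

The last invariant is the reason managed_pages exists as a separate field: pages reserved at boot are present in the zone but never given to the buddy allocator, which is also why writers such as __free_pages_bootmem()/put_page_bootmem() need the serialization discussed in the page_alloc.c comment.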