This changes if() BUG(); constructs to BUG_ON(), which is
cleaner, contains unlikely() and can be better optimized away.
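For reference, the generic fallback (architectures may provide their
own version) is roughly the following sketch:

	/* simplified sketch of the generic BUG_ON() fallback */
	#define BUG_ON(condition) do { \
		if (unlikely(condition)) \
			BUG(); \
	} while (0)

so the unlikely() hint comes for free and the error path can be laid
out out of line by the compiler.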
Signed-off-by: Eric Sesterhenn <snakebyte@gmx.de>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
pkmap_count[i] = 0;
/* sanity check */
- if (pte_none(pkmap_page_table[i]))
- BUG();
+ BUG_ON(pte_none(pkmap_page_table[i]));
/*
* Don't need an atomic fetch-and-clear op here;
if (!vaddr)
vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++;
- if (pkmap_count[PKMAP_NR(vaddr)] < 2)
- BUG();
+ BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
spin_unlock(&kmap_lock);
return (void*) vaddr;
}
spin_lock(&kmap_lock);
vaddr = (unsigned long)page_address(page);
return 0;
page_pool = mempool_create_page_pool(POOL_SIZE, 0);
- if (!page_pool)
- BUG();
+ BUG_ON(!page_pool);
printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
return 0;
printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
return 0;
isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
mempool_free_pages, (void *) 0);
- if (!isa_page_pool)
- BUG();
+ BUG_ON(!isa_page_pool);
printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
return 0;
printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
return 0;