This is the last remaining slab destructor in the kernel, which
we kill off and move the resultant list tracking logic up to
the pmb_alloc()/pmb_free() paths.
As Christoph Lameter pointed out, it's potentially unsafe to be
taking the list lock in the destructor anyway, so this is also
more fundamentally correct.
With this in place, we're all set for killing off slab destructors
from the kernel entirely.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
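
For reference, the list tracking that moves into pmb_alloc()/pmb_free()
is the classic pointer-to-pointer singly-linked list walk. A minimal
userspace sketch of the same idiom follows; the node type and names are
illustrative only, not taken from the kernel:

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static struct node *list;

/* Walk to the final next pointer and hook the new node on the tail. */
static void list_add_tail(struct node *n)
{
	struct node **p = &list;

	while (*p != NULL)
		p = &(*p)->next;

	n->next = NULL;
	*p = n;
}

/* Unlink n by rewriting whichever pointer currently addresses it. */
static void list_del(struct node *n)
{
	struct node **p, *tmp;

	for (p = &list; (tmp = *p); p = &tmp->next)
		if (tmp == n) {
			*p = tmp->next;
			return;
		}
}

int main(void)
{
	struct node a, b;

	list_add_tail(&a);
	list_add_tail(&b);
	list_del(&a);
	assert(list == &b && b.next == NULL);
	return 0;
}

In the kernel the walk itself is cheap; what the patch changes is where
pmb_list_lock is taken around it, not the list mechanics.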
*
* Privileged Space Mapping Buffer (PMB) Support.
*
- * Copyright (C) 2005, 2006 Paul Mundt
+ * Copyright (C) 2005, 2006, 2007 Paul Mundt
*
* P1/P2 Section mapping definitions from map32.h, which was:
*
return mk_pmb_entry(entry) | PMB_DATA;
}
+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	p = &pmb_list;
+	while ((tmp = *p) != NULL)
+		p = &tmp->next;
+
+	pmbe->next = tmp;
+	*p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+		if (tmp == pmbe) {
+			*p = tmp->next;
+			return;
+		}
+}
+
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
unsigned long flags)
{
pmbe->ppn = ppn;
pmbe->flags = flags;
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_add(pmbe);
+	spin_unlock_irq(&pmb_list_lock);
+
return pmbe;
}
void pmb_free(struct pmb_entry *pmbe)
{
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_del(pmbe);
+	spin_unlock_irq(&pmb_list_lock);
+
kmem_cache_free(pmb_cache, pmbe);
}
clear_bit(entry, &pmb_map);
}
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	p = &pmb_list;
-	while ((tmp = *p) != NULL)
-		p = &tmp->next;
-
-	pmbe->next = tmp;
-	*p = pmbe;
-}
-
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
-		if (tmp == pmbe) {
-			*p = tmp->next;
-			return;
-		}
-}
static struct {
unsigned long size;
-static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
+			   unsigned long flags)
{
struct pmb_entry *pmbe = pmb;
memset(pmb, 0, sizeof(struct pmb_entry));
-	spin_lock_irq(&pmb_list_lock);
-
pmbe->entry = PMB_NO_ENTRY;
-	pmb_list_add(pmbe);
-
-	spin_unlock_irq(&pmb_list_lock);
-}
-
-static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
-{
-	spin_lock_irq(&pmb_list_lock);
-	pmb_list_del(pmb);
-	spin_unlock_irq(&pmb_list_lock);
}
static int __init pmb_init(void)
BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
-				      SLAB_PANIC, pmb_cache_ctor,
-				      pmb_cache_dtor);
+				      SLAB_PANIC, pmb_cache_ctor, NULL);
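
For context, a sketch of the kmem_cache_create() prototype as it stood
in this era, reconstructed around the hook signature visible in the
ctor hunk above (treat it as an assumption, not a verbatim header
quote). Passing NULL for the final argument is how a cache declines a
destructor:

/* 2.6.22-era prototype; both hook slots still exist at this point. */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
			size_t align, unsigned long flags,
			void (*ctor)(void *, struct kmem_cache *, unsigned long),
			void (*dtor)(void *, struct kmem_cache *, unsigned long));

Once every in-tree cache passes NULL here, the dtor argument itself can
be dropped from the interface, which is the follow-up the changelog
alludes to.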