#endif
}
-int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var(&mm->cpu_vm_mask_var, GFP_KERNEL))
-		return -ENOMEM;
-
-	if (oldmm)
-		cpumask_copy(mm_cpumask(mm), mm_cpumask(oldmm));
-	else
-		memset(mm_cpumask(mm), 0, cpumask_size());
-#endif
-	return 0;
-}
-
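
The allocating helper above is deleted outright; its replacement lives on
the header side of this patch and can no longer fail. A sketch of what the
non-allocating inline plausibly looks like, assuming the cpumask_allocation
field described in the FIXME at the bottom of this diff (the header-side
hunk is not shown here):

	/* Sketch: non-allocating replacement for mm_init_cpumask().
	 * With CONFIG_CPUMASK_OFFSTACK, cpu_vm_mask_var is simply
	 * repointed at storage embedded at the tail of mm_struct, so
	 * there is no GFP_KERNEL allocation and no error to unwind. */
	static inline void mm_init_cpumask(struct mm_struct *mm)
	{
	#ifdef CONFIG_CPUMASK_OFFSTACK
		mm->cpu_vm_mask_var = &mm->cpumask_allocation;
	#endif
	}
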
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);

@@ ... @@ struct mm_struct * mm_alloc(void)
		return NULL;

	memset(mm, 0, sizeof(*mm));
-	mm = mm_init(mm, current);
-	if (!mm)
-		return NULL;
-
-	if (mm_init_cpumask(mm, NULL)) {
-		mm_free_pgd(mm);
-		free_mm(mm);
-		return NULL;
-	}
-
-	return mm;
+	mm_init_cpumask(mm);
+	return mm_init(mm, current);
}
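
Net effect on mm_alloc(): initialization can no longer fail halfway, so
the whole unwind block collapses into two straight-line calls. The
resulting function, reconstructed from the context lines above (the
allocate_mm() boilerplate is assumed, not shown in this hunk):

	struct mm_struct * mm_alloc(void)
	{
		struct mm_struct * mm;

		mm = allocate_mm();
		if (!mm)
			return NULL;

		memset(mm, 0, sizeof(*mm));
		mm_init_cpumask(mm);	/* cannot fail any more */
		return mm_init(mm, current);
	}
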
@@ ... @@
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
-	free_cpumask_var(mm->cpu_vm_mask_var);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
@@ ... @@ struct mm_struct *dup_mm(struct task_struct *tsk)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));
+	mm_init_cpumask(mm);

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
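
Why the fixup sits immediately after the memcpy(): with
CONFIG_CPUMASK_OFFSTACK, the struct copy duplicates the parent's
cpu_vm_mask_var pointer, which still aliases the parent's embedded
storage. A sketch of the hazard being closed (the field name is taken
from the FIXME below, as an assumption):

	memcpy(mm, oldmm, sizeof(*mm));
	/* Here mm->cpu_vm_mask_var == &oldmm->cpumask_allocation, so a
	 * cpumask_set_cpu(cpu, mm_cpumask(mm)) would scribble on the
	 * parent's mask. */
	mm_init_cpumask(mm);	/* repoint at &mm->cpumask_allocation */

Note the child still inherits the parent's mask bits via the memcpy(),
matching the cpumask_copy() behavior of the deleted helper.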
@@ ... @@ struct mm_struct *dup_mm(struct task_struct *tsk)
	if (!mm_init(mm, tsk))
		goto fail_nomem;

-	if (mm_init_cpumask(mm, oldmm))
-		goto fail_nocpumask;
-
	if (init_new_context(tsk, mm))
		goto fail_nocontext;
@@ ... @@ struct mm_struct *dup_mm(struct task_struct *tsk)
	return NULL;

fail_nocontext:
-	free_cpumask_var(mm->cpu_vm_mask_var);
-
-fail_nocpumask:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
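
Since the replacement mm_init_cpumask() cannot fail, one unwind label
disappears and fail_nocontext falls through directly to the manual
cleanup again. A sketch of the surviving error path, with the cleanup
body assumed from the calls visible elsewhere in this diff:

	fail_nocontext:
		/* init_new_context() failed: mmput() would call
		 * destroy_context(), so unwind by hand instead. */
		mm_free_pgd(mm);
		free_mm(mm);
		return NULL;
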
@@ ... @@ void __init proc_caches_init(void)
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+	/*
+	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
+	 * whole struct cpumask for the OFFSTACK case. We could change
+	 * this to *only* allocate as much of it as required by the
+	 * maximum number of CPUs we can ever have. The cpumask_allocation
+	 * is at the end of the structure, exactly for that reason.
+	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
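
The FIXME points at an obvious follow-up: size the slab object by the
runtime CPU count instead of the compile-time mask. A hypothetical
sketch of that trimming, never part of this patch (the offsetof() +
cpumask_size() arithmetic assumes cpumask_allocation really is the last
field, as the FIXME claims):

	#ifdef CONFIG_CPUMASK_OFFSTACK
		/* Hypothetical: allocate only up to the embedded mask plus
		 * the bytes the runtime CPU mask actually needs. */
		size_t mm_struct_size = offsetof(struct mm_struct, cpumask_allocation)
					+ cpumask_size();
	#else
		size_t mm_struct_size = sizeof(struct mm_struct);
	#endif
		mm_cachep = kmem_cache_create("mm_struct",
				mm_struct_size, ARCH_MIN_MMSTRUCT_ALIGN,
				SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);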