arm/arm64: KVM: Add a protection parameter to create_hyp_mappings
author		Marc Zyngier <marc.zyngier@arm.com>
		Mon, 13 Jun 2016 14:00:45 +0000 (15:00 +0100)
committer	Christoffer Dall <christoffer.dall@linaro.org>
		Wed, 29 Jun 2016 11:59:14 +0000 (13:59 +0200)
Currently, create_hyp_mappings applies a "one size fits all" page
protection (PAGE_HYP). As we're heading towards separate protections
for different sections, let's make this protection a parameter, and
let the callers pass their preferred protection (PAGE_HYP for everyone
for the time being).

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
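
As a quick illustration of the new calling convention (a minimal sketch, not
part of the patch; the helper name map_hyp_example is hypothetical), a caller
now states its desired HYP protection explicitly instead of relying on the
PAGE_HYP that was previously hard-coded inside create_hyp_mappings:

	/*
	 * Minimal sketch of the updated API: the caller chooses the HYP
	 * protection for the range. Passing PAGE_HYP preserves the current
	 * behaviour; later patches in this direction can pass a more
	 * restrictive protection for sections such as rodata.
	 */
	static int map_hyp_example(void *start, void *end)
	{
		return create_hyp_mappings(start, end, PAGE_HYP);
	}

Because the protection is now a parameter, text, rodata and data mappings can
diverge later without any further change to the function signature.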
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm64/include/asm/kvm_mmu.h

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index f9a65061130b66a402fb6d2772b44f9321d926e6..6cb4d4d5c48c41b7018946ddc0529f2c96aa2de2 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -49,7 +49,7 @@
 #include <asm/pgalloc.h>
 #include <asm/stage2_pgtable.h>
 
-int create_hyp_mappings(void *from, void *to);
+int create_hyp_mappings(void *from, void *to, pgprot_t prot);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index f20ca84537f5d7849ac707d1eb939f42cba797d8..45dd6df70cdf87bd45e8534ff78fc10aaf480347 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -122,7 +122,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (ret)
                goto out_fail_alloc;
 
-       ret = create_hyp_mappings(kvm, kvm + 1);
+       ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
        if (ret)
                goto out_free_stage2_pgd;
 
@@ -239,7 +239,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_vcpu;
 
-       err = create_hyp_mappings(vcpu, vcpu + 1);
+       err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
        if (err)
                goto vcpu_uninit;
 
@@ -1293,14 +1293,14 @@ static int init_hyp_mode(void)
         * Map the Hyp-code called directly from the host
         */
        err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
-                                 kvm_ksym_ref(__hyp_text_end));
+                                 kvm_ksym_ref(__hyp_text_end), PAGE_HYP);
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_err;
        }
 
        err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
-                                 kvm_ksym_ref(__end_rodata));
+                                 kvm_ksym_ref(__end_rodata), PAGE_HYP);
        if (err) {
                kvm_err("Cannot map rodata section\n");
                goto out_err;
@@ -1311,7 +1311,8 @@ static int init_hyp_mode(void)
         */
        for_each_possible_cpu(cpu) {
                char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
-               err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
+               err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
+                                         PAGE_HYP);
 
                if (err) {
                        kvm_err("Cannot map hyp stack\n");
@@ -1323,7 +1324,7 @@ static int init_hyp_mode(void)
                kvm_cpu_context_t *cpu_ctxt;
 
                cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
-               err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);
+               err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
 
                if (err) {
                        kvm_err("Cannot map host CPU state: %d\n", err);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 45c43aecb8f2f30997015f454d3d85fb25d7ac7a..49cb5ccf6c2377edd02ec1167af45bd98485d9bb 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -679,12 +679,13 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:      The virtual kernel start address of the range
  * @to:                The virtual kernel end address of the range (exclusive)
+ * @prot:      The protection to be applied to this range
  *
  * The same virtual address as the kernel virtual address is also used
  * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
  * physical pages.
  */
-int create_hyp_mappings(void *from, void *to)
+int create_hyp_mappings(void *from, void *to, pgprot_t prot)
 {
        phys_addr_t phys_addr;
        unsigned long virt_addr;
@@ -704,7 +705,7 @@ int create_hyp_mappings(void *from, void *to)
                err = __create_hyp_mappings(hyp_pgd, virt_addr,
                                            virt_addr + PAGE_SIZE,
                                            __phys_to_pfn(phys_addr),
-                                           PAGE_HYP);
+                                           prot);
                if (err)
                        return err;
        }
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index f05ac27d033ed8419d36b871f9e607c87362298f..fdfbddbe9fbac5cd0b352465db821675ca451c68 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -81,7 +81,7 @@ alternative_endif
 
 #include <asm/stage2_pgtable.h>
 
-int create_hyp_mappings(void *from, void *to);
+int create_hyp_mappings(void *from, void *to, pgprot_t prot);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);