powerpc: Create disable_kernel_{fp,altivec,vsx,spe}()
author     Anton Blanchard <anton@samba.org>
           Thu, 29 Oct 2015 00:44:05 +0000 (11:44 +1100)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Tue, 1 Dec 2015 02:52:25 +0000 (13:52 +1100)
The enable_kernel_*() functions leave the relevant MSR bits enabled
until we exit the kernel sometime later. Create disable versions
that wrap the kernel use of FP, Altivec, VSX or SPE.

While we don't normally want to disable the MSR bits for performance
reasons (MSR writes are slow), the new calls will be used by a debug
boot option that does disable them and catches bad uses in other areas
of the kernel.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
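
As a rough sketch of the intended calling pattern (the enable/disable
helpers and the preempt calls are the real APIs touched by this patch;
the surrounding function and its body are purely illustrative):

/*
 * Illustrative only: the begin/end bracketing this patch adds around
 * kernel Altivec use.  disable_kernel_altivec() is a no-op for now,
 * but gives the planned debug boot option a hook to clear MSR_VEC
 * again and catch stray vector use elsewhere in the kernel.
 */
#include <linux/preempt.h>
#include <asm/switch_to.h>

static void example_vmx_section(void)
{
        preempt_disable();              /* MSR state is per-CPU */
        enable_kernel_altivec();        /* allow Altivec in kernel context */

        /* ... Altivec/VMX work goes here ... */

        disable_kernel_altivec();       /* new in this series */
        preempt_enable();
}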
15 files changed:
arch/powerpc/crypto/aes-spe-glue.c
arch/powerpc/crypto/sha1-spe-glue.c
arch/powerpc/crypto/sha256-spe-glue.c
arch/powerpc/include/asm/switch_to.h
arch/powerpc/kernel/align.c
arch/powerpc/kvm/book3s_paired_singles.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/lib/vmx-helper.c
arch/powerpc/lib/xor_vmx.c
drivers/crypto/vmx/aes.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/ghash.c
lib/raid6/altivec.uc

diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index bd5e63f72ad40ad5d9c2c5561bbc2af7c9196980..93ee046d12cde4f15dabcc6b6ae1b657920353c3 100644
@@ -85,6 +85,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index 3e1d2221252180ca302cca3e068f113c515b0a2a..f9ebc38d3fe79376956c83bf4bda9af3f11dd792 100644
@@ -46,6 +46,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index f4a616fe1a822e9b262d0848d7e235d97e99a8b3..718a079dcdbfb3b92919c58b1e084a4769f0f4db 100644
@@ -47,6 +47,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index c2678b93bcbacdbc545e37abd740215197140aaf..438502f59550ff0210f31e8352cdf2b405b83fe4 100644
@@ -26,6 +26,11 @@ extern void enable_kernel_spe(void);
 extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
+static inline void disable_kernel_fp(void) { }
+static inline void disable_kernel_altivec(void) { }
+static inline void disable_kernel_spe(void) { }
+static inline void disable_kernel_vsx(void) { }
+
 #ifdef CONFIG_PPC_FPU
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 86150fbb42c39111c7440523f482dbe55e0bd4d9..8e7cb8e2b21ac87b8ef60f141af16773085728e4 100644
@@ -960,6 +960,7 @@ int fix_alignment(struct pt_regs *regs)
                        preempt_disable();
                        enable_kernel_fp();
                        cvt_df(&data.dd, (float *)&data.x32.low32);
+                       disable_kernel_fp();
                        preempt_enable();
 #else
                        return 0;
@@ -1000,6 +1001,7 @@ int fix_alignment(struct pt_regs *regs)
                preempt_disable();
                enable_kernel_fp();
                cvt_fd((float *)&data.x32.low32, &data.dd);
+               disable_kernel_fp();
                preempt_enable();
 #else
                return 0;
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a759d9adb0b6f8218c38c52520dfe3cdfa70c884..eab96cfe82fa08772999704d07686a6dd61fa307 100644
@@ -1265,6 +1265,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
        if (rcomp)
                kvmppc_set_cr(vcpu, cr);
 
+       disable_kernel_fp();
        preempt_enable();
 
        return emulated;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 64891b081ad54f57bbaa603d3315bf32b48368c8..49f5dad1bd45869a41fdfe4811eabf237d87d074 100644
@@ -751,6 +751,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }
@@ -760,6 +761,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
+               disable_kernel_altivec();
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
 #endif
@@ -788,6 +790,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                preempt_enable();
        }
 #ifdef CONFIG_ALTIVEC
@@ -795,6 +798,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
+               disable_kernel_altivec();
                preempt_enable();
        }
 #endif
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index fd5875179e5c0e6738a1e7867a75f05cbe1fa0c8..778ef86e187eca40d63585a54b3d0ff4161ba613 100644
@@ -98,6 +98,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
+       disable_kernel_spe();
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
 }
@@ -107,6 +108,7 @@ static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
+       disable_kernel_spe();
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
 }
@@ -141,6 +143,7 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
        if (!(current->thread.regs->msr & MSR_FP)) {
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                current->thread.fp_save_area = &vcpu->arch.fp;
                current->thread.regs->msr |= MSR_FP;
        }
@@ -182,6 +185,7 @@ static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
                if (!(current->thread.regs->msr & MSR_VEC)) {
                        enable_kernel_altivec();
                        load_vr_state(&vcpu->arch.vr);
+                       disable_kernel_altivec();
                        current->thread.vr_save_area = &vcpu->arch.vr;
                        current->thread.regs->msr |= MSR_VEC;
                }
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index ac93a3bd27300f9d058f45a2df4838379ecde52e..b27e030fc9f865be037729f2aa46d1c3e1d35c38 100644
@@ -46,6 +46,7 @@ int enter_vmx_usercopy(void)
  */
 int exit_vmx_usercopy(void)
 {
+       disable_kernel_altivec();
        pagefault_enable();
        preempt_enable();
        return 0;
@@ -70,6 +71,7 @@ int enter_vmx_copy(void)
  */
 void *exit_vmx_copy(void *dest)
 {
+       disable_kernel_altivec();
        preempt_enable();
        return dest;
 }
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
index e905f7c2ea7bf9ce5a370a7442daa6b2210a38ac..07f49f1568e5eacccf5ea19929d572ff2eb68cfc 100644
@@ -74,6 +74,7 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
                v2 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_2);
@@ -102,6 +103,7 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
                v3 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_3);
@@ -135,6 +137,7 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
                v4 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_4);
@@ -172,6 +175,7 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
                v5 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_5);
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 20539fb7e975d2f43f7f93cb2188bb0e41b86d49..022c7ab7351a08c4f6afdff324e0b51eb1214bb1 100644
@@ -86,6 +86,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
 
@@ -104,6 +105,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
                pagefault_disable();
                enable_kernel_vsx();
                aes_p8_encrypt(src, dst, &ctx->enc_key);
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
@@ -120,6 +122,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
                pagefault_disable();
                enable_kernel_vsx();
                aes_p8_decrypt(src, dst, &ctx->dec_key);
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 8847b92e9ff07265965164a222feab374aaf6b30..1881b3f413fac2c8f386dff031bb535a4ae309d5 100644
@@ -87,6 +87,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
 
@@ -127,6 +128,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
 
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
@@ -167,6 +169,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
 
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 80958660c31ad5388092316a988838023a4a6d3d..2d58b18acc10a343c912cc879f6d2332c262c067 100644
@@ -83,6 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
        pagefault_disable();
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       disable_kernel_vsx();
        pagefault_enable();
 
        ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
@@ -101,6 +102,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
        pagefault_disable();
        enable_kernel_vsx();
        aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+       disable_kernel_vsx();
        pagefault_enable();
 
        crypto_xor(keystream, src, nbytes);
@@ -139,6 +141,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                                                    AES_BLOCK_SIZE,
                                                    &ctx->enc_key,
                                                    walk.iv);
+                       disable_kernel_vsx();
                        pagefault_enable();
 
                        /* We need to update IV mostly for last bytes/round */
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1f4586c2fd2550171a9a8cea6132e1a9f27074d3..6c999cb01b804839b4565a75a248063c0330dda3 100644
@@ -120,6 +120,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
        pagefault_disable();
        enable_kernel_vsx();
        gcm_init_p8(ctx->htable, (const u64 *) key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
        return crypto_shash_setkey(ctx->fallback, key, keylen);
@@ -150,6 +151,7 @@ static int p8_ghash_update(struct shash_desc *desc,
                        enable_kernel_vsx();
                        gcm_ghash_p8(dctx->shash, ctx->htable,
                                     dctx->buffer, GHASH_DIGEST_SIZE);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        src += GHASH_DIGEST_SIZE - dctx->bytes;
@@ -162,6 +164,7 @@ static int p8_ghash_update(struct shash_desc *desc,
                        pagefault_disable();
                        enable_kernel_vsx();
                        gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        src += len;
@@ -192,6 +195,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
                        enable_kernel_vsx();
                        gcm_ghash_p8(dctx->shash, ctx->htable,
                                     dctx->buffer, GHASH_DIGEST_SIZE);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        dctx->bytes = 0;
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index bec27fce7501702a7da2a16c621c1b91adb938f5..682aae8a1fef2d78ba289bb63cdf697b84cb1648 100644
@@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
 
        raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
 
+       disable_kernel_altivec();
        preempt_enable();
 }