git.karo-electronics.de Git - linux-beck.git/commitdiff
selftests/powerpc: Move VMX stack frame macros to header file
author: Cyril Bur <cyrilbur@gmail.com>
Fri, 23 Sep 2016 06:18:15 +0000 (16:18 +1000)
committer: Michael Ellerman <mpe@ellerman.id.au>
Tue, 4 Oct 2016 09:10:12 +0000 (20:10 +1100)
Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
tools/testing/selftests/powerpc/math/vmx_asm.S
tools/testing/selftests/powerpc/vmx_asm.h [new file with mode: 0644]

index 1b8c248b3ac18770a23a30c3946da27d92aa708a..fd74da48862597df634d41b649b1412cdd5fb02a 100644 (file)
@@ -8,90 +8,7 @@
  */
 
 #include "../basic_asm.h"
-
-# POS MUST BE 16 ALIGNED!
-#define PUSH_VMX(pos,reg) \
-       li      reg,pos; \
-       stvx    v20,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v21,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v22,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v23,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v24,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v25,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v26,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v27,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v28,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v29,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v30,reg,sp; \
-       addi    reg,reg,16; \
-       stvx    v31,reg,sp;
-
-# POS MUST BE 16 ALIGNED!
-#define POP_VMX(pos,reg) \
-       li      reg,pos; \
-       lvx     v20,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v21,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v22,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v23,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v24,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v25,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v26,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v27,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v28,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v29,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v30,reg,sp; \
-       addi    reg,reg,16; \
-       lvx     v31,reg,sp;
-
-# Carefull this will 'clobber' vmx (by design)
-# Don't call this from C
-FUNC_START(load_vmx)
-       li      r5,0
-       lvx     v20,r5,r3
-       addi    r5,r5,16
-       lvx     v21,r5,r3
-       addi    r5,r5,16
-       lvx     v22,r5,r3
-       addi    r5,r5,16
-       lvx     v23,r5,r3
-       addi    r5,r5,16
-       lvx     v24,r5,r3
-       addi    r5,r5,16
-       lvx     v25,r5,r3
-       addi    r5,r5,16
-       lvx     v26,r5,r3
-       addi    r5,r5,16
-       lvx     v27,r5,r3
-       addi    r5,r5,16
-       lvx     v28,r5,r3
-       addi    r5,r5,16
-       lvx     v29,r5,r3
-       addi    r5,r5,16
-       lvx     v30,r5,r3
-       addi    r5,r5,16
-       lvx     v31,r5,r3
-       blr
-FUNC_END(load_vmx)
+#include "../vmx_asm.h"
 
 # Should be safe from C, only touches r4, r5 and v0,v1,v2
 FUNC_START(check_vmx)
diff --git a/tools/testing/selftests/powerpc/vmx_asm.h b/tools/testing/selftests/powerpc/vmx_asm.h
new file mode 100644 (file)
index 0000000..2eaaeca
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "basic_asm.h"
+
+/* POS MUST BE 16 ALIGNED! */
+#define PUSH_VMX(pos,reg) \
+       li      reg,pos; \
+       stvx    v20,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v21,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v22,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v23,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v24,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v25,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v26,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v27,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v28,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v29,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v30,reg,%r1; \
+       addi    reg,reg,16; \
+       stvx    v31,reg,%r1;
+
+/* POS MUST BE 16 ALIGNED! */
+#define POP_VMX(pos,reg) \
+       li      reg,pos; \
+       lvx     v20,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v21,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v22,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v23,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v24,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v25,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v26,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v27,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v28,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v29,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v30,reg,%r1; \
+       addi    reg,reg,16; \
+       lvx     v31,reg,%r1;
+
+/*
+ * Careful this will 'clobber' vmx (by design)
+ * Don't call this from C
+ */
+FUNC_START(load_vmx)
+       li      r5,0
+       lvx     v20,r5,r3
+       addi    r5,r5,16
+       lvx     v21,r5,r3
+       addi    r5,r5,16
+       lvx     v22,r5,r3
+       addi    r5,r5,16
+       lvx     v23,r5,r3
+       addi    r5,r5,16
+       lvx     v24,r5,r3
+       addi    r5,r5,16
+       lvx     v25,r5,r3
+       addi    r5,r5,16
+       lvx     v26,r5,r3
+       addi    r5,r5,16
+       lvx     v27,r5,r3
+       addi    r5,r5,16
+       lvx     v28,r5,r3
+       addi    r5,r5,16
+       lvx     v29,r5,r3
+       addi    r5,r5,16
+       lvx     v30,r5,r3
+       addi    r5,r5,16
+       lvx     v31,r5,r3
+       blr
+FUNC_END(load_vmx)