#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H

/*
 * Optimized RAID-5 checksumming functions for MMX and SSE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Cache avoiding checksumming functions utilizing KNI instructions
 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
 */

/*
 * High-speed RAID5 checksumming functions utilizing SSE instructions.
 * Copyright (C) 1998 Ingo Molnar.
 */

/*
 * x86-64 changes / gcc fixes from Andi Kleen.
 * Copyright 2002 Andi Kleen, SuSE Labs.
 *
 * This hasn't been optimized for the hammer yet, but there are likely
 * no advantages to be gotten from x86-64 here anyways.
 */
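
/*
 * The string macros below are pasted together into the inline-asm
 * templates of the xor_sse_*() routines.  OFFS(x) names the x-th
 * 16-byte chunk of the current 256-byte block and PF_OFFS(x) the same
 * chunk one block (256 bytes) ahead.  PF0..PF5 issue prefetchnta (a
 * non-temporal prefetch that avoids polluting the caches) for sources
 * 1..6, LD/ST move 16-byte chunks between source 1 and the %xmm
 * registers, and XO1..XO5 xor in the matching chunk of sources 2..6.
 * As an example of how they expand, LD(0, 0) yields the string
 *
 *	" movaps 16*(0)(%[p1]), %%xmm0 ;\n"
 */
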
#define OFFS(x)		"16*("#x")"
#define PF_OFFS(x)	"256+16*("#x")"
#define PF0(x)		" prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
#define LD(x, y)	" movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
#define ST(x, y)	" movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
#define PF1(x)		" prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
#define PF2(x)		" prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
#define PF3(x)		" prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
#define PF4(x)		" prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
#define PF5(x)		" prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
#define XO1(x, y)	" xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
#define XO2(x, y)	" xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
#define XO3(x, y)	" xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
#define XO4(x, y)	" xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
#define XO5(x, y)	" xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned int lines = bytes >> 8;
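	/*
	 * Each pass of the unrolled loop handles one 256-byte block from
	 * every source (16 chunks of 16 bytes), so the pointers advance
	 * by %[inc] per pass and the pass count is bytes >> 8.
	 */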
95 " addq %[inc], %[p1] ;\n"
96 " addq %[inc], %[p2] ;\n"
97 " decl %[cnt] ; jnz 1b"
98 : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
static void
xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3)
{
	unsigned int lines = bytes >> 8;
150 " addq %[inc], %[p1] ;\n"
151 " addq %[inc], %[p2] ;\n"
152 " addq %[inc], %[p3] ;\n"
153 " decl %[cnt] ; jnz 1b"
154 : [cnt] "+r" (lines),
155 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
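
/*
 * Note: the 4- and 5-source routines pin the loop counter to %ecx
 * ("+c" in the asm constraints) instead of leaving the choice to the
 * register allocator, presumably to ease register pressure from the
 * extra source pointers; the 2- and 3-source routines use a plain "+r".
 */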
static void
xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4)
{
	unsigned int lines = bytes >> 8;
213 " addq %[inc], %[p1] ;\n"
214 " addq %[inc], %[p2] ;\n"
215 " addq %[inc], %[p3] ;\n"
216 " addq %[inc], %[p4] ;\n"
217 " decl %[cnt] ; jnz 1b"
218 : [cnt] "+c" (lines),
219 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned int lines = bytes >> 8;
284 " addq %[inc], %[p1] ;\n"
285 " addq %[inc], %[p2] ;\n"
286 " addq %[inc], %[p3] ;\n"
287 " addq %[inc], %[p4] ;\n"
288 " addq %[inc], %[p5] ;\n"
289 " decl %[cnt] ; jnz 1b"
290 : [cnt] "+c" (lines),
291 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
static struct xor_block_template xor_block_sse = {
	.name = "generic_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};
/* Also try the AVX routines */
#include <asm/xor_avx.h>
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse);		\
} while (0)
/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 cache only, depending on how
   the CPU deals with a load to a line that is being prefetched.  */
#define XOR_SELECT_TEMPLATE(FASTEST) \
	AVX_SELECT(&xor_block_sse)
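
/*
 * AVX_SELECT() is provided by <asm/xor_avx.h>: it selects the AVX xor
 * template when AVX is usable and otherwise falls back to its argument,
 * here the SSE template above.  The FASTEST result of the xor_speed()
 * calibration is deliberately ignored, per the comment above.
 */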
#endif /* _ASM_X86_XOR_64_H */