/* U3memcpy.S: UltraSparc-III optimized memcpy.
 *
 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 */
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE	%g7
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#define GLOBAL_SPARE	%g5
#endif
#define EX_RETVAL(x)	x

#define LOAD(type,addr,dest)	type [addr], dest

#define STORE(type,src,addr)	type src, [addr]

#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P

#define FUNC_NAME	U3memcpy
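/* LOAD/STORE/STORE_BLK, EX_LD/EX_ST, EX_RETVAL and FUNC_NAME are
 * deliberately macros: the plain memcpy build uses the defaults above,
 * while the user-copy wrappers (U3copy_from_user.S, U3copy_to_user.S)
 * supply their own definitions before including this file, so the same
 * body can pick up exception-table entries and a different return
 * convention.
 */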
	.register	%g2,#scratch
	.register	%g3,#scratch
	/* Special/non-trivial issues of this code:
	 *
	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
	 * 2) Only low 32 FPU registers are used so that only the
	 *    lower half of the FPU register set is dirtied by this
	 *    code.  This is especially important in the kernel.
	 * 3) This code never prefetches cachelines past the end
	 *    of the source buffer.
	 */
	/* The cheetah's flexible spine, oversized liver, enlarged heart,
	 * slender muscular body, and claws make it the swiftest hunter
	 * in Africa and the fastest animal on land.  Can reach speeds
	 * of up to 2.4GB per second.
	 */
	.type	FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
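	/* Rough shape of what follows, as an illustrative C-level sketch
	 * (register names and labels are the ones used in this file):
	 *
	 *	if (len is large enough for the VIS path) {
	 *		copy leading bytes until dst is 64-byte aligned;
	 *		pipelined loop: ldd the next 64 bytes of src,
	 *		realign them with faligndata, store 64 bytes at
	 *		a time with stda ASI_BLK_P, prefetching ahead;
	 *		copy the remaining len % 64 bytes with ldd/std;
	 *		finish sub-8-byte leftovers with integer loads;
	 *	} else {
	 *		short-copy paths at labels 70: and 80: below;
	 *	}
	 */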
	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
	 * o5 from here until we hit VISExitHalf.
	 */

	/* Is 'dst' already aligned on a 64-byte boundary? */
	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
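	/* An illustrative C sketch of that computation (%g2 holds
	 * dst & 0x3f and is non-zero on this path):
	 *
	 *	g2 = dst & 0x3f;	// e.g. 0x30
	 *	g2 = 0x40 - g2;		// == abs(g2 - 0x40) == 0x10
	 *	len -= g2;		// pre-subtract from 'len'
	 *
	 * i.e. for a dst ending in 0x30 we copy 0x10 leading bytes so the
	 * block-store loop starts on a 64-byte boundary.
	 */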
	sub	%o0, %o1, GLOBAL_SPARE
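	/* GLOBAL_SPARE now holds (dst - src).  The copy loops below only
	 * advance %o1 (src); stores of the form [%o1 + GLOBAL_SPARE] (or
	 * [%o1 + %o3] later on) therefore land on the matching destination
	 * byte, and dst is re-derived afterwards as %o1 + GLOBAL_SPARE.
	 */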
1:	subcc	%g1, 0x1, %g1
	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
	EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE))

	add	%o1, GLOBAL_SPARE, %o0
	alignaddr	%o1, %g0, %o1
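	/* alignaddr records the low three bits of (%o1 + %g0) in GSR.align
	 * and writes the 8-byte-aligned address back into %o1.  Each
	 * faligndata that follows concatenates two adjacent source
	 * doublewords (e.g. %f4:%f6) and extracts the eight bytes starting
	 * at that recorded offset, which is how misaligned source data is
	 * turned into aligned 8-byte chunks for the std/stda stores.
	 */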
	EX_LD(LOAD(ldd, %o1, %f4))
1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
	faligndata	%f4, %f6, %f0
	EX_ST(STORE(std, %f0, %o0))

	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
	faligndata	%f6, %f4, %f2
	EX_ST(STORE(std, %f2, %o0))
3:	LOAD(prefetch, %o1 + 0x000, #one_read)
	LOAD(prefetch, %o1 + 0x040, #one_read)
	andn	%o2, (0x40 - 1), GLOBAL_SPARE
	LOAD(prefetch, %o1 + 0x080, #one_read)
	LOAD(prefetch, %o1 + 0x0c0, #one_read)
	LOAD(prefetch, %o1 + 0x100, #one_read)
	EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
	LOAD(prefetch, %o1 + 0x140, #one_read)
	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
	LOAD(prefetch, %o1 + 0x180, #one_read)
	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
	LOAD(prefetch, %o1 + 0x1c0, #one_read)
	faligndata	%f0, %f2, %f16
	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
	faligndata	%f2, %f4, %f18
	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
	faligndata	%f4, %f6, %f20
	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
	faligndata	%f6, %f8, %f22
	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
	faligndata	%f8, %f10, %f24
	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
	faligndata	%f10, %f12, %f26
	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
	subcc	GLOBAL_SPARE, 0x80, GLOBAL_SPARE
	srl	GLOBAL_SPARE, 6, %o3
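	/* GLOBAL_SPARE is len rounded down to a multiple of 64; subtracting
	 * 0x80 leaves the final two 64-byte blocks to the code after the
	 * loop, so %o3 ends up as the number of blocks the pipelined loop
	 * below stores itself.  Each iteration loads and realigns the next
	 * 64 source bytes (and prefetches 0x1c0 ahead) while the previous
	 * block, already sitting in %f16-%f30, goes out with one 64-byte
	 * block store.
	 */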
	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
	faligndata	%f12, %f14, %f28
	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
	faligndata	%f14, %f0, %f30
	EX_ST(STORE_BLK(%f16, %o0))
	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
	faligndata	%f0, %f2, %f16
	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
	faligndata	%f2, %f4, %f18
	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
	faligndata	%f4, %f6, %f20
	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
	faligndata	%f6, %f8, %f22
	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
	faligndata	%f8, %f10, %f24
	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
	LOAD(prefetch, %o1 + 0x1c0, #one_read)
	faligndata	%f10, %f12, %f26
	/* Finally we copy the last full 64-byte block. */
	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
	faligndata	%f12, %f14, %f28
	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
	faligndata	%f14, %f0, %f30
	EX_ST(STORE_BLK(%f16, %o0))
	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
	faligndata	%f0, %f2, %f16
	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
	faligndata	%f2, %f4, %f18
	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
	faligndata	%f4, %f6, %f20
	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
	faligndata	%f6, %f8, %f22
	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
	faligndata	%f8, %f10, %f24
	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
1:	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	EX_ST(STORE_BLK(%f16, %o0))
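	/* The two block stores above drain the software pipeline: the data
	 * they write was loaded and realigned while earlier blocks were
	 * being stored, and leaving the last two blocks out of the main
	 * loop is what lets the loop keep loading 0x40 ahead without
	 * reading past the source data it still needs.
	 */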
	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
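	/* The tail loop below is the same realignment idea at 8 bytes per
	 * step, as a sketch:
	 *
	 *	prev = doubleword already in %f0;
	 *	while (at least 8 bytes remain) {
	 *		next = ldd(src + 8);            // never past the end
	 *		std(faligndata(prev, next));    // 8 realigned bytes
	 *		prev = next;
	 *	}
	 *
	 * which is why the ldd targets below alternate between %f2 and %f0.
	 */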
	EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
1:	EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
	faligndata	%f0, %f2, %f8
	EX_ST(STORE(std, %f8, %o0))

	EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
	faligndata	%f2, %f0, %f8
	EX_ST(STORE(std, %f8, %o0))
	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x3) saved above before the
	 * alignaddr was performed.
	 */
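	/* Whatever remains is finished off below in descending 8-, 4-, 2-
	 * and 1-byte chunks, with each store addressed as [%o1 + %o3]
	 * (the same dst - src offset trick used earlier, here kept in %o3).
	 */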
	EX_LD(LOAD(ldx, %o1, %o5))
	EX_ST(STORE(stx, %o5, %o1 + %o3))

1:	andcc	%o2, 0x4, %g0
	EX_LD(LOAD(lduw, %o1, %o5))
	EX_ST(STORE(stw, %o5, %o1 + %o3))

1:	andcc	%o2, 0x2, %g0
	EX_LD(LOAD(lduh, %o1, %o5))
	EX_ST(STORE(sth, %o5, %o1 + %o3))

1:	andcc	%o2, 0x1, %g0
	EX_LD(LOAD(ldub, %o1, %o5))
	EX_ST(STORE(stb, %o5, %o1 + %o3))
70: /* 16 < len <= 64 */
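	/* Short copies land here when both pointers allow 8-byte accesses;
	 * the bulk is then moved 16 bytes per iteration with ldx/stx pairs,
	 * storing through the offset kept in %o3 as before.
	 */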
	andn	%o2, 0xf, GLOBAL_SPARE
1:	subcc	GLOBAL_SPARE, 0x10, GLOBAL_SPARE
	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	EX_ST(STORE(stx, %g1, %o1 + %o3))

73:	andcc	%o2, 0x8, %g0
	EX_LD(LOAD(ldx, %o1, %o5))
	EX_ST(STORE(stx, %o5, %o1 + %o3))

1:	andcc	%o2, 0x4, %g0
	EX_LD(LOAD(lduw, %o1, %o5))
	EX_ST(STORE(stw, %o5, %o1 + %o3))

	EX_LD(LOAD(ldub, %o1, %o5))
	EX_ST(STORE(stb, %o5, %o1 + %o3))
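	/* The remaining case is a short copy whose pointers do not share
	 * 8-byte alignment.  The idea below, as a sketch (the shift counts
	 * are derived from src & 7), is to align src down to 8 bytes and
	 * build each output doubleword from two adjacent aligned loads:
	 *
	 *	out = (prev << shift) | (next >> (64 - shift));
	 *
	 * %g2/%g3 carry the two source doublewords and %o5 the merged
	 * result that the stx writes out.
	 */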
	EX_LD(LOAD(ldx, %o1, %g2))
	andn	%o2, 0x7, GLOBAL_SPARE
1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
	subcc	GLOBAL_SPARE, 0x8, GLOBAL_SPARE
	EX_ST(STORE(stx, %o5, %o0))
80: /* 0 < len <= 16 */
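	/* Very short copies: the lduw/stw loop below moves a word at a
	 * time, again storing via the offset in %o3; pointer pairs that do
	 * not allow word accesses take the byte-at-a-time path instead.
	 */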
	EX_LD(LOAD(lduw, %o1, %g1))
	EX_ST(STORE(stw, %g1, %o1 + %o3))

	mov	EX_RETVAL(%o4), %o0

	EX_LD(LOAD(ldub, %o1, %g1))
	EX_ST(STORE(stb, %g1, %o1 + %o3))

	mov	EX_RETVAL(%o4), %o0
	.size	FUNC_NAME, .-FUNC_NAME