/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>			/* def of L1_CACHE_BYTES */

#define RAPF_MIN_BUF_SIZE	(3*L1_CACHE_BYTES)
/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner so should have virtually no cost.
 */
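/*
 * For illustration, the 1-byte variant below therefore emits the following
 * shape (a sketch only; the fixup and exception-table plumbing threaded in
 * by the macros is omitted):
 *
 *		SETB [%0],D1Ar1		! start the store
 *	2:	SETB [%0++],D1Ar1	! duplicate store forces any pending
 *					! fault to surface at this label
 *
 * The "2:" label is what the __ex_table entry points at, so the fault is
 * attributed to an instruction the fixup code knows how to unwind.
 */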
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	asm volatile (					\
			COPY				\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
			FIXUP				\
		"	MOVT    D1Ar1,#HI(1b)\n"	\
		"	JUMP    D1Ar1,#LO(1b)\n"	\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
			TENTRY				\
		"	.previous\n"			\
		: "=r" (to), "=r" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "D1Ar1", "memory")
#define __asm_copy_to_user_1(to, from, ret)	\
	__asm_copy_user_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"	\
		"	SETB [%0],D1Ar1\n"	\
		"2:	SETB [%0++],D1Ar1\n",	\
		"3:	ADD  %2,%2,#1\n",	\
		"	.long 2b,3b\n")
#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETW D1Ar1,[%1++]\n"		\
		"	SETW [%0],D1Ar1\n"		\
		"2:	SETW [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"4:	SETB [%0++],D1Ar1\n",		\
		"5:	ADD  %2,%2,#1\n",		\
		"	.long 4b,5b\n")
#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETD D1Ar1,[%1++]\n"		\
		"	SETD [%0],D1Ar1\n"		\
		"2:	SETD [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"4:	SETB [%0++],D1Ar1\n",		\
		"5:	ADD  %2,%2,#1\n",		\
		"	.long 4b,5b\n")
#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	GETW D1Ar1,[%1++]\n"		\
		"	SETW [%0],D1Ar1\n"		\
		"4:	SETW [%0++],D1Ar1\n" COPY,	\
		"5:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"6:	SETB [%0++],D1Ar1\n",		\
		"7:	ADD  %2,%2,#1\n",		\
		"	.long 6b,7b\n")
#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	GETD D1Ar1,[%1++]\n"		\
		"	SETD [%0],D1Ar1\n"		\
		"4:	SETD [%0++],D1Ar1\n" COPY,	\
		"5:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"6:	SETB [%0++],D1Ar1\n",		\
		"7:	ADD  %2,%2,#1\n",		\
		"	.long 6b,7b\n")
#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	GETW D1Ar1,[%1++]\n"		\
		"	SETW [%0],D1Ar1\n"		\
		"6:	SETW [%0++],D1Ar1\n" COPY,	\
		"7:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"8:	SETB [%0++],D1Ar1\n",		\
		"9:	ADD  %2,%2,#1\n",		\
		"	.long 8b,9b\n")
#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	GETD D1Ar1,[%1++]\n"		\
		"	SETD [%0],D1Ar1\n"		\
		"6:	SETD [%0++],D1Ar1\n" COPY,	\
		"7:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"8:	SETB [%0++],D1Ar1\n",		\
		"9:	ADD  %2,%2,#1\n",		\
		"	.long 8b,9b\n")
#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	GETW D1Ar1,[%1++]\n"		\
		"	SETW [%0],D1Ar1\n"		\
		"8:	SETW [%0++],D1Ar1\n" COPY,	\
		"9:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"10:	SETB [%0++],D1Ar1\n",		\
		"11:	ADD  %2,%2,#1\n",		\
		"	.long 10b,11b\n")
#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	GETD D1Ar1,[%1++]\n"		\
		"	SETD [%0],D1Ar1\n"		\
		"8:	SETD [%0++],D1Ar1\n" COPY,	\
		"9:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_8x64(to, from, ret) \
	asm volatile (					\
		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
		"	SETL [%0],D0Ar2,D1Ar1\n"	\
		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	ADD  %2,%2,#8\n"		\
		"	MOVT    D0Ar2,#HI(1b)\n"	\
		"	JUMP    D0Ar2,#LO(1b)\n"	\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.long 2b,3b\n"			\
		"	.previous\n"			\
		: "=r" (to), "=r" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "D1Ar1", "D0Ar2", "memory")
/*
 *	optimized copying loop using RAPF when 64 bit aligned
 *
 *	n		will be automatically decremented inside the loop
 *	ret		will be left intact. if an error occurs we will rewind
 *			so that the original non-optimized code will fill up
 *			this value correctly.
 *
 *	on fault:
 *		> n will hold total number of uncopied bytes
 *
 *		> {'to','from'} will be rewound so that
 *			the non-optimized code will do the proper fix up
 *
 *	DCACHE drops the cacheline which helps in reducing cache
 *	pollution.
 *
 *	We introduce an extra SETL at the end of the loop to
 *	ensure we don't fall off the loop before we catch all
 *	errors.
 *
 *	NOTICE:
 *		LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *		since we're using M{S,G}ETL, a fault might happen at
 *		any address in the middle of M{S,G}ETL causing
 *		the value of LSM_STEP to be incorrect which can
 *		cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *		ie: if LSM_STEP was 1 when a fault occurs, the
 *		next call to M{S,G}ET{L,D} will skip the first
 *		copy/getting as it thinks that the first one has already
 *		been copied.
 */
#define __asm_copy_user_64bit_rapf_loop(			\
		to, from, ret, n, id, FIXUP)			\
	asm volatile (						\
		".balign 8\n"					\
		"MOV	RAPF, %1\n"				\
		"MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"MOV	D0Ar6, #0\n"				\
		"LSR	D1Ar5, %3, #6\n"			\
		"SUB	TXRPT, D1Ar5, #2\n"			\
		"MOV	RAPF, %1\n"				\
		"$Lloop"id":\n"					\
		"ADD	RAPF, %1, #64\n"			\
		"21:\n"						\
		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"22:\n"						\
		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #32\n"				\
		"23:\n"						\
		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"24:\n"						\
		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #32\n"				\
		"DCACHE	[%1+#-64], D0Ar6\n"			\
		"BR	$Lloop"id"\n"				\
								\
		"MOV	RAPF, %1\n"				\
		"25:\n"						\
		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"26:\n"						\
		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #32\n"				\
		"27:\n"						\
		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"28:\n"						\
		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%0, %0, #8\n"				\
		"29:\n"						\
		"SETL	[%0++], D0.7, D1.7\n"			\
		"SUB	%3, %3, #32\n"				\
		"1:"						\
		"DCACHE	[%1+#-64], D0Ar6\n"			\
		"GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
		"GETL    D0FrT, D1RtP, [A0StP+#-32]\n"		\
		"GETL    D0.5, D1.5, [A0StP+#-24]\n"		\
		"GETL    D0.6, D1.6, [A0StP+#-16]\n"		\
		"GETL    D0.7, D1.7, [A0StP+#-8]\n"		\
		"SUB A0StP, A0StP, #40\n"			\
		"	.section .fixup,\"ax\"\n"		\
		"4:\n"						\
		"	ADD	%0, %0, #8\n"			\
		"3:\n"						\
		"	MOV	D0Ar2, TXSTATUS\n"		\
		"	MOV	D1Ar1, TXSTATUS\n"		\
		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"	\
		"	MOV	TXSTATUS, D1Ar1\n"		\
			FIXUP					\
		"	MOVT    D0Ar2,#HI(1b)\n"		\
		"	JUMP    D0Ar2,#LO(1b)\n"		\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.long 21b,3b\n"				\
		"	.long 22b,3b\n"				\
		"	.long 23b,3b\n"				\
		"	.long 24b,3b\n"				\
		"	.long 25b,3b\n"				\
		"	.long 26b,3b\n"				\
		"	.long 27b,3b\n"				\
		"	.long 28b,3b\n"				\
		"	.long 29b,4b\n"				\
		"	.previous\n"				\
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)	\
		: "0" (to), "1" (from), "2" (ret), "3" (n)	\
		: "D1Ar1", "D0Ar2", "cc", "memory")
/*	rewind 'to' and 'from' pointers when a fault occurs
 *
 *	Rationale:
 *		A fault always occurs on writing to user buffer. A fault
 *		is at a single address, so we need to rewind by only 8
 *		bytes.
 *		Since we do a complete read from kernel buffer before
 *		writing, we need to rewind it also. The amount to rewind
 *		equals the number of faulty writes in MSETL
 *		which is: [4 - (LSM_STEP-1)]*8
 *		LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 *		and stored in D0Ar2
 *
 *		NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *			LSM_STEP will be 0. ie: we do 4 writes in our case, if
 *			a fault happens at the 4th write, LSM_STEP will be 0
 *			instead of 4. The code copes with that.
 *
 *		n is updated by the number of successful writes, which is:
 *		n = n - (LSM_STEP-1)*8
 */
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,	\
		"LSR	D0Ar2, D0Ar2, #8\n"			\
		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
		"SUB	D0Ar2, D0Ar2, #1\n"			\
		"MOV	D1Ar1, #4\n"				\
		"SUB	D0Ar2, D1Ar1, D0Ar2\n"			\
		"LSL	D0Ar2, D0Ar2, #3\n"			\
		"LSL	D1Ar1, D1Ar1, #3\n"			\
		"SUB	D1Ar1, D1Ar1, D0Ar2\n"			\
		"SUB	%0, %0, #8\n"				\
		"SUB	%1, %1, D0Ar2\n"			\
		"SUB	%3, %3, D1Ar1\n")
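/*
 * A minimal C model of the fix-up arithmetic above (illustrative only, not
 * compiled; 'lsm_step' stands for TXSTATUS bits 10:8 as captured in D0Ar2):
 *
 *	unsigned int step   = lsm_step ? lsm_step : 4; // 0 means fault on last op
 *	unsigned int faulty = 4 - (step - 1);          // MSETL writes to redo
 *	to   -= 8;                                     // rewind one 8-byte write
 *	from -= faulty * 8;                            // re-read the faulty longwords
 *	n    -= (step - 1) * 8;                        // credit bytes that did land
 */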
/*
 *	optimized copying loop using RAPF when 32 bit aligned
 *
 *	n		will be automatically decremented inside the loop
 *	ret		will be left intact. if an error occurs we will rewind
 *			so that the original non-optimized code will fill up
 *			this value correctly.
 *
 *	on fault:
 *		> n will hold total number of uncopied bytes
 *
 *		> {'to','from'} will be rewound so that
 *			the non-optimized code will do the proper fix up
 *
 *	DCACHE drops the cacheline which helps in reducing cache
 *	pollution.
 *
 *	We introduce an extra SETD at the end of the loop to
 *	ensure we don't fall off the loop before we catch all
 *	errors.
 *
 *	NOTICE:
 *		LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *		since we're using M{S,G}ETD, a fault might happen at
 *		any address in the middle of M{S,G}ETD causing
 *		the value of LSM_STEP to be incorrect which can
 *		cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *		ie: if LSM_STEP was 1 when a fault occurs, the
 *		next call to M{S,G}ET{L,D} will skip the first
 *		copy/getting as it thinks that the first one has already
 *		been copied.
 */
#define __asm_copy_user_32bit_rapf_loop(			\
		to, from, ret, n, id, FIXUP)			\
	asm volatile (						\
		".balign 8\n"					\
		"MOV	RAPF, %1\n"				\
		"MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"MOV	D0Ar6, #0\n"				\
		"LSR	D1Ar5, %3, #6\n"			\
		"SUB	TXRPT, D1Ar5, #2\n"			\
		"MOV	RAPF, %1\n"				\
		"$Lloop"id":\n"					\
		"ADD	RAPF, %1, #64\n"			\
		"21:\n"						\
		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"22:\n"						\
		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #16\n"				\
		"23:\n"						\
		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"24:\n"						\
		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #16\n"				\
		"25:\n"						\
		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"26:\n"						\
		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #16\n"				\
		"27:\n"						\
		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"28:\n"						\
		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #16\n"				\
		"DCACHE	[%1+#-64], D0Ar6\n"			\
		"BR	$Lloop"id"\n"				\
								\
		"MOV	RAPF, %1\n"				\
		"29:\n"						\
		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"30:\n"						\
		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #16\n"				\
		"31:\n"						\
		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"32:\n"						\
		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #16\n"				\
		"33:\n"						\
		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"34:\n"						\
		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%3, %3, #16\n"				\
		"35:\n"						\
		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"36:\n"						\
		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"SUB	%0, %0, #4\n"				\
		"37:\n"						\
		"SETD	[%0++], D0.7\n"				\
		"SUB	%3, %3, #16\n"				\
		"1:"						\
		"DCACHE	[%1+#-64], D0Ar6\n"			\
		"GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
		"GETL    D0FrT, D1RtP, [A0StP+#-32]\n"		\
		"GETL    D0.5, D1.5, [A0StP+#-24]\n"		\
		"GETL    D0.6, D1.6, [A0StP+#-16]\n"		\
		"GETL    D0.7, D1.7, [A0StP+#-8]\n"		\
		"SUB A0StP, A0StP, #40\n"			\
		"	.section .fixup,\"ax\"\n"		\
		"4:\n"						\
		"	ADD	%0, %0, #4\n"			\
		"3:\n"						\
		"	MOV	D0Ar2, TXSTATUS\n"		\
		"	MOV	D1Ar1, TXSTATUS\n"		\
		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"	\
		"	MOV	TXSTATUS, D1Ar1\n"		\
			FIXUP					\
		"	MOVT    D0Ar2,#HI(1b)\n"		\
		"	JUMP    D0Ar2,#LO(1b)\n"		\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.long 21b,3b\n"				\
		"	.long 22b,3b\n"				\
		"	.long 23b,3b\n"				\
		"	.long 24b,3b\n"				\
		"	.long 25b,3b\n"				\
		"	.long 26b,3b\n"				\
		"	.long 27b,3b\n"				\
		"	.long 28b,3b\n"				\
		"	.long 29b,3b\n"				\
		"	.long 30b,3b\n"				\
		"	.long 31b,3b\n"				\
		"	.long 32b,3b\n"				\
		"	.long 33b,3b\n"				\
		"	.long 34b,3b\n"				\
		"	.long 35b,3b\n"				\
		"	.long 36b,3b\n"				\
		"	.long 37b,4b\n"				\
		"	.previous\n"				\
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)	\
		: "0" (to), "1" (from), "2" (ret), "3" (n)	\
		: "D1Ar1", "D0Ar2", "cc", "memory")
/*	rewind 'to' and 'from' pointers when a fault occurs
 *
 *	Rationale:
 *		A fault always occurs on writing to user buffer. A fault
 *		is at a single address, so we need to rewind by only 4
 *		bytes.
 *		Since we do a complete read from kernel buffer before
 *		writing, we need to rewind it also. The amount to rewind
 *		equals the number of faulty writes in MSETD
 *		which is: [4 - (LSM_STEP-1)]*4
 *		LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 *		and stored in D0Ar2
 *
 *		NOTE: If a fault occurs at the last operation in M{G,S}ETD
 *			LSM_STEP will be 0. ie: we do 4 writes in our case, if
 *			a fault happens at the 4th write, LSM_STEP will be 0
 *			instead of 4. The code copes with that.
 *
 *		n is updated by the number of successful writes, which is:
 *		n = n - (LSM_STEP-1)*4
 */
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,	\
		"LSR	D0Ar2, D0Ar2, #8\n"			\
		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
		"SUB	D0Ar2, D0Ar2, #1\n"			\
		"MOV	D1Ar1, #4\n"				\
		"SUB	D0Ar2, D1Ar1, D0Ar2\n"			\
		"LSL	D0Ar2, D0Ar2, #2\n"			\
		"LSL	D1Ar1, D1Ar1, #2\n"			\
		"SUB	D1Ar1, D1Ar1, D0Ar2\n"			\
		"SUB	%0, %0, #4\n"				\
		"SUB	%1, %1, D0Ar2\n"			\
		"SUB	%3, %3, D1Ar1\n")
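/*
 * The 32-bit fix-up above is the same calculation scaled for 4-byte units:
 * the shifts use #2 instead of #3 and 'to' is rewound by 4 rather than 8,
 * since the extra probe write is a SETD rather than a SETL.
 */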
unsigned long __copy_user(void __user *pdst, const void *psrc,
			  unsigned long n)
{
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							n, "64cu");
		}
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
		if (retn)
			return retn + n;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	/*
	 * If we get here, retn correctly reflects the number of failing
	 * bytes.
	 */
	return retn;
}
EXPORT_SYMBOL(__copy_user);
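/*
 * Usage sketch (assumed wrapper, as in asm-metag/uaccess.h): the return
 * value is the number of bytes that could NOT be copied, so a typical
 * caller does:
 *
 *	if (__copy_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */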
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"	\
		"2:	SETB [%0++],D1Ar1\n",	\
		"3:	ADD  %2,%2,#1\n",	\
		"	.long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETW D1Ar1,[%1++]\n"		\
		"2:	SETW [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"4:	SETB [%0++],D1Ar1\n",		\
		"5:	ADD  %2,%2,#1\n",		\
		"	.long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETD D1Ar1,[%1++]\n"		\
		"2:	SETD [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile (					\
		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	ADD  %2,%2,#8\n"		\
		"	MOVT    D0Ar2,#HI(1b)\n"	\
		"	JUMP    D0Ar2,#LO(1b)\n"	\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.long 2b,3b\n"			\
		"	.previous\n"			\
		: "=a" (to), "=r" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "D1Ar1", "D0Ar2", "memory")
/*	rewind 'from' pointer when a fault occurs
 *
 *	Rationale:
 *		A fault occurs while reading from user buffer, which is the
 *		source. Since the fault is at a single address, we only
 *		need to rewind by 8 bytes.
 *		Since we don't write to kernel buffer until we read first,
 *		the kernel buffer is at the right state and needn't be
 *		corrected.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
		"SUB	%1, %1, #8\n")
/*	rewind 'from' pointer when a fault occurs
 *
 *	Rationale:
 *		A fault occurs while reading from user buffer, which is the
 *		source. Since the fault is at a single address, we only
 *		need to rewind by 4 bytes.
 *		Since we don't write to kernel buffer until we read first,
 *		the kernel buffer is at the right state and needn't be
 *		corrected.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
		"SUB	%1, %1, #4\n")
/*
 * Copy from user to kernel. The return-value is the number of bytes that
 * were inaccessible.
 */
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
				 unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;
	register const char __user *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_from_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_from_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_from_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* Copy using fast 64bit rapf */
			__asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
							n, "64cuz");
		}
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}

	if (n >= RAPF_MIN_BUF_SIZE) {
		/* Copy using fast 32bit rapf */
		__asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
						n, "32cuz");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 4) {
		__asm_copy_from_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	/* If we get here, there were no memory read faults. */
	switch (n) {
		/* These copies are at least "naturally aligned" (so we don't
		   have to check each byte), due to the src alignment code.
		   The *_3 case *will* get the correct count for retn. */
	case 0:
		/* This case deliberately left in (if you have doubts check the
		   generated assembly code). */
		break;
	case 1:
		__asm_copy_from_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_from_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_from_user_3(dst, src, retn);
		break;
	}

	/* If we get here, retn correctly reflects the number of failing
	   bytes. */
	return retn;
}
EXPORT_SYMBOL(raw_copy_from_user);
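/*
 * Example caller behaviour (generic kernel convention, not specific to this
 * file): copy_from_user() wraps raw_copy_from_user() and treats any
 * non-zero return as a fault:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */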
#define __asm_clear_8x64(to, ret) \
	asm volatile (					\
		"	MOV  D0Ar2,#0\n"		\
		"	MOV  D1Ar1,#0\n"		\
		"	SETL [%0],D0Ar2,D1Ar1\n"	\
		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	ADD  %1,%1,#8\n"		\
		"	MOVT    D0Ar2,#HI(1b)\n"	\
		"	JUMP    D0Ar2,#LO(1b)\n"	\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.long 2b,3b\n"			\
		"	.previous\n"			\
		: "=r" (to), "=r" (ret)			\
		: "0" (to), "1" (ret)			\
		: "D1Ar1", "D0Ar2", "memory")
/* Zero userspace. */
#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile (					\
		"	MOV D1Ar1,#0\n"			\
			CLEAR				\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
			FIXUP				\
		"	MOVT    D1Ar1,#HI(1b)\n"	\
		"	JUMP    D1Ar1,#LO(1b)\n"	\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
			TENTRY				\
		"	.previous\n"			\
		: "=r" (to), "=r" (ret)			\
		: "0" (to), "1" (ret)			\
		: "D1Ar1", "memory")
#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret,			\
		"	SETB [%0],D1Ar1\n"	\
		"2:	SETB [%0++],D1Ar1\n",	\
		"3:	ADD  %1,%1,#1\n",	\
		"	.long 2b,3b\n")
#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret,			\
		"	SETW [%0],D1Ar1\n"	\
		"2:	SETW [%0++],D1Ar1\n",	\
		"3:	ADD  %1,%1,#2\n",	\
		"	.long 2b,3b\n")
#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret,			\
		"2:	SETW [%0++],D1Ar1\n"	\
		"	SETB [%0],D1Ar1\n"	\
		"3:	SETB [%0++],D1Ar1\n",	\
		"4:	ADD  %1,%1,#2\n"	\
		"5:	ADD  %1,%1,#1\n",	\
		"	.long 2b,4b\n"		\
		"	.long 3b,5b\n")
#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret,				\
		"	SETD [%0],D1Ar1\n"		\
		"2:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"3:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret,			\
		"	SETD [%0],D1Ar1\n"		\
		"4:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"5:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret,			\
		"	SETD [%0],D1Ar1\n"		\
		"6:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"7:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret,			\
		"	SETD [%0],D1Ar1\n"		\
		"8:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"9:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
	register char __user *dst asm ("D0Re0") = pto;
	register unsigned long n asm ("D1Re0") = pn;
	register unsigned long retn asm ("D0Ar6") = 0;

	if ((unsigned long) dst & 1) {
		__asm_clear_1(dst, retn);
		n--;
	}

	if ((unsigned long) dst & 2) {
		__asm_clear_2(dst, retn);
		n -= 2;
	}

	/* 64 bit copy loop */
	if (!((__force unsigned long) dst & 7)) {
		while (n >= 8) {
			__asm_clear_8x64(dst, retn);
			n -= 8;
		}
	}

	while (n >= 16) {
		__asm_clear_16(dst, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_clear_4(dst, retn);
		n -= 4;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_clear_1(dst, retn);
		break;
	case 2:
		__asm_clear_2(dst, retn);
		break;
	case 3:
		__asm_clear_3(dst, retn);
		break;
	}

	return retn;
}
EXPORT_SYMBOL(__do_clear_user);
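/*
 * Usage sketch: clear_user() is assumed to wrap this after an access_ok()
 * check; the return value is the number of bytes left unzeroed:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */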
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;
	asm volatile (
		"	GETB %0,[%2]\n"
		"1:\n"
		"	GETB %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV     D0FrT,%3\n"
		"	SETD    [%1],D0FrT\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);
unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;
	asm volatile (
		"	GETW %0,[%2]\n"
		"1:\n"
		"	GETW %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV     D0FrT,%3\n"
		"	SETD    [%1],D0FrT\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);
unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;
	asm volatile (
		"	GETD %0,[%2]\n"
		"1:\n"
		"	GETD %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV     D0FrT,%3\n"
		"	SETD    [%1],D0FrT\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);
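/*
 * These helpers back the get_user() macro, which is assumed to dispatch on
 * the access size roughly as follows (sketch):
 *
 *	switch (sizeof(*ptr)) {
 *	case 1: x = __get_user_asm_b(ptr, &err); break;
 *	case 2: x = __get_user_asm_w(ptr, &err); break;
 *	case 4: x = __get_user_asm_d(ptr, &err); break;
 *	}
 */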
long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	MOV  %0,#0\n"
		"	SETB [%2],%1\n"
		"1:\n"
		"	SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV     %0,%3\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);
long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	MOV  %0,#0\n"
		"	SETW [%2],%1\n"
		"1:\n"
		"	SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV     %0,%3\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);
long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	MOV  %0,#0\n"
		"	SETD [%2],%1\n"
		"1:\n"
		"	SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV     %0,%3\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);
long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	MOV  %0,#0\n"
		"	SETL [%2],%1,%t1\n"
		"1:\n"
		"	SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV     %0,%3\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);
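/*
 * In __put_user_asm_l() the "%1"/"%t1" pair is assumed to name the bottom
 * and top 32-bit halves of the 64-bit operand's register pair, so a single
 * SETL stores all 8 bytes; the other variants store through SETB/SETW/SETD.
 */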
long strnlen_user(const char __user *src, long count)
{
	long res;

	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile ("	MOV     D0Ar4, %1\n"
		      "	MOV     D0Ar6, %2\n"
		      "0:\n"
		      "	SUBS    D0FrT, D0Ar6, #0\n"
		      "	SUB     D0Ar6, D0Ar6, #1\n"
		      "	BLE     2f\n"
		      "	GETB    D0FrT, [D0Ar4+#1++]\n"
		      "1:\n"
		      "	TST     D0FrT, #255\n"
		      "	BNZ     0b\n"
		      "2:\n"
		      "	SUB     %0, %2, D0Ar6\n"
		      "3:\n"
		      "	.section .fixup,\"ax\"\n"
		      "4:\n"
		      "	MOV     %0, #0\n"
		      "	MOVT    D0FrT,#HI(3b)\n"
		      "	JUMP    D0FrT,#LO(3b)\n"
		      "	.previous\n"
		      "	.section __ex_table,\"a\"\n"
		      "	.long 1b,4b\n"
		      "	.previous\n"
		      : "=r" (res)
		      : "r" (src), "r" (count)
		      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);
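/*
 * Usage sketch: strnlen_user() counts the terminating NUL and returns 0
 * only when the buffer is inaccessible, so callers check for zero:
 *
 *	long len = strnlen_user(ustr, sizeof(kbuf));
 *	if (!len)
 *		return -EFAULT;
 */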
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 *  So do we.
	 *
	 *  This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1 = count;
	 *
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *	  ;
	 *
	 *	res = count - tmp1;
	 *
	 *  with tweaks.
	 */

	asm volatile ("	MOV  %0,%3\n"
		      "1:\n"
		      "	GETB D0FrT,[%2++]\n"
		      "2:\n"
		      "	CMP  D0FrT,#0\n"
		      "	SETB [%1++],D0FrT\n"
		      "	BEQ  3f\n"
		      "	SUBS %0,%0,#1\n"
		      "	BNZ  1b\n"
		      "3:\n"
		      "	SUB  %0,%3,%0\n"
		      "4:\n"
		      "	.section .fixup,\"ax\"\n"
		      "5:\n"
		      "	MOV  %0,%7\n"
		      "	MOVT    D0FrT,#HI(4b)\n"
		      "	JUMP    D0FrT,#LO(4b)\n"
		      "	.previous\n"
		      "	.section __ex_table,\"a\"\n"
		      "	.long 2b,5b\n"
		      "	.previous"
		      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		      : "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);