#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/thread_info.h>	/* current_thread_info() for get_fs()/set_fs() */

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(type, uaddr, size) (1)

#define put_user __put_user
#define get_user __get_user
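
/*
 * Illustrative sketch (not part of the original header; helper name and
 * caller are hypothetical): since access_ok() always succeeds here, the
 * fault handling carries the full cost of the access. get_user() and
 * put_user() return 0 on success or -EFAULT if the fixup path ran.
 */
#if 0	/* example only */
static inline int example_bump_user_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val + 1, uptr);
}
#endif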

#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr)		__get_user_asm64(ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on a 64bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
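
/*
 * Sketch (an assumption; this mirrors the generic relative-extable
 * helpers in lib/extable.c rather than code in this header): each
 * stored value is "target - &field", so adding the field's own address
 * back recovers the absolute address.
 */
#if 0	/* example only */
static inline unsigned long
example_extable_insn_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}
#endif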

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS,
 * which is 0), or with the current value of %%sr3 to access user space
 * (USER_DS) memory. The following __get_user_asm() and __put_user_asm()
 * functions have %%sr2 hard-coded to access the requested memory.
 */

#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )

#define __get_user(x, ptr)					\
({								\
	register long __gu_err __asm__ ("r8") = 0;		\
	register long __gu_val __asm__ ("r9") = 0;		\
								\
	load_sr2();						\
	switch (sizeof(*(ptr))) {				\
	case 1: __get_user_asm("ldb", ptr); break;		\
	case 2: __get_user_asm("ldh", ptr); break;		\
	case 4: __get_user_asm("ldw", ptr); break;		\
	case 8: LDD_USER(ptr); break;				\
	default: BUILD_BUG(); break;				\
	}							\
								\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})
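
/*
 * Note (added): any access size other than 1/2/4/8 bytes is rejected at
 * compile time by the BUILD_BUG() in the default case, so a mistyped
 * pointer (e.g. to a hypothetical 16-byte struct) never reaches runtime.
 */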

#define __get_user_asm(ldx, ptr)				\
	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)		\
		: "r"(ptr), "1"(__gu_err)			\
		: "r1");

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(ptr)					\
	__asm__("\n1:\tldw 0(%%sr2,%2),%0"			\
		"\n2:\tldw 4(%%sr2,%2),%R0\n\t"			\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)		\
		: "r"(ptr), "1"(__gu_err)			\
		: "r1");

#endif /* !defined(CONFIG_64BIT) */

#define __put_user(x, ptr)					\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	load_sr2();						\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm("stb", __x, ptr); break;		\
	case 2: __put_user_asm("sth", __x, ptr); break;		\
	case 4: __put_user_asm("stw", __x, ptr); break;		\
	case 8: STD_USER(__x, ptr); break;			\
	default: BUILD_BUG(); break;				\
	}							\
								\
	__pu_err;						\
})

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that "fixup_put_user_skip_[12]" are executed in the
 * context of the fault, and any registers used there must be listed
 * as clobbers. In this case only "r1" is used by the current routines.
 * r8/r9 are already listed as err/val.
 */

#define __put_user_asm(stx, x, ptr)				\
	__asm__ __volatile__ (					\
		"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(x), "0"(__pu_err)		\
		: "r1")

#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {			\
	__asm__ __volatile__ (					\
		"\n1:\tstw %2,0(%%sr2,%1)"			\
		"\n2:\tstw %R2,4(%%sr2,%1)\n\t"			\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(__val), "0"(__pu_err)		\
		: "r1");					\
} while (0)

#endif /* !defined(CONFIG_64BIT) */

/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user
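
/*
 * Illustrative sketch (hypothetical caller, example only): both helpers
 * report how much work was left undone. clear_user() returns the number
 * of bytes it could not zero, and strnlen_user() returns 0 on fault
 * (otherwise the string length including the terminating NUL).
 */
#if 0	/* example only */
static inline long example_scrub_and_measure(char __user *buf, long max)
{
	if (clear_user(buf, max))		/* non-zero => fault */
		return -EFAULT;
	return strnlen_user(buf, max);		/* 0 on fault */
}
#endif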

unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
					  unsigned long len);
unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src,
			   unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = n;

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		ret = __copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	if (unlikely(ret))
		memset(to + (n - ret), 0, ret);

	return ret;
}
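
/*
 * Illustrative sketch (hypothetical struct and caller): the return
 * value is the number of bytes NOT copied, and on a partial copy the
 * memset above zeroes the tail of the kernel buffer so stale kernel
 * memory is never exposed through a half-filled structure.
 */
#if 0	/* example only */
struct example_req { int op; int arg; };

static inline int example_fetch_req(struct example_req *r,
				    const void __user *src)
{
	if (copy_from_user(r, src, sizeof(*r)))
		return -EFAULT;	/* some bytes missing; rest were zeroed */
	return 0;
}
#endif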

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = __copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
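
/*
 * Usage sketch (hypothetical caller, example only): unlike
 * copy_from_user(), nothing needs zeroing on failure in this
 * direction; the caller simply reports the fault.
 */
#if 0	/* example only */
static inline int example_publish(void __user *dst, const int *val)
{
	return copy_to_user(dst, val, sizeof(*val)) ? -EFAULT : 0;
}
#endif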

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */