11
11
#include <linux/bug.h>
12
12
#include <linux/string.h>
13
13
14
-#define KERNEL_DS	((mm_segment_t){0})
-#define USER_DS 	((mm_segment_t){1})
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
14
/*
23
15
* Note that since kernel addresses are in a separate address space on
24
16
* parisc, we don't need to do anything for access_ok().
33
25
#define get_user __get_user
34
26
35
27
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
-#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
+#define LDD_USER(sr, val, ptr)	__get_user_asm64(sr, val, ptr)
+#define STD_USER(sr, x, ptr)	__put_user_asm64(sr, x, ptr)
 #else
-#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
-#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
+#define LDD_USER(sr, val, ptr)	__get_user_asm(sr, val, "ldd", ptr)
+#define STD_USER(sr, x, ptr)	__put_user_asm(sr, "std", x, ptr)
 #endif
42
34
43
35
/*
@@ -67,28 +59,15 @@ struct exception_table_entry {
67
59
 #define ASM_EXCEPTIONTABLE_ENTRY_EFAULT(fault_addr, except_addr)\
 	ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr + 1)
69
61
70
-/*
- * load_sr2() preloads the space register %%sr2 - based on the value of
- * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
- * is 0), or with the current value of %%sr3 to access user space (USER_DS)
- * memory. The following __get_user_asm() and __put_user_asm() functions have
- * %%sr2 hard-coded to access the requested memory.
- */
-#define load_sr2() \
-	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
-		" mfsp %%sr3,%0\n\t"		\
-		" mtsp %0,%%sr2\n\t"		\
-		: : "r"(get_fs()) : )
-
-#define __get_user_internal(val, ptr)		\
+#define __get_user_internal(sr, val, ptr)	\
84
63
({ \
85
64
register long __gu_err __asm__ ("r8") = 0; \
86
65
\
87
66
 	switch (sizeof(*(ptr))) {		\
-	case 1: __get_user_asm(val, "ldb", ptr); break; \
-	case 2: __get_user_asm(val, "ldh", ptr); break; \
-	case 4: __get_user_asm(val, "ldw", ptr); break; \
-	case 8: LDD_USER(val, ptr); break;	\
+	case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
+	case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
+	case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
+	case 8: LDD_USER(sr, val, ptr); break;	\
92
71
default: BUILD_BUG(); \
93
72
} \
94
73
\
@@ -97,15 +76,14 @@ struct exception_table_entry {
97
76
98
77
 #define __get_user(val, ptr)			\
 ({						\
-	load_sr2();				\
-	__get_user_internal(val, ptr);		\
+	__get_user_internal("%%sr3,", val, ptr);	\
 })
103
81
104
-#define __get_user_asm(val, ldx, ptr)		\
+#define __get_user_asm(sr, val, ldx, ptr)	\
 {						\
 	register long __gu_val;			\
 						\
-	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"	\
+	__asm__("1: " ldx " 0(" sr "%2),%0\n"	\
109
87
"9:\n" \
110
88
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
111
89
: "=r"(__gu_val), "=r"(__gu_err) \
@@ -114,18 +92,31 @@ struct exception_table_entry {
114
92
(val) = (__force __typeof__(*(ptr))) __gu_val; \
115
93
}
116
94
95
+#define HAVE_GET_KERNEL_NOFAULT
+#define __get_kernel_nofault(dst, src, type, err_label)	\
+{								\
+	type __z;						\
+	long __err;						\
+	__err = __get_user_internal("%%sr0,", __z, (type *)(src)); \
+	if (unlikely(__err))					\
+		goto err_label;					\
+	else							\
+		*(type *)(dst) = __z;				\
+}
+
+
117
108
 #if !defined(CONFIG_64BIT)
 
-#define __get_user_asm64(val, ptr)		\
+#define __get_user_asm64(sr, val, ptr)		\
 {						\
 	union {					\
 		unsigned long long	l;	\
 		__typeof__(*(ptr))	t;	\
 	} __gu_tmp;				\
 						\
 	__asm__("   copy %%r0,%R0\n"		\
-		"1: ldw 0(%%sr2,%2),%0\n"	\
-		"2: ldw 4(%%sr2,%2),%R0\n"	\
+		"1: ldw 0(" sr "%2),%0\n"	\
+		"2: ldw 4(" sr "%2),%R0\n"	\
129
120
"9:\n" \
130
121
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
131
122
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
@@ -138,16 +129,16 @@ struct exception_table_entry {
138
129
#endif /* !defined(CONFIG_64BIT) */
139
130
140
131
141
-#define __put_user_internal(x, ptr)				\
+#define __put_user_internal(sr, x, ptr)				\
 ({								\
 	register long __pu_err __asm__ ("r8") = 0;		\
 	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
 								\
 	switch (sizeof(*(ptr))) {				\
-	case 1: __put_user_asm("stb", __x, ptr); break;		\
-	case 2: __put_user_asm("sth", __x, ptr); break;		\
-	case 4: __put_user_asm("stw", __x, ptr); break;		\
-	case 8: STD_USER(__x, ptr); break;			\
+	case 1: __put_user_asm(sr, "stb", __x, ptr); break;	\
+	case 2: __put_user_asm(sr, "sth", __x, ptr); break;	\
+	case 4: __put_user_asm(sr, "stw", __x, ptr); break;	\
+	case 8: STD_USER(sr, __x, ptr); break;			\
151
142
default: BUILD_BUG(); \
152
143
} \
153
144
\
@@ -156,10 +147,20 @@ struct exception_table_entry {
156
147
157
148
 #define __put_user(x, ptr)			\
 ({						\
-	load_sr2();				\
-	__put_user_internal(x, ptr);		\
+	__put_user_internal("%%sr3,", x, ptr);	\
 })
162
152
153
+#define __put_kernel_nofault(dst, src, type, err_label)	\
+{								\
+	type __z = *(type *)(src);				\
+	long __err;						\
+	__err = __put_user_internal("%%sr0,", __z, (type *)(dst)); \
+	if (unlikely(__err))					\
+		goto err_label;					\
+}
+
+
+
163
164
164
165
/*
165
166
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
@@ -170,26 +171,26 @@ struct exception_table_entry {
170
171
* r8 is already listed as err.
171
172
*/
172
173
173
-#define __put_user_asm(stx, x, ptr)				\
-	__asm__ __volatile__ (					\
-		"1: " stx " %2,0(%%sr2,%1)\n"			\
-		"9:\n"						\
-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
-		: "=r"(__pu_err)				\
+#define __put_user_asm(sr, stx, x, ptr)				\
+	__asm__ __volatile__ (					\
+		"1: " stx " %2,0(" sr "%1)\n"			\
+		"9:\n"						\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
+		: "=r"(__pu_err)				\
 	: "r"(ptr), "r"(x), "0"(__pu_err))
180
181
181
182
182
183
 #if !defined(CONFIG_64BIT)
 
-#define __put_user_asm64(__val, ptr) do {			\
-	__asm__ __volatile__ (					\
-		"1: stw %2,0(%%sr2,%1)\n"			\
-		"2: stw %R2,4(%%sr2,%1)\n"			\
-		"9:\n"						\
-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
-		: "=r"(__pu_err)				\
-		: "r"(ptr), "r"(__val), "0"(__pu_err));		\
+#define __put_user_asm64(sr, __val, ptr) do {			\
+	__asm__ __volatile__ (					\
+		"1: stw %2,0(" sr "%1)\n"			\
+		"2: stw %R2,4(" sr "%1)\n"			\
+		"9:\n"						\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
+		: "=r"(__pu_err)				\
+		: "r"(ptr), "r"(__val), "0"(__pu_err));		\
 } while (0)
194
195
195
196
#endif /* !defined(CONFIG_64BIT) */
@@ -200,14 +201,12 @@ struct exception_table_entry {
200
201
*/
201
202
202
203
 extern long strncpy_from_user(char *, const char __user *, long);
-extern unsigned lclear_user(void __user *, unsigned long);
-extern long lstrnlen_user(const char __user *, long);
+extern __must_check unsigned lclear_user(void __user *, unsigned long);
+extern __must_check long strnlen_user(const char __user *src, long n);
 /*
  * Complex access routines -- macros
  */
-#define user_addr_max() (~0UL)
 
-#define strnlen_user lstrnlen_user
 #define clear_user lclear_user
 #define __clear_user lclear_user
213
212
0 commit comments