Line data Source code
1 : #ifndef _ASM_X86_UACCESS_H
2 : #define _ASM_X86_UACCESS_H
3 : /*
4 : * User space memory access functions
5 : */
6 : #include <linux/errno.h>
7 : #include <linux/compiler.h>
8 : #include <linux/thread_info.h>
9 : #include <linux/string.h>
10 : #include <asm/asm.h>
11 : #include <asm/page.h>
12 : #include <asm/smap.h>
13 :
14 : #define VERIFY_READ 0
15 : #define VERIFY_WRITE 1
16 :
17 : /*
18 : * The fs value determines whether argument validity checking should be
19 : * performed or not. If get_fs() == USER_DS, checking is performed; with
20 : * get_fs() == KERNEL_DS, checking is bypassed.
21 : *
22 : * For historical reasons, these macros are grossly misnamed.
23 : */
24 :
25 : #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26 :
27 : #define KERNEL_DS MAKE_MM_SEG(-1UL)
28 : #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
29 :
30 : #define get_ds() (KERNEL_DS)
31 : #define get_fs() (current_thread_info()->addr_limit)
32 : #define set_fs(x) (current_thread_info()->addr_limit = (x))
33 :
34 : #define segment_eq(a, b) ((a).seg == (b).seg)
35 :
36 : #define user_addr_max() (current_thread_info()->addr_limit.seg)
37 : #define __addr_ok(addr) \
38 : ((unsigned long __force)(addr) < user_addr_max())
39 :
40 : /*
41 : * Test whether a block of memory is a valid user space address.
42 : * Returns false if the range is valid, true otherwise.
43 : */
44 : static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
45 : {
46 : /*
47 : * If we have used "sizeof()" for the size,
48 : * we know it won't overflow the limit (but
49 : * it might overflow the 'addr', so it's
50 : * important to subtract the size from the
51 : * limit, not add it to the address).
52 : */
53 39 : if (__builtin_constant_p(size))
54 0 : return addr > limit - size;
55 :
56 : /* Arbitrary sizes? Be careful about overflow */
57 39 : addr += size;
58 39 : if (addr < size)
59 : return true;
60 39 : return addr > limit;
61 : }
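
/*
 * Worked example (illustrative, not part of the original header), using
 * made-up values with limit = 0x00007ffffffff000 (a TASK_SIZE_MAX-like
 * value on 64-bit):
 *
 *   addr = 0xfffffffffffffff0, size = 0x100
 *   addr + size wraps around to 0xf0, and 0xf0 < size, so the range is
 *   correctly rejected even though a plain "addr + size > limit" test
 *   would have passed after the wrap.
 *
 * For a constant size such as sizeof(long), size is always far below the
 * limit, so "limit - size" cannot underflow and no wrap check is needed.
 */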
62 :
63 : #define __range_not_ok(addr, size, limit) \
64 : ({ \
65 : __chk_user_ptr(addr); \
66 : __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
67 : })
68 :
69 : /**
70 : * access_ok: - Checks if a user space pointer is valid
71 : * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
72 : * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
73 : * to write to a block, it is always safe to read from it.
74 : * @addr: User space pointer to start of block to check
75 : * @size: Size of block to check
76 : *
77 : * Context: User context only. This function may sleep.
78 : *
79 : * Checks if a pointer to a block of memory in user space is valid.
80 : *
81 : * Returns true (nonzero) if the memory block may be valid, false (zero)
82 : * if it is definitely invalid.
83 : *
84 : * Note that, depending on architecture, this function probably just
85 : * checks that the pointer is in the user space range - after calling
86 : * this function, memory access functions may still return -EFAULT.
87 : */
88 : #define access_ok(type, addr, size) \
89 : likely(!__range_not_ok(addr, size, user_addr_max()))
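
/*
 * Illustrative sketch (not part of the original header; the helper name and
 * arguments are hypothetical): typical access_ok() usage before touching a
 * user buffer.
 */
static inline int demo_check_user_buf(void __user *ubuf, unsigned long len)
{
	if (!access_ok(VERIFY_WRITE, ubuf, len))
		return -EFAULT;
	/*
	 * The range looks plausible, but the actual access can still fault
	 * and return -EFAULT, e.g. if the pages are not mapped.
	 */
	return 0;
}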
90 :
91 : /*
92 : * The exception table consists of pairs of addresses relative to the
93 : * exception table entry itself: the first is the address of an
94 : * instruction that is allowed to fault, and the second is the address
95 : * at which the program should continue. No registers are modified,
96 : * so it is entirely up to the continuation code to figure out what to
97 : * do.
98 : *
99 : * All the routines below use bits of fixup code that are out of line
100 : * with the main instruction path. This means when everything is well,
101 : * we don't even have to jump over them. Further, they do not intrude
102 : * on our cache or tlb entries.
103 : */
104 :
105 : struct exception_table_entry {
106 : int insn, fixup;
107 : };
108 : /* This is not the generic standard exception_table_entry format */
109 : #define ARCH_HAS_SORT_EXTABLE
110 : #define ARCH_HAS_SEARCH_EXTABLE
111 :
112 : extern int fixup_exception(struct pt_regs *regs);
113 : extern int early_fixup_exception(unsigned long *ip);
114 :
115 : /*
116 : * These are the main single-value transfer routines. They automatically
117 : * use the right size if we just have the right pointer type.
118 : *
119 : * This gets kind of ugly. We want to return _two_ values in "get_user()"
120 : * and yet we don't want to do any pointers, because that is too much
121 : * of a performance impact. Thus we have a few rather ugly macros here,
122 : * and hide all the ugliness from the user.
123 : *
124 : * The "__xxx" versions of the user access functions are versions that
125 : * do not verify the address space, that must have been done previously
126 : * with a separate "access_ok()" call (this is used when we do multiple
127 : * accesses to the same area of user memory).
128 : */
129 :
130 : extern int __get_user_1(void);
131 : extern int __get_user_2(void);
132 : extern int __get_user_4(void);
133 : extern int __get_user_8(void);
134 : extern int __get_user_bad(void);
135 :
136 : /*
137 : * This is a type: either unsigned long, if the argument fits into
138 : * that type, or otherwise unsigned long long.
139 : */
140 : #define __inttype(x) \
141 : __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
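
/*
 * Examples (illustrative, not part of the original header) of what
 * __inttype() evaluates to:
 *
 *   __inttype(char) and __inttype(int) are unsigned long everywhere;
 *   __inttype(long long) is unsigned long long on 32-bit (where unsigned
 *   long is only 32 bits) and unsigned long on 64-bit (where it already
 *   holds 64 bits).
 */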
142 :
143 : /**
144 : * get_user: - Get a simple variable from user space.
145 : * @x: Variable to store result.
146 : * @ptr: Source address, in user space.
147 : *
148 : * Context: User context only. This function may sleep.
149 : *
150 : * This macro copies a single simple variable from user space to kernel
151 : * space. It supports simple types like char and int, but not larger
152 : * data types like structures or arrays.
153 : *
154 : * @ptr must have pointer-to-simple-variable type, and the result of
155 : * dereferencing @ptr must be assignable to @x without a cast.
156 : *
157 : * Returns zero on success, or -EFAULT on error.
158 : * On error, the variable @x is set to zero.
159 : */
160 : /*
161 : * Careful: we have to cast the result to the type of the pointer
162 : * for sign reasons.
163 : *
164 : * The use of _ASM_DX as the register specifier is a bit of a
165 : * simplification, as gcc only cares about it as the starting point
166 : * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
167 : * (%ecx being the next register in gcc's x86 register sequence), and
168 : * %rdx on 64 bits.
169 : *
170 : * Clang/LLVM cares about the size of the register, but still wants
171 : * the base register for something that ends up being a pair.
172 : */
173 : #define get_user(x, ptr) \
174 : ({ \
175 : int __ret_gu; \
176 : register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
177 : __chk_user_ptr(ptr); \
178 : might_fault(); \
179 : asm volatile("call __get_user_%P3" \
180 : : "=a" (__ret_gu), "=r" (__val_gu) \
181 : : "0" (ptr), "i" (sizeof(*(ptr)))); \
182 : (x) = (__typeof__(*(ptr))) __val_gu; \
183 : __ret_gu; \
184 : })
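
/*
 * Illustrative sketch (not part of the original header; the function is
 * hypothetical): typical get_user() usage in an ioctl-style handler.
 */
static long demo_get_timeout(int __user *argp)
{
	int timeout;

	if (get_user(timeout, argp))	/* checks the address itself */
		return -EFAULT;
	if (timeout < 0)
		return -EINVAL;
	return timeout;
}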
185 :
186 : #define __put_user_x(size, x, ptr, __ret_pu) \
187 : asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
188 : : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
189 :
190 :
191 :
192 : #ifdef CONFIG_X86_32
193 : #define __put_user_asm_u64(x, addr, err, errret) \
194 : asm volatile(ASM_STAC "\n" \
195 : "1: movl %%eax,0(%2)\n" \
196 : "2: movl %%edx,4(%2)\n" \
197 : "3: " ASM_CLAC "\n" \
198 : ".section .fixup,\"ax\"\n" \
199 : "4: movl %3,%0\n" \
200 : " jmp 3b\n" \
201 : ".previous\n" \
202 : _ASM_EXTABLE(1b, 4b) \
203 : _ASM_EXTABLE(2b, 4b) \
204 : : "=r" (err) \
205 : : "A" (x), "r" (addr), "i" (errret), "0" (err))
206 :
207 : #define __put_user_asm_ex_u64(x, addr) \
208 : asm volatile(ASM_STAC "\n" \
209 : "1: movl %%eax,0(%1)\n" \
210 : "2: movl %%edx,4(%1)\n" \
211 : "3: " ASM_CLAC "\n" \
212 : _ASM_EXTABLE_EX(1b, 2b) \
213 : _ASM_EXTABLE_EX(2b, 3b) \
214 : : : "A" (x), "r" (addr))
215 :
216 : #define __put_user_x8(x, ptr, __ret_pu) \
217 : asm volatile("call __put_user_8" : "=a" (__ret_pu) \
218 : : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
219 : #else
220 : #define __put_user_asm_u64(x, ptr, retval, errret) \
221 : __put_user_asm(x, ptr, retval, "q", "", "er", errret)
222 : #define __put_user_asm_ex_u64(x, addr) \
223 : __put_user_asm_ex(x, addr, "q", "", "er")
224 : #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
225 : #endif
226 :
227 : extern void __put_user_bad(void);
228 :
229 : /*
230 : * Strange magic calling convention: pointer in %ecx,
231 : * value in %eax(:%edx), return value in %eax. clobbers %rbx
232 : */
233 : extern void __put_user_1(void);
234 : extern void __put_user_2(void);
235 : extern void __put_user_4(void);
236 : extern void __put_user_8(void);
237 :
238 : /**
239 : * put_user: - Write a simple value into user space.
240 : * @x: Value to copy to user space.
241 : * @ptr: Destination address, in user space.
242 : *
243 : * Context: User context only. This function may sleep.
244 : *
245 : * This macro copies a single simple value from kernel space to user
246 : * space. It supports simple types like char and int, but not larger
247 : * data types like structures or arrays.
248 : *
249 : * @ptr must have pointer-to-simple-variable type, and @x must be assignable
250 : * to the result of dereferencing @ptr.
251 : *
252 : * Returns zero on success, or -EFAULT on error.
253 : */
254 : #define put_user(x, ptr) \
255 : ({ \
256 : int __ret_pu; \
257 : __typeof__(*(ptr)) __pu_val; \
258 : __chk_user_ptr(ptr); \
259 : might_fault(); \
260 : __pu_val = x; \
261 : switch (sizeof(*(ptr))) { \
262 : case 1: \
263 : __put_user_x(1, __pu_val, ptr, __ret_pu); \
264 : break; \
265 : case 2: \
266 : __put_user_x(2, __pu_val, ptr, __ret_pu); \
267 : break; \
268 : case 4: \
269 : __put_user_x(4, __pu_val, ptr, __ret_pu); \
270 : break; \
271 : case 8: \
272 : __put_user_x8(__pu_val, ptr, __ret_pu); \
273 : break; \
274 : default: \
275 : __put_user_x(X, __pu_val, ptr, __ret_pu); \
276 : break; \
277 : } \
278 : __ret_pu; \
279 : })
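
/*
 * Illustrative sketch (not part of the original header; the function and
 * value are hypothetical): returning a single value with put_user().
 */
static long demo_report_version(unsigned int __user *argp)
{
	const unsigned int version = 42;	/* made-up version number */

	return put_user(version, argp) ? -EFAULT : 0;
}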
280 :
281 : #define __put_user_size(x, ptr, size, retval, errret) \
282 : do { \
283 : retval = 0; \
284 : __chk_user_ptr(ptr); \
285 : switch (size) { \
286 : case 1: \
287 : __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
288 : break; \
289 : case 2: \
290 : __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
291 : break; \
292 : case 4: \
293 : __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
294 : break; \
295 : case 8: \
296 : __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
297 : errret); \
298 : break; \
299 : default: \
300 : __put_user_bad(); \
301 : } \
302 : } while (0)
303 :
304 : #define __put_user_size_ex(x, ptr, size) \
305 : do { \
306 : __chk_user_ptr(ptr); \
307 : switch (size) { \
308 : case 1: \
309 : __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
310 : break; \
311 : case 2: \
312 : __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
313 : break; \
314 : case 4: \
315 : __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
316 : break; \
317 : case 8: \
318 : __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
319 : break; \
320 : default: \
321 : __put_user_bad(); \
322 : } \
323 : } while (0)
324 :
325 : #ifdef CONFIG_X86_32
326 : #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
327 : #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
328 : #else
329 : #define __get_user_asm_u64(x, ptr, retval, errret) \
330 : __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
331 : #define __get_user_asm_ex_u64(x, ptr) \
332 : __get_user_asm_ex(x, ptr, "q", "", "=r")
333 : #endif
334 :
335 : #define __get_user_size(x, ptr, size, retval, errret) \
336 : do { \
337 : retval = 0; \
338 : __chk_user_ptr(ptr); \
339 : switch (size) { \
340 : case 1: \
341 : __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
342 : break; \
343 : case 2: \
344 : __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
345 : break; \
346 : case 4: \
347 : __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
348 : break; \
349 : case 8: \
350 : __get_user_asm_u64(x, ptr, retval, errret); \
351 : break; \
352 : default: \
353 : (x) = __get_user_bad(); \
354 : } \
355 : } while (0)
356 :
357 : #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
358 : asm volatile(ASM_STAC "\n" \
359 : "1: mov"itype" %2,%"rtype"1\n" \
360 : "2: " ASM_CLAC "\n" \
361 : ".section .fixup,\"ax\"\n" \
362 : "3: mov %3,%0\n" \
363 : " xor"itype" %"rtype"1,%"rtype"1\n" \
364 : " jmp 2b\n" \
365 : ".previous\n" \
366 : _ASM_EXTABLE(1b, 3b) \
367 : : "=r" (err), ltype(x) \
368 : : "m" (__m(addr)), "i" (errret), "0" (err))
369 :
370 : #define __get_user_size_ex(x, ptr, size) \
371 : do { \
372 : __chk_user_ptr(ptr); \
373 : switch (size) { \
374 : case 1: \
375 : __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
376 : break; \
377 : case 2: \
378 : __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
379 : break; \
380 : case 4: \
381 : __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
382 : break; \
383 : case 8: \
384 : __get_user_asm_ex_u64(x, ptr); \
385 : break; \
386 : default: \
387 : (x) = __get_user_bad(); \
388 : } \
389 : } while (0)
390 :
391 : #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
392 : asm volatile("1: mov"itype" %1,%"rtype"0\n" \
393 : "2:\n" \
394 : _ASM_EXTABLE_EX(1b, 2b) \
395 : : ltype(x) : "m" (__m(addr)))
396 :
397 : #define __put_user_nocheck(x, ptr, size) \
398 : ({ \
399 : int __pu_err; \
400 : __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
401 : __pu_err; \
402 : })
403 :
404 : #define __get_user_nocheck(x, ptr, size) \
405 : ({ \
406 : int __gu_err; \
407 : unsigned long __gu_val; \
408 : __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
409 : (x) = (__force __typeof__(*(ptr)))__gu_val; \
410 : __gu_err; \
411 : })
412 :
413 : /* FIXME: this hack is definitely wrong -AK */
414 : struct __large_struct { unsigned long buf[100]; };
415 : #define __m(x) (*(struct __large_struct __user *)(x))
416 :
417 : /*
418 : * Tell gcc we read from memory instead of writing: this is because
419 : * we do not write to any memory gcc knows about, so there are no
420 : * aliasing issues.
421 : */
422 : #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
423 : asm volatile(ASM_STAC "\n" \
424 : "1: mov"itype" %"rtype"1,%2\n" \
425 : "2: " ASM_CLAC "\n" \
426 : ".section .fixup,\"ax\"\n" \
427 : "3: mov %3,%0\n" \
428 : " jmp 2b\n" \
429 : ".previous\n" \
430 : _ASM_EXTABLE(1b, 3b) \
431 : : "=r"(err) \
432 : : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
433 :
434 : #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
435 : asm volatile("1: mov"itype" %"rtype"0,%1\n" \
436 : "2:\n" \
437 : _ASM_EXTABLE_EX(1b, 2b) \
438 : : : ltype(x), "m" (__m(addr)))
439 :
440 : /*
441 : * uaccess_try and catch
442 : */
443 : #define uaccess_try do { \
444 : current_thread_info()->uaccess_err = 0; \
445 : stac(); \
446 : barrier();
447 :
448 : #define uaccess_catch(err) \
449 : clac(); \
450 : (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
451 : } while (0)
452 :
453 : /**
454 : * __get_user: - Get a simple variable from user space, with less checking.
455 : * @x: Variable to store result.
456 : * @ptr: Source address, in user space.
457 : *
458 : * Context: User context only. This function may sleep.
459 : *
460 : * This macro copies a single simple variable from user space to kernel
461 : * space. It supports simple types like char and int, but not larger
462 : * data types like structures or arrays.
463 : *
464 : * @ptr must have pointer-to-simple-variable type, and the result of
465 : * dereferencing @ptr must be assignable to @x without a cast.
466 : *
467 : * Caller must check the pointer with access_ok() before calling this
468 : * function.
469 : *
470 : * Returns zero on success, or -EFAULT on error.
471 : * On error, the variable @x is set to zero.
472 : */
473 :
474 : #define __get_user(x, ptr) \
475 : __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
476 :
477 : /**
478 : * __put_user: - Write a simple value into user space, with less checking.
479 : * @x: Value to copy to user space.
480 : * @ptr: Destination address, in user space.
481 : *
482 : * Context: User context only. This function may sleep.
483 : *
484 : * This macro copies a single simple value from kernel space to user
485 : * space. It supports simple types like char and int, but not larger
486 : * data types like structures or arrays.
487 : *
488 : * @ptr must have pointer-to-simple-variable type, and @x must be assignable
489 : * to the result of dereferencing @ptr.
490 : *
491 : * Caller must check the pointer with access_ok() before calling this
492 : * function.
493 : *
494 : * Returns zero on success, or -EFAULT on error.
495 : */
496 :
497 : #define __put_user(x, ptr) \
498 : __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
499 :
500 : #define __get_user_unaligned __get_user
501 : #define __put_user_unaligned __put_user
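
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): a single access_ok() check covering several
 * __get_user()/__put_user() accesses to the same user buffer, as described
 * above for the "__xxx" variants.
 */
static long demo_swap_pair(int __user *uarr)
{
	int a, b;

	if (!access_ok(VERIFY_WRITE, uarr, 2 * sizeof(*uarr)))
		return -EFAULT;
	if (__get_user(a, uarr) || __get_user(b, uarr + 1))
		return -EFAULT;
	if (__put_user(b, uarr) || __put_user(a, uarr + 1))
		return -EFAULT;
	return 0;
}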
502 :
503 : /*
504 : * {get|put}_user_try and catch
505 : *
506 : * get_user_try {
507 : * get_user_ex(...);
508 : * } get_user_catch(err)
509 : */
510 : #define get_user_try uaccess_try
511 : #define get_user_catch(err) uaccess_catch(err)
512 :
513 : #define get_user_ex(x, ptr) do { \
514 : unsigned long __gue_val; \
515 : __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
516 : (x) = (__force __typeof__(*(ptr)))__gue_val; \
517 : } while (0)
518 :
519 : #define put_user_try uaccess_try
520 : #define put_user_catch(err) uaccess_catch(err)
521 :
522 : #define put_user_ex(x, ptr) \
523 : __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
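
/*
 * Illustrative sketch (not part of the original header; the structure and
 * helper are hypothetical): the {get|put}_user_try/catch pattern batches
 * several accesses behind one error check, as the x86 signal code does.
 */
struct demo_user_frame {
	unsigned long ip;
	unsigned long sp;
};

static long demo_read_frame(const struct demo_user_frame __user *uframe,
			    unsigned long *ip, unsigned long *sp)
{
	int err = 0;

	if (!access_ok(VERIFY_READ, uframe, sizeof(*uframe)))
		return -EFAULT;

	get_user_try {
		get_user_ex(*ip, &uframe->ip);
		get_user_ex(*sp, &uframe->sp);
	} get_user_catch(err);

	return err;
}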
524 :
525 : extern unsigned long
526 : copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
527 : extern __must_check long
528 : strncpy_from_user(char *dst, const char __user *src, long count);
529 :
530 : extern __must_check long strlen_user(const char __user *str);
531 : extern __must_check long strnlen_user(const char __user *str, long n);
532 :
533 : unsigned long __must_check clear_user(void __user *mem, unsigned long len);
534 : unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
535 :
536 : extern void __cmpxchg_wrong_size(void)
537 : __compiletime_error("Bad argument size for cmpxchg");
538 :
539 : #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
540 : ({ \
541 : int __ret = 0; \
542 : __typeof__(ptr) __uval = (uval); \
543 : __typeof__(*(ptr)) __old = (old); \
544 : __typeof__(*(ptr)) __new = (new); \
545 : switch (size) { \
546 : case 1: \
547 : { \
548 : asm volatile("\t" ASM_STAC "\n" \
549 : "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
550 : "2:\t" ASM_CLAC "\n" \
551 : "\t.section .fixup, \"ax\"\n" \
552 : "3:\tmov %3, %0\n" \
553 : "\tjmp 2b\n" \
554 : "\t.previous\n" \
555 : _ASM_EXTABLE(1b, 3b) \
556 : : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
557 : : "i" (-EFAULT), "q" (__new), "1" (__old) \
558 : : "memory" \
559 : ); \
560 : break; \
561 : } \
562 : case 2: \
563 : { \
564 : asm volatile("\t" ASM_STAC "\n" \
565 : "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
566 : "2:\t" ASM_CLAC "\n" \
567 : "\t.section .fixup, \"ax\"\n" \
568 : "3:\tmov %3, %0\n" \
569 : "\tjmp 2b\n" \
570 : "\t.previous\n" \
571 : _ASM_EXTABLE(1b, 3b) \
572 : : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
573 : : "i" (-EFAULT), "r" (__new), "1" (__old) \
574 : : "memory" \
575 : ); \
576 : break; \
577 : } \
578 : case 4: \
579 : { \
580 : asm volatile("\t" ASM_STAC "\n" \
581 : "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
582 : "2:\t" ASM_CLAC "\n" \
583 : "\t.section .fixup, \"ax\"\n" \
584 : "3:\tmov %3, %0\n" \
585 : "\tjmp 2b\n" \
586 : "\t.previous\n" \
587 : _ASM_EXTABLE(1b, 3b) \
588 : : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
589 : : "i" (-EFAULT), "r" (__new), "1" (__old) \
590 : : "memory" \
591 : ); \
592 : break; \
593 : } \
594 : case 8: \
595 : { \
596 : if (!IS_ENABLED(CONFIG_X86_64)) \
597 : __cmpxchg_wrong_size(); \
598 : \
599 : asm volatile("\t" ASM_STAC "\n" \
600 : "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
601 : "2:\t" ASM_CLAC "\n" \
602 : "\t.section .fixup, \"ax\"\n" \
603 : "3:\tmov %3, %0\n" \
604 : "\tjmp 2b\n" \
605 : "\t.previous\n" \
606 : _ASM_EXTABLE(1b, 3b) \
607 : : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
608 : : "i" (-EFAULT), "r" (__new), "1" (__old) \
609 : : "memory" \
610 : ); \
611 : break; \
612 : } \
613 : default: \
614 : __cmpxchg_wrong_size(); \
615 : } \
616 : *__uval = __old; \
617 : __ret; \
618 : })
619 :
620 : #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
621 : ({ \
622 : access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
623 : __user_atomic_cmpxchg_inatomic((uval), (ptr), \
624 : (old), (new), sizeof(*(ptr))) : \
625 : -EFAULT; \
626 : })
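
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): atomically replacing a user-space word only if it still
 * holds the expected value. A fault inside the cmpxchg is reported as
 * -EFAULT through the exception table instead of oopsing.
 */
static int demo_claim_word(u32 __user *uaddr, u32 expected, u32 newval)
{
	u32 cur;
	int ret;

	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, newval);
	if (ret)
		return ret;			/* -EFAULT */
	return (cur == expected) ? 0 : -EAGAIN;	/* someone else won the race */
}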
627 :
628 : /*
629 : * movsl can be slow when source and dest are not both 8-byte aligned
630 : */
631 : #ifdef CONFIG_X86_INTEL_USERCOPY
632 : extern struct movsl_mask {
633 : int mask;
634 : } ____cacheline_aligned_in_smp movsl_mask;
635 : #endif
636 :
637 : #define ARCH_HAS_NOCACHE_UACCESS 1
638 :
639 : #ifdef CONFIG_X86_32
640 : # include <asm/uaccess_32.h>
641 : #else
642 : # include <asm/uaccess_64.h>
643 : #endif
644 :
645 : unsigned long __must_check _copy_from_user(void *to, const void __user *from,
646 : unsigned n);
647 : unsigned long __must_check _copy_to_user(void __user *to, const void *from,
648 : unsigned n);
649 :
650 : #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
651 : # define copy_user_diag __compiletime_error
652 : #else
653 : # define copy_user_diag __compiletime_warning
654 : #endif
655 :
656 : extern void copy_user_diag("copy_from_user() buffer size is too small")
657 : copy_from_user_overflow(void);
658 : extern void copy_user_diag("copy_to_user() buffer size is too small")
659 : copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
660 :
661 : #undef copy_user_diag
662 :
663 : #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
664 :
665 : extern void
666 : __compiletime_warning("copy_from_user() buffer size is not provably correct")
667 : __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
668 : #define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
669 :
670 : extern void
671 : __compiletime_warning("copy_to_user() buffer size is not provably correct")
672 : __copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
673 : #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
674 :
675 : #else
676 :
677 : static inline void
678 : __copy_from_user_overflow(int size, unsigned long count)
679 : {
680 : WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
681 : }
682 :
683 : #define __copy_to_user_overflow __copy_from_user_overflow
684 :
685 : #endif
686 :
687 : static inline unsigned long __must_check
688 : copy_from_user(void *to, const void __user *from, unsigned long n)
689 : {
690 : int sz = __compiletime_object_size(to);
691 :
692 : might_fault();
693 :
694 : /*
695 : * While we would like to have the compiler do the checking for us
696 : * even in the non-constant size case, any false positives there are
697 : * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
698 : * without - the [hopefully] dangerous looking nature of the warning
699 : * would make people go look at the respective call sites over and
700 : * over again just to find that there's no problem).
701 : *
702 : * And there are cases where it's just not realistic for the compiler
703 : * to prove the count to be in range. For example when multiple call
704 : * sites of a helper function - perhaps in different source files -
705 : * all doing proper range checking, yet the helper function not doing
706 : * so again.
707 : *
708 : * Therefore limit the compile time checking to the constant size
709 : * case, and do only runtime checking for non-constant sizes.
710 : */
711 :
712 : if (likely(sz < 0 || sz >= n))
713 507 : n = _copy_from_user(to, from, n);
714 : else if (__builtin_constant_p(n))
715 : copy_from_user_overflow();
716 : else
717 : __copy_from_user_overflow(sz, n);
718 :
719 : return n;
720 : }
721 :
722 : static inline unsigned long __must_check
723 : copy_to_user(void __user *to, const void *from, unsigned long n)
724 : {
725 : int sz = __compiletime_object_size(from);
726 :
727 : might_fault();
728 :
729 : /* See the comment in copy_from_user() above. */
730 : if (likely(sz < 0 || sz >= n))
731 2334 : n = _copy_to_user(to, from, n);
732 : else if (__builtin_constant_p(n))
733 : copy_to_user_overflow();
734 : else
735 : __copy_to_user_overflow(sz, n);
736 :
737 : return n;
738 : }
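
/*
 * Illustrative sketch (not part of the original header; the structure and
 * helper are hypothetical): a constant-size copy_from_user()/copy_to_user()
 * round trip. Because sizeof(cfg) is a compile-time constant, an undersized
 * kernel buffer would trip the compile-time check described in the comment
 * inside copy_from_user() above.
 */
struct demo_config {
	u32 flags;
	u32 timeout_ms;
};

static long demo_update_config(struct demo_config __user *ucfg)
{
	struct demo_config cfg;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;
	cfg.flags |= 0x1;			/* made-up in-kernel adjustment */
	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;
	return 0;
}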
739 :
740 : #undef __copy_from_user_overflow
741 : #undef __copy_to_user_overflow
742 :
743 : #endif /* _ASM_X86_UACCESS_H */
744 :