Shared: Linux/arch/mips/include/asm/uaccess.h (posted by WUCANADA, 2011-12-08)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
 17 /*
 18  * The fs value determines whether argument validity checking should be
 19  * performed or not.  If get_fs() == USER_DS, checking is performed, with
 20  * get_fs() == KERNEL_DS, checking is bypassed.
 21  *
 22  * For historical reasons, these macros are grossly misnamed.
 23  */
 24 #ifdef CONFIG_32BIT
 25 
 26 #define __UA_LIMIT      0x80000000UL
 27 
 28 #define __UA_ADDR       ".word"
 29 #define __UA_LA         "la"
 30 #define __UA_ADDU       "addu"
 31 #define __UA_t0         "$8"
 32 #define __UA_t1         "$9"
 33 
 34 #endif /* CONFIG_32BIT */
 35 
 36 #ifdef CONFIG_64BIT
 37 
 38 extern u64 __ua_limit;
 39 
 40 #define __UA_LIMIT      __ua_limit
 41 
 42 #define __UA_ADDR       ".dword"
 43 #define __UA_LA         "dla"
 44 #define __UA_ADDU       "daddu"
 45 #define __UA_t0         "$12"
 46 #define __UA_t1         "$13"
 47 
 48 #endif /* CONFIG_64BIT */
 49 
 50 /*
 51  * USER_DS is a bitmask that has the bits set that may not be set in a valid
 52  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 53  * the arithmetic we're doing only works if the limit is a power of two, so
 54  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 55  * address in this range it's the process's problem, not ours :-)
 56  */
 57 
 58 #define KERNEL_DS       ((mm_segment_t) { 0UL })
 59 #define USER_DS         ((mm_segment_t) { __UA_LIMIT })
 60 
 61 #define VERIFY_READ    0
 62 #define VERIFY_WRITE   1
 63 
 64 #define get_ds()        (KERNEL_DS)
 65 #define get_fs()        (current_thread_info()->addr_limit)
 66 #define set_fs(x)       (current_thread_info()->addr_limit = (x))
 67 
 68 #define segment_eq(a, b)        ((a).seg == (b).seg)
 69 
 70 
 71 /*
 72  * Is a address valid? This does a straighforward calculation rather
 73  * than tests.
 74  *
 75  * Address valid if:
 76  *  - "addr" doesn't have any high-bits set
 77  *  - AND "size" doesn't have any high-bits set
 78  *  - AND "addr+size" doesn't have any high-bits set
 79  *  - OR we are in kernel mode.
 80  *
 81  * __ua_size() is a trick to avoid runtime checking of positive constant
 82  * sizes; for those we already know at compile time that the size is ok.
 83  */
 84 #define __ua_size(size)                                                  85         ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
 86 
 87 /*
 88  * access_ok: - Checks if a user space pointer is valid
 89  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 90  *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 91  *        to write to a block, it is always safe to read from it.
 92  * @addr: User space pointer to start of block to check
 93  * @size: Size of block to check
 94  *
 95  * Context: User context only.  This function may sleep.
 96  *
 97  * Checks if a pointer to a block of memory in user space is valid.
 98  *
 99  * Returns true (nonzero) if the memory block may be valid, false (zero)
100  * if it is definitely invalid.
101  *
102  * Note that, depending on architecture, this function probably just
103  * checks that the pointer is in the user space range - after calling
104  * this function, memory access functions may still return -EFAULT.
105  */
106 
107 #define __access_mask get_fs().seg
108 
109 #define __access_ok(addr, size, mask)                                   110 ({                                                                      111         unsigned long __addr = (unsigned long) (addr);                  112         unsigned long __size = size;                                    113         unsigned long __mask = mask;                                    114         unsigned long __ok;                                             115                                                                         116         __chk_user_ptr(addr);                                           117         __ok = (signed long)(__mask & (__addr | (__addr + __size) |     118                 __ua_size(__size)));                                    119         __ok == 0;                                                      120 })
121 
122 #define access_ok(type, addr, size)                                     123         likely(__access_ok((addr), (size), __access_mask))
124 
/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)	\
	__put_user_check((x), (ptr), sizeof(*(ptr)))
143 
/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr)	\
	__get_user_check((x), (ptr), sizeof(*(ptr)))
163 
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr)	\
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
185 
/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr)	\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
208 
/* __m() views an address as a large struct so that the "o" (memory)
 * asm constraints below cover a wide region around the pointer. */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
211 
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif
222 
/* Deliberately undefined: a reference survives to link time only if a
 * __get_user call was expanded with an unsupported size. */
extern void __get_user_unknown(void);

/* Dispatch on the access size to the matching load instruction. */
#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_asm(val, "lh", ptr); break;			\
	case 4: __get_user_asm(val, "lw", ptr); break;			\
	case 8: __GET_USER_DW(val, ptr); break;				\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
235 
/* Fetch without an access_ok() check; __gu_err is written by name inside
 * the __get_user_asm* expansions (0 on success, -EFAULT via fixup). */
#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	__chk_user_ptr(ptr);						\
	__get_user_common((x), size, ptr);				\
	__gu_err;							\
})
244 
/* Checked fetch: validate the pointer first; on a failed access_ok()
 * the default -EFAULT is returned without touching user memory. */
#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
		__get_user_common((x), size, __gu_ptr);			\
									\
	__gu_err;							\
})
256 
/* Single load with exception-table fixup: label 1 is the faultable load,
 * label 3 (in .fixup) stores -EFAULT into __gu_err and resumes at 2.
 * The "0" (0) matching constraint pre-sets __gu_err to 0 (the scraped
 * copy had an invalid empty "" constraint here). */
#define __get_user_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
276 
/*
 * Get a long long 64 using 32 bit registers.
 * On fault both halves are zeroed and __gu_err is set to -EFAULT.
 * ("0" (0) restores the matching constraint lost in the scrape.)
 */
#define __get_user_asm_ll32(val, addr)					\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	lw	%1, (%3)				\n"	\
	"2:	lw	%D1, 4(%3)				\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
305 
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif
316 
/* Store without an access_ok() check; __pu_err/__pu_val are referenced
 * by name inside the __put_user_asm* expansions. */
#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__chk_user_ptr(ptr);						\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_asm("sh", ptr); break;			\
	case 4: __put_user_asm("sw", ptr); break;			\
	case 8: __PUT_USER_DW(ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
	__pu_err;							\
})
333 
/* Checked store: validate the pointer first; on a failed access_ok()
 * the default -EFAULT is returned without touching user memory. */
#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_asm("sh", __pu_addr); break;		\
		case 4: __put_user_asm("sw", __pu_addr); break;		\
		case 8: __PUT_USER_DW(__pu_addr); break;		\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})
352 
/* Single store with exception-table fixup; "Jr" lets a constant-zero
 * value use register $0 via the %z2 operand modifier.  "0" (0) pre-sets
 * __pu_err (the scraped copy had an invalid empty "" constraint). */
#define __put_user_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
369 
/* Store a 64-bit value with two 32-bit sw instructions; either faulting
 * store is redirected to fixup label 4.  ("0" (0) restored.) */
#define __put_user_asm_ll32(ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n"	\
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

/* Deliberately undefined: link-time error for unsupported sizes. */
extern void __put_user_unknown(void);
390 
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)	\
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
409 
/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr)	\
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
429 
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr)	\
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
451 
/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/* Fixed: the original expanded to the nonexistent
 * __get_user__unalignednocheck(); the macro defined below is
 * __get_user_unaligned_nocheck(). */
#define __get_user_unaligned(x,ptr)	\
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
474 
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm(val, "uld", ptr)
#endif
487 
/* Deliberately undefined: link-time error for unsupported sizes. */
extern void __get_user_unaligned_unknown(void);

/* Byte loads can't be misaligned, so size 1 uses the plain lb path;
 * larger sizes use the unaligned ulh/ulw/uld variants. */
#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)
500 
/* Unaligned fetch without an access_ok() check; __gu_err is written by
 * the asm expansions (0 on success, -EFAULT via fixup). */
#define __get_user_unaligned_nocheck(x,ptr,size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})
508 
/* Checked unaligned fetch: -EFAULT when access_ok() rejects the range. */
#define __get_user_unaligned_check(x,ptr,size)				\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})
519 
/* Unaligned load with fixup; the unaligned insn expands to two machine
 * instructions, so both 1b and 1b + 4 get exception-table entries.
 * ("0" (0) restores the matching constraint lost in the scrape.) */
#define __get_user_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
540 
/*
 * Get a long long 64 using 32 bit registers.
 * Each ulw expands to two instructions, hence the four exception-table
 * entries (1b, 1b + 4, 2b, 2b + 4).  ("0" (0) restored.)
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
568 
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif
579 
/* Unaligned store without an access_ok() check; byte stores use the
 * plain sb path since they cannot be misaligned. */
#define __put_user_unaligned_nocheck(x,ptr,size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
	__pu_err;							\
})
595 
/* Checked unaligned store.  Fixed: the size-8 case invoked the
 * misspelled __PUT_USER_UNALGINED_DW; the macro defined above is
 * __PUT_USER_UNALIGNED_DW. */
#define __put_user_unaligned_check(x,ptr,size)				\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
		case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break;	\
		default: __put_user_unaligned_unknown(); break;		\
		}							\
	}								\
	__pu_err;							\
})
613 
/* Unaligned store with fixup; "Jr" lets a constant-zero value use
 * register $0 via %z2.  ("0" (0) restores the matching constraint.) */
#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
630 
/*
 * 32-bit-kernel helper for an 8-byte unaligned put_user: store the value
 * as two word-aligned sw instructions (value word %2, then %D2, the
 * other half of the 64-bit register pair).  Either store may fault, and
 * each sw may itself be split by the fixup (hence the "1b + 4" / "2b + 4"
 * entries), so four exception-table entries all point at fixup label 4,
 * which sets %0 (__pu_err) to -EFAULT.
 *
 * The "0" (0) operand is a matching constraint pre-loading __pu_err
 * with 0.  (The scrape had mangled this to an empty "".)
 */
#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}
651 
/*
 * Referenced (never called at runtime) for unsupported access sizes;
 * presumably left undefined on purpose so a bad size in
 * __put_user_unaligned_check() fails at link time -- confirm no
 * definition exists elsewhere in the tree.
 */
extern void __put_user_unaligned_unknown(void);
653 
654 /*
655  * We're generating jump to subroutines which will be outside the range of
656  * jump instructions
657  */
#ifdef MODULE
/*
 * Modules may load outside the direct-jump range of jal, so load the
 * destination address into $1 (at) and use jalr.  at is temporarily
 * reserved via .set noat so the assembler does not clobber it.
 */
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
/* Built-in kernel code is within jal's reach: a direct jal suffices. */
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif
668 
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
/* No daddi errata workaround: nothing extra is clobbered ($0 is a no-op). */
#define DADDI_SCRATCH "$0"
#else
/* daddi workaround in effect: the copy routines use $3 as scratch. */
#define DADDI_SCRATCH "$3"
#endif
674 
/*
 * Assembler user-copy routine (arch/mips/lib); per the callers below it
 * returns the number of bytes that could NOT be copied (0 on success).
 */
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
676 
/*
 * Call __copy_user with its arguments pinned in the MIPS argument
 * registers ($4 = to, $5 = from, $6 = len) and every register the
 * routine may clobber listed, including $31 (ra) and the optional
 * DADDI_SCRATCH.  Evaluates to the residual (uncopied) byte count,
 * which __copy_user leaves in $6.
 */
#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
694 
695 /*
696  * __copy_to_user: - Copy a block of data into user space, with less checking.
697  * @to:   Destination address, in user space.
698  * @from: Source address, in kernel space.
699  * @n:    Number of bytes to copy.
700  *
701  * Context: User context only.  This function may sleep.
702  *
703  * Copy data from kernel space to user space.  Caller must check
704  * the specified block with access_ok() before calling this function.
705  *
706  * Returns number of bytes that could not be copied.
707  * On success, this will be zero.
708  */
/*
 * Unchecked copy to userspace (kernel-doc above): the caller has already
 * done access_ok().  may_fault() is announced, then the asm copy routine
 * is invoked; evaluates to the number of bytes NOT copied.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})
722 
/*
 * Atomic-context variant of __copy_user (presumably it does not fault in
 * pages -- implementation lives in arch/mips/lib; confirm there).
 * Returns the number of bytes NOT copied.
 */
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
724 
/*
 * Copy to userspace from atomic context: identical to __copy_to_user()
 * but without the might_fault() annotation, since sleeping is not
 * allowed.  Evaluates to the number of bytes NOT copied.
 *
 * NOTE(review): this invokes __invoke_copy_to_user (plain __copy_user),
 * not an "_inatomic" helper -- that mirrors the original code here.
 */
#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})
737 
/*
 * Copy from userspace in atomic context: no access_ok() and no
 * might_fault() -- the caller guarantees both.  Dispatches to the
 * _inatomic asm helper; evaluates to the number of bytes NOT copied.
 */
#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from,	\
						    __cu_len);		\
	__cu_len;							\
})
751 
752 /*
753  * copy_to_user: - Copy a block of data into user space.
754  * @to:   Destination address, in user space.
755  * @from: Source address, in kernel space.
756  * @n:    Number of bytes to copy.
757  *
758  * Context: User context only.  This function may sleep.
759  *
760  * Copy data from kernel space to user space.
761  *
762  * Returns number of bytes that could not be copied.
763  * On success, this will be zero.
764  */
/*
 * Checked copy to userspace (kernel-doc above).  If access_ok() rejects
 * the destination range, the copy is skipped entirely and the macro
 * evaluates to the full length n, i.e. "n bytes not copied".
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {		\
		might_fault();						\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	}								\
	__cu_len;							\
})
781 
/*
 * Call __copy_user for a user->kernel copy with $4 = to, $5 = from,
 * $6 = len and the full clobber list.  Evaluates to the residual byte
 * count left in $6.
 *
 * NOTE(review): the __UA_ADDU after the call loads from+len into $1 --
 * presumably an end-of-source marker consumed by __copy_user's fault
 * fixup path; confirm against arch/mips/lib/memcpy.S before touching.
 */
#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
804 
/*
 * Same register protocol as __invoke_copy_from_user, but dispatches to
 * __copy_user_inatomic for use in contexts that must not sleep.
 * Evaluates to the residual byte count left in $6.
 */
#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
827 
828 /*
829  * __copy_from_user: - Copy a block of data from user space, with less checking.
830  * @to:   Destination address, in kernel space.
831  * @from: Source address, in user space.
832  * @n:    Number of bytes to copy.
833  *
834  * Context: User context only.  This function may sleep.
835  *
836  * Copy data from user space to kernel space.  Caller must check
837  * the specified block with access_ok() before calling this function.
838  *
839  * Returns number of bytes that could not be copied.
840  * On success, this will be zero.
841  *
842  * If some data could not be copied, this function will pad the copied
843  * data to the requested size using zero bytes.
844  */
/*
 * Unchecked copy from userspace (kernel-doc above): the caller has
 * already done access_ok().  Evaluates to the number of bytes NOT
 * copied; per the comment above, short copies are zero-padded.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})
859 
860 /*
861  * copy_from_user: - Copy a block of data from user space.
862  * @to:   Destination address, in kernel space.
863  * @from: Source address, in user space.
864  * @n:    Number of bytes to copy.
865  *
866  * Context: User context only.  This function may sleep.
867  *
868  * Copy data from user space to kernel space.
869  *
870  * Returns number of bytes that could not be copied.
871  * On success, this will be zero.
872  *
873  * If some data could not be copied, this function will pad the copied
874  * data to the requested size using zero bytes.
875  */
/*
 * Checked copy from userspace (kernel-doc above).  If access_ok()
 * rejects the source range the copy is skipped and the macro evaluates
 * to the full length n ("nothing copied").
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {		\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})
892 
/*
 * Unchecked user-to-user copy: both pointers are __user, the caller has
 * verified both ranges.  Reuses the from-user invoke helper (source and
 * destination both fault through the same fixup machinery).  Evaluates
 * to the number of bytes NOT copied.
 */
#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})
907 
/*
 * Checked user-to-user copy: both the source (read) and destination
 * (write) ranges must pass access_ok() or the copy is skipped and the
 * macro evaluates to the full length n.
 */
#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {	\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})
925 
926 /*
927  * __clear_user: - Zero a block of memory in user space, with less checking.
928  * @to:   Destination address, in user space.
929  * @n:    Number of bytes to zero.
930  *
931  * Zero a block of memory in user space.  Caller must check
932  * the specified block with access_ok() before calling this function.
933  *
934  * Returns number of bytes that could not be cleared.
935  * On success, this will be zero.
936  */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		/* __bzero calling convention: $4 = address, $5 = fill
		 * byte (zero here), $6 = length.  The residual count is
		 * returned in $6. */
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}
955 
/*
 * clear_user: - Zero a block of memory in user space.
 * @addr: Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Checks the range with access_ok() first; a zero-length request does
 * nothing.  Evaluates to the number of bytes that could not be cleared
 * (0 on success, @n when the access check fails).
 */
#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
					__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
965 
966 /*
967  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
968  * @dst:   Destination address, in kernel space.  This buffer must be at
969  *         least @count bytes long.
970  * @src:   Source address, in user space.
971  * @count: Maximum number of bytes to copy, including the trailing NUL.
972  *
973  * Copies a NUL-terminated string from userspace to kernel space.
974  * Caller must check the specified block with access_ok() before calling
975  * this function.
976  *
977  * On success, returns the length of the string (not including the trailing
978  * NUL).
979  *
980  * If access to userspace fails, returns -EFAULT (some data may have been
981  * copied).
982  *
983  * If @count is smaller than the length of the string, copies @count bytes
984  * and returns @count.
985  */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		/* Arguments in $4/$5/$6 per the o32/n64 convention; the
		 * "nocheck" asm routine skips access_ok() -- the caller
		 * already verified the range.  Result comes back in $2
		 * (v0). */
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
1004 
1005 /*
1006  * strncpy_from_user: - Copy a NUL terminated string from userspace.
1007  * @dst:   Destination address, in kernel space.  This buffer must be at
1008  *         least @count bytes long.
1009  * @src:   Source address, in user space.
1010  * @count: Maximum number of bytes to copy, including the trailing NUL.
1011  *
1012  * Copies a NUL-terminated string from userspace to kernel space.
1013  *
1014  * On success, returns the length of the string (not including the trailing
1015  * NUL).
1016  *
1017  * If access to userspace fails, returns -EFAULT (some data may have been
1018  * copied).
1019  *
1020  * If @count is smaller than the length of the string, copies @count bytes
1021  * and returns @count.
1022  */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		/* Checked variant: __strncpy_from_user_asm performs its
		 * own userspace range validation.  Arguments in $4/$5/$6,
		 * result (length copied or -EFAULT) in $2. */
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
1041 
1042 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/* Unchecked strlen of a user string; caller has done access_ok(). */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		/* $4 = string; result (length + 1, or 0 on fault) in $2. */
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}
1058 
1059 /*
1060  * strlen_user: - Get the size of a string in user space.
1061  * @str: The string to measure.
1062  *
1063  * Context: User context only.  This function may sleep.
1064  *
1065  * Get the size of a NUL-terminated string in user space.
1066  *
1067  * Returns the size of the string INCLUDING the terminating NUL.
1068  * On exception, returns 0.
1069  *
1070  * If there is a limit on the length of a valid string, you may wish to
1071  * consider using strnlen_user() instead.
1072  */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		/* Checked variant (see kernel-doc above): the asm routine
		 * validates the access itself.  $4 = string, result in $2
		 * -- size including the NUL, or 0 on exception. */
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}
1088 
1089 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/* Unchecked, bounded strlen of a user string; caller has done access_ok(). */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		/* $4 = string, $5 = byte limit; result in $2. */
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
1106 
/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:   The maximum number of bytes to examine.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * Unlike strlen_user(), at most @n bytes are examined.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		/* Checked, bounded variant: the asm routine validates the
		 * access.  $4 = string, $5 = limit, result in $2 -- size
		 * including the NUL, or 0 on exception. */
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
1137 
/*
 * One __ex_table record, as emitted by the __UA_ADDR directives in the
 * asm blocks above: the address of an instruction that may fault and
 * the address of the fixup code to resume at when it does.
 */
struct exception_table_entry
{
	unsigned long insn;	/* address of the potentially faulting insn */
	unsigned long nextinsn;	/* address of its fixup handler */
};
1143 
/*
 * Attempt to resolve a fault via the exception table; presumably
 * returns non-zero when a fixup entry was found and applied -- confirm
 * against the arch/mips fault-handling code.
 */
extern int fixup_exception(struct pt_regs *regs);
1145 
1146 #endif /* _ASM_UACCESS_H */
1147 

This page was automatically generated by LXR 0.3.1.  ?  Linux is a registered trademark of Linus Torvalds

    本站是提供个人知识管理的网络存储空间,所有内容均由用户发布,不代表本站观点。请注意甄别内容中的联系方式、诱导购买等信息,谨防诈骗。如发现有害或侵权内容,请点击一键举报。
    转藏 分享 献花(0

    0条评论

    发表

    请遵守用户 评论公约

    类似文章 更多