/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
11 #ifndef _ASM_MICROBLAZE_UACCESS_H
12 #define _ASM_MICROBLAZE_UACCESS_H
17 #include <linux/kernel.h>
18 #include <linux/errno.h>
19 #include <linux/sched.h> /* RLIMIT_FSIZE */
24 #include <asm/pgtable.h>
25 #include <linux/string.h>
28 #define VERIFY_WRITE 1
/*
 * On Microblaze the fs value is actually the top of the corresponding
 * address space.
 *
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * For a non-MMU arch like Microblaze, KERNEL_DS and USER_DS are equal.
 */
/* Build an mm_segment_t holding the address limit for a task. */
# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

/* noMMU: no user/kernel split, a single flat segment (limit 0 = unused). */
# define KERNEL_DS	MAKE_MM_SEG(0)
# define USER_DS	KERNEL_DS

/* MMU: kernel may touch all 4GB, user space ends at TASK_SIZE - 1.
 * NOTE(review): the two KERNEL_DS/USER_DS pairs presumably sit in opposite
 * branches of a CONFIG_MMU conditional not visible in this excerpt —
 * confirm against the complete file. */
# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)

/* Classic get_fs()/set_fs() API over the per-thread address limit. */
# define get_ds()	(KERNEL_DS)
# define get_fs()	(current_thread_info()->addr_limit)
# define set_fs(val)	(current_thread_info()->addr_limit = (val))

# define segment_eq(a, b)	((a).seg == (b).seg)
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
/* One entry per faulting instruction: 'insn' is the address of the
 * load/store that may fault, 'fixup' is where execution resumes. */
struct exception_table_entry {
	unsigned long insn, fixup;
76 /* Check against bounds of physical memory */
77 static inline int ___range_ok(unsigned long addr, unsigned long size)
79 return ((addr < memory_start) ||
80 ((addr + size) > memory_end));
83 #define __range_ok(addr, size) \
84 ___range_ok((unsigned long)(addr), (unsigned long)(size))
86 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
/*
 * MMU variant: address is valid if
 *  - "addr", "addr + size" and "size" are all below the segment limit.
 *
 * OR-ing addr, size and addr + size means that if the sum wraps past
 * zero, one of the operands still has high bits set, so the OR stays
 * large and the comparison against the limit fails — overflow cannot
 * defeat the check.
 */
#define access_ok(type, addr, size) \
	(get_fs().seg > (((unsigned long)(addr)) | \
		(size) | ((unsigned long)(addr) + (size))))

/* Debug leftover, kept disabled:
 * || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
 *	type?"WRITE":"READ",addr,size,get_fs().seg)) */
/* Real fixup/exception-table sections, consumed by the fault handler. */
# define __FIXUP_SECTION	".section .fixup,\"ax\"\n"
# define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"

/* Alternative definitions that throw the fixup data away (.discard).
 * NOTE(review): the two pairs presumably live in opposite branches of a
 * config conditional not visible in this excerpt — confirm. */
# define __FIXUP_SECTION	".section .discard,\"ax\"\n"
# define __EX_TABLE_SECTION	".section .discard,\"a\"\n"

/* Deliberately left undefined: referencing it turns a __get_user/__put_user
 * with an unsupported operand size into a link-time error. */
extern int bad_user_access_length(void);
/* FIXME: this function is a candidate for optimisation -> memcpy */
/*
 * noMMU __get_user(): copies sizeof(*(ptr)) bytes from user space with a
 * plain memcpy (no protection boundary on noMMU).  Unsupported sizes are
 * routed to __get_user_bad(), which evaluates to -EFAULT and forces a
 * link error via the undefined bad_user_access_length().
 */
#define __get_user(var, ptr)				\
	switch (sizeof(*(ptr))) {			\
		memcpy((void *) &(var), (ptr), 8);	\
		__gu_err = __get_user_bad();		\
#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
/* FIXME: __pu_val is not defined for every case here */
/*
 * noMMU __put_user(): size-dispatched store implemented with memcpy; the
 * default case becomes a link error through __put_user_bad().
 */
#define __put_user(var, ptr)				\
	switch (sizeof(*(ptr))) {			\
		typeof(*(ptr)) __pu_val = (var);	\
		memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
		__pu_err = __put_user_bad();		\
#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))

/* noMMU: no access checking, so checked and unchecked forms coincide. */
#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))
/*
 * noMMU copy routines: user and kernel share one flat address space, so a
 * plain memcpy does the job and the "bytes not copied" result is always 0.
 */
#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

/* The unchecked/inatomic variants are identical on noMMU. */
#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
			(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
			(__copy_from_user((to), (from), (n)))

/* Zero n bytes at addr; always reports 0 bytes left un-cleared. */
#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
179 static inline unsigned long clear_user(void *addr, unsigned long size)
181 if (access_ok(VERIFY_WRITE, addr, size))
182 size = __clear_user(addr, size);
/* Returns 0 if the exception is not found, and the fixup address otherwise. */
extern unsigned long search_exception_table(unsigned long);

/* String helpers implemented out of line; lengths are in bytes. */
extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);
192 #else /* CONFIG_MMU */
/*
 * All the __XXX version macros/functions below do not perform
 * access checking. It is assumed that the necessary checks have
 * already been performed before the function (macro) is called.
 */
/*
 * Checked get_user()/put_user(): verify the user range with access_ok(),
 * then perform the unchecked __get_user()/__put_user(); -EFAULT otherwise.
 */
#define get_user(x, ptr) \
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
		? __get_user((x), (ptr)) : -EFAULT; \
#define put_user(x, ptr) \
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
		? __put_user((x), (ptr)) : -EFAULT; \
/*
 * Unchecked MMU __get_user(): dispatch on sizeof(*(ptr)) to a fault-safe
 * load — lbu/lhu/lw for byte/halfword/word.  Any other size zeroes the
 * result and sets __gu_err = -EINVAL.  The fetched word is cast back to
 * the pointee type on assignment.
 */
#define __get_user(x, ptr) \
	unsigned long __gu_val; \
	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
	switch (sizeof(*(ptr))) { \
		__get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
		__get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
		__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
		__gu_val = 0; __gu_err = -EINVAL; \
	x = (__typeof__(*(ptr))) __gu_val; \
/*
 * One fault-safe load: label 1 marks the load that may fault; a .fixup
 * stub plus an __ex_table entry (bodies truncated in this copy) route a
 * fault to code that reports -EFAULT through __gu_err.
 */
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
	__asm__ __volatile__ ( \
		"1:" insn " %1, %2, r0; \
			.section .fixup,\"ax\"; \
			.section __ex_table,\"a\"; \
		: "=r"(__gu_err), "=r"(__gu_val) \
		: "r"(__gu_ptr), "i"(-EFAULT) \
/*
 * Unchecked MMU __put_user(): size-dispatched store — sb/sh/sw for
 * byte/halfword/word, and a dedicated helper for 8 bytes.  Unsupported
 * sizes set __gu_err = -EINVAL.
 */
#define __put_user(x, ptr) \
	__typeof__(*(ptr)) volatile __gu_val = (x); \
	switch (sizeof(__gu_val)) { \
		__put_user_asm("sb", (ptr), __gu_val, __gu_err); \
		__put_user_asm("sh", (ptr), __gu_val, __gu_err); \
		__put_user_asm("sw", (ptr), __gu_val, __gu_err); \
		__put_user_asm_8((ptr), __gu_val, __gu_err); \
		__gu_err = -EINVAL; \
/*
 * 8-byte __put_user helper.  NOTE(review): only fragments of the asm are
 * visible in this copy (an lwi plus the fixup/ex_table section directives);
 * confirm the full store sequence against the complete file before relying
 * on this description.
 */
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
	__asm__ __volatile__ ("	lwi	%0, %1, 0; \
			.section	.fixup,\"ax\"; \
			.section	__ex_table,\"a\"; \
		"r"(__gu_ptr), "i"(-EFAULT) \
/*
 * One fault-safe store: label 1 marks the store that may fault; the .fixup
 * stub (truncated here) reports -EFAULT through __gu_err via the
 * __ex_table entry.
 */
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
	__asm__ __volatile__ ( \
		"1:" insn " %1, %2, r0; \
			.section .fixup,\"ax\"; \
			.section __ex_table,\"a\"; \
		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
/* Return: number of bytes NOT cleared, i.e. 0 if OK or non-zero on fault. */
static inline unsigned long __must_check __clear_user(void __user *to,
	/* byte-wise memset(0): the faulting store at label 1 gets two words
	 * in __ex_table; loop control and fixup are truncated in this copy */
	__asm__ __volatile__ ( \
		"1:	sb	r0, %2, r0;" \
		"	addik	%0, %0, -1;" \
		"	addik	%2, %2, 1;" \
334 static inline unsigned long __must_check clear_user(void __user *to,
338 if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
341 return __clear_user(to, n);
/* MMU copy helpers: the checked forms verify with access_ok() first; the
 * unchecked __ variants simply alias them on this architecture. */
#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
		copy_from_user((to), (from), (n))

/* Evaluates to the number of bytes NOT copied (0 on success); the
 * access_ok() failure arm is truncated in this copy. */
#define copy_to_user(to, from, n) \
	(access_ok(VERIFY_WRITE, (to), (n)) ? \
		__copy_tofrom_user((void __user *)(to), \
			(__force const void __user *)(from), (n)) \
#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))

#define copy_from_user(to, from, n) \
	(access_ok(VERIFY_READ, (from), (n)) ? \
		__copy_tofrom_user((__force void __user *)(to), \
			(void __user *)(from), (n)) \
/* Low-level string helpers implemented out of line. */
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);

/* Copy a NUL-terminated string from user space; -EFAULT when even the
 * first byte is inaccessible.  NOTE(review): only the first byte is
 * range-checked here — presumably __strncpy_user relies on fault fixups
 * for the remainder; confirm against its implementation. */
#define strncpy_from_user(to, from, len)	\
		(access_ok(VERIFY_READ, from, 1) ?	\
			__strncpy_user(to, from, len) : -EFAULT)
/* Length of a user string; 0 when the pointer is not accessible at all. */
#define strnlen_user(str, len)	\
		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
372 #endif /* CONFIG_MMU */
374 extern unsigned long __copy_tofrom_user(void __user *to,
375 const void __user *from, unsigned long size);
377 #endif /* __ASSEMBLY__ */
378 #endif /* __KERNEL__ */
380 #endif /* _ASM_MICROBLAZE_UACCESS_H */