Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.
Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.
This one sat in -next for weeks. -3KLoC"
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...
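The shape of the interface after this pile: each architecture supplies only raw_copy_from_user()/raw_copy_to_user(), which return the number of bytes left uncopied, and the access_ok() check plus the tail zero-padding mentioned above live in one generic wrapper instead of being open-coded (and subtly different) in every arch/*. A simplified sketch of that wrapper; the real one also carries might_fault() and hardened-usercopy hooks:

/*
 * Sketch of the generic copy_from_user() built on RAW_COPY_USER,
 * simplified from the generic uaccess layer.
 */
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* bytes NOT copied */
	if (unlikely(res))
		memset(to + (n - res), 0, res);		/* uniform tail zeroing */
	return res;
}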
@@ -0,0 +1,55 @@
#ifndef _ASM_EXTABLE_H
#define _ASM_EXTABLE_H

/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *      lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc)			\
({								\
	if ((_fixup)->fixup.bits.valreg != 31)			\
		map_reg((_fixup)->fixup.bits.valreg) = 0;	\
	if ((_fixup)->fixup.bits.errreg != 31)			\
		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT;	\
	(pc) + (_fixup)->fixup.bits.nextinsn;			\
})

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta)			\
	do {							\
		(a)->fixup.unit = (b)->fixup.unit;		\
		(b)->fixup.unit = (tmp).fixup.unit;		\
	} while (0)

#endif
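For illustration, decoding one of these packed entries by hand looks like the sketch below; demo_fixup and the flat regs[] array are hypothetical stand-ins for the map_reg() accessor the trap handler passes to fixup_exception() above.

/* Illustrative only; mirrors what fixup_exception() above does. */
static unsigned long
demo_fixup(const struct exception_table_entry *e,
	   unsigned long fault_pc, unsigned long *regs)
{
	if (e->fixup.bits.valreg != 31)
		regs[e->fixup.bits.valreg] = 0;		/* zero the load target */
	if (e->fixup.bits.errreg != 31)
		regs[e->fixup.bits.errreg] = -EFAULT;	/* report the fault */
	/* resume nextinsn bytes past the faulting instruction */
	return fault_pc + e->fixup.bits.nextinsn;
}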
@@ -19,12 +19,8 @@
	"3:	.subsection 2\n" \
	"4:	br 1b\n" \
	"	.previous\n" \
	"	.section __ex_table,\"a\"\n" \
	"	.long 1b-.\n" \
	"	lda $31,3b-1b(%1)\n" \
	"	.long 2b-.\n" \
	"	lda $31,3b-2b(%1)\n" \
	"	.previous\n" \
	EXC(1b,3b,%1,$31) \
	EXC(2b,3b,%1,$31) \
	: "=&r" (oldval), "=&r"(ret) \
	: "r" (uaddr), "r"(oparg) \
	: "memory")
@@ -101,12 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	"3:	.subsection 2\n"
	"4:	br 1b\n"
	"	.previous\n"
	"	.section __ex_table,\"a\"\n"
	"	.long 1b-.\n"
	"	lda $31,3b-1b(%0)\n"
	"	.long 2b-.\n"
	"	lda $31,3b-2b(%0)\n"
	"	.previous\n"
	EXC(1b,3b,%0,$31)
	EXC(2b,3b,%0,$31)
	: "+r"(ret), "=&r"(prev), "=&r"(cmp)
	: "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
	: "memory");

@@ -1,10 +1,6 @@
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
@@ -20,9 +16,6 @@
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()	(current_thread_info()->addr_limit)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
@@ -39,13 +32,13 @@
 * - AND "addr+size" doesn't have any high-bits set
 * - OR we are in kernel mode.
 */
#define __access_ok(addr, size, segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)
#define __access_ok(addr, size) \
	((get_fs().seg & (addr | size | (addr+size))) == 0)

#define access_ok(type, addr, size)	\
({					\
	__chk_user_ptr(addr);		\
	__access_ok(((unsigned long)(addr)), (size), get_fs()); \
#define access_ok(type, addr, size)	\
({					\
	__chk_user_ptr(addr);		\
	__access_ok(((unsigned long)(addr)), (size)); \
})
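The single-AND test works because USER_DS.seg is -0x40000000000UL, i.e. every bit at or above bit 42 set, while Alpha's user address space ends at 2^42: a range passes exactly when its start, its length, and its end all stay below 2^42, and KERNEL_DS.seg == 0 waves everything through. A standalone demonstration (ordinary userspace C, not kernel code):

#include <stdio.h>

#define USER_SEG (-0x40000000000UL)	/* bits 42..63 set; user space lies below them */

static int demo_access_ok(unsigned long addr, unsigned long size)
{
	/* zero iff addr, size and addr+size all lie below 2^42 */
	return (USER_SEG & (addr | size | (addr + size))) == 0;
}

int main(void)
{
	printf("%d\n", demo_access_ok(0x1000UL, 0x100UL));		/* 1: in range */
	printf("%d\n", demo_access_ok(0x3ffffffffffUL, 0x100UL));	/* 0: runs past the limit */
	return 0;
}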

/*
@@ -61,9 +54,9 @@
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
	__get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
@@ -81,6 +74,11 @@
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
#define EXC(label,cont,res,err)				\
	".section __ex_table,\"a\"\n"			\
	"	.long "#label"-.\n"			\
	"	lda "#res","#cont"-"#label"("#err")\n"	\
	".previous\n"

extern void __get_user_unknown(void);
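The EXC() helper just stringizes its arguments into the same __ex_table fragment its callers used to open-code, so EXC(1b,2b,%0,%1), for example, pastes the following into the asm string (a sketch of the preprocessed result):

/* EXC(1b,2b,%0,%1) after preprocessing: */
".section __ex_table,\"a\"\n"
"	.long 1b-.\n"		/* pc-relative location of insn 1: */
"	lda %0,2b-1b(%1)\n"	/* valreg/nextinsn/errreg packed in MEM format */
".previous\n"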
@@ -100,23 +98,23 @@ extern void __get_user_unknown(void);
	__gu_err;	\
})

#define __get_user_check(x, ptr, size, segment)	\
({	\
	long __gu_err = -EFAULT;	\
	unsigned long __gu_val = 0;	\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	if (__access_ok((unsigned long)__gu_addr, size, segment)) {	\
		__gu_err = 0;	\
		switch (size) {	\
		case 1: __get_user_8(__gu_addr); break;	\
		case 2: __get_user_16(__gu_addr); break;	\
		case 4: __get_user_32(__gu_addr); break;	\
		case 8: __get_user_64(__gu_addr); break;	\
		default: __get_user_unknown(); break;	\
		}	\
	}	\
	(x) = (__force __typeof__(*(ptr))) __gu_val;	\
	__gu_err;	\
#define __get_user_check(x, ptr, size)	\
({	\
	long __gu_err = -EFAULT;	\
	unsigned long __gu_val = 0;	\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	if (__access_ok((unsigned long)__gu_addr, size)) {	\
		__gu_err = 0;	\
		switch (size) {	\
		case 1: __get_user_8(__gu_addr); break;	\
		case 2: __get_user_16(__gu_addr); break;	\
		case 4: __get_user_32(__gu_addr); break;	\
		case 8: __get_user_64(__gu_addr); break;	\
		default: __get_user_unknown(); break;	\
		}	\
	}	\
	(x) = (__force __typeof__(*(ptr))) __gu_val;	\
	__gu_err;	\
})

struct __large_struct { unsigned long buf[100]; };
@@ -125,20 +123,14 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_64(addr)	\
	__asm__("1: ldq %0,%2\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda %0, 2b-1b(%1)\n"	\
	".previous"	\
	EXC(1b,2b,%0,%1)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)	\
	__asm__("1: ldl %0,%2\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda %0, 2b-1b(%1)\n"	\
	".previous"	\
	EXC(1b,2b,%0,%1)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

@@ -148,20 +140,14 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_16(addr)	\
	__asm__("1: ldwu %0,%2\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda %0, 2b-1b(%1)\n"	\
	".previous"	\
	EXC(1b,2b,%0,%1)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)	\
	__asm__("1: ldbu %0,%2\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda %0, 2b-1b(%1)\n"	\
	".previous"	\
	EXC(1b,2b,%0,%1)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
@@ -177,12 +163,8 @@ struct __large_struct { unsigned long buf[100]; };
	"	extwh %1,%3,%1\n"	\
	"	or %0,%1,%0\n"	\
	"3:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda %0, 3b-1b(%2)\n"	\
	"	.long 2b - .\n"	\
	"	lda %0, 3b-2b(%2)\n"	\
	".previous"	\
	EXC(1b,3b,%0,%2)	\
	EXC(2b,3b,%0,%2)	\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err)); \
}
@@ -191,10 +173,7 @@ struct __large_struct { unsigned long buf[100]; };
	__asm__("1: ldq_u %0,0(%2)\n"	\
	"	extbl %0,%2,%0\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda %0, 2b-1b(%1)\n"	\
	".previous"	\
	EXC(1b,2b,%0,%1)	\
		: "=&r"(__gu_val), "=r"(__gu_err)	\
		: "r"(addr), "1"(__gu_err))
#endif
@@ -215,21 +194,21 @@ extern void __put_user_unknown(void);
	__pu_err;	\
})

#define __put_user_check(x, ptr, size, segment)	\
({	\
	long __pu_err = -EFAULT;	\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);	\
	if (__access_ok((unsigned long)__pu_addr, size, segment)) {	\
		__pu_err = 0;	\
		switch (size) {	\
		case 1: __put_user_8(x, __pu_addr); break;	\
		case 2: __put_user_16(x, __pu_addr); break;	\
		case 4: __put_user_32(x, __pu_addr); break;	\
		case 8: __put_user_64(x, __pu_addr); break;	\
		default: __put_user_unknown(); break;	\
		}	\
	}	\
	__pu_err;	\
#define __put_user_check(x, ptr, size)	\
({	\
	long __pu_err = -EFAULT;	\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);	\
	if (__access_ok((unsigned long)__pu_addr, size)) {	\
		__pu_err = 0;	\
		switch (size) {	\
		case 1: __put_user_8(x, __pu_addr); break;	\
		case 2: __put_user_16(x, __pu_addr); break;	\
		case 4: __put_user_32(x, __pu_addr); break;	\
		case 8: __put_user_64(x, __pu_addr); break;	\
		default: __put_user_unknown(); break;	\
		}	\
	}	\
	__pu_err;	\
})

/*
@@ -240,20 +219,14 @@ extern void __put_user_unknown(void);
#define __put_user_64(x, addr)	\
__asm__ __volatile__("1: stq %r2,%1\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda $31,2b-1b(%0)\n"	\
	".previous"	\
	EXC(1b,2b,$31,%0)	\
		: "=r"(__pu_err)	\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)	\
__asm__ __volatile__("1: stl %r2,%1\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda $31,2b-1b(%0)\n"	\
	".previous"	\
	EXC(1b,2b,$31,%0)	\
		: "=r"(__pu_err)	\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

@@ -263,20 +236,14 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
#define __put_user_16(x, addr)	\
__asm__ __volatile__("1: stw %r2,%1\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda $31,2b-1b(%0)\n"	\
	".previous"	\
	EXC(1b,2b,$31,%0)	\
		: "=r"(__pu_err)	\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)	\
__asm__ __volatile__("1: stb %r2,%1\n"	\
	"2:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda $31,2b-1b(%0)\n"	\
	".previous"	\
	EXC(1b,2b,$31,%0)	\
		: "=r"(__pu_err)	\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
@@ -298,16 +265,10 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
	"3:	stq_u %2,1(%5)\n"	\
	"4:	stq_u %1,0(%5)\n"	\
	"5:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda $31, 5b-1b(%0)\n"	\
	"	.long 2b - .\n"	\
	"	lda $31, 5b-2b(%0)\n"	\
	"	.long 3b - .\n"	\
	"	lda $31, 5b-3b(%0)\n"	\
	"	.long 4b - .\n"	\
	"	lda $31, 5b-4b(%0)\n"	\
	".previous"	\
	EXC(1b,5b,$31,%0)	\
	EXC(2b,5b,$31,%0)	\
	EXC(3b,5b,$31,%0)	\
	EXC(4b,5b,$31,%0)	\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),	\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),	\
		  "=&r"(__pu_tmp4)	\
@@ -324,12 +285,8 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
	"	or %1,%2,%1\n"	\
	"2:	stq_u %1,0(%4)\n"	\
	"3:\n"	\
	".section __ex_table,\"a\"\n"	\
	"	.long 1b - .\n"	\
	"	lda $31, 3b-1b(%0)\n"	\
	"	.long 2b - .\n"	\
	"	lda $31, 3b-2b(%0)\n"	\
	".previous"	\
	EXC(1b,3b,$31,%0)	\
	EXC(2b,3b,$31,%0)	\
		: "=r"(__pu_err),	\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)	\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
@@ -341,153 +298,37 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized the linker.  */
#ifdef MODULE
#define __module_address(sym)		"r"(sym),
#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
#endif
extern long __copy_user(void *to, const void *from, long len);

extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		__module_call(28, 3, __copy_user)
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: __module_address(__copy_user)
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1", "$2", "$3", "$4", "$5", "$28", "memory");

	return __cu_len;
	return __copy_user(to, (__force const void *)from, len);
}

#define __copy_to_user(to, from, n)	\
({	\
	__chk_user_ptr(to);	\
	__copy_tofrom_user_nocheck((__force void *)(to), (from), (n));	\
})
#define __copy_from_user(to, from, n)	\
({	\
	__chk_user_ptr(from);	\
	__copy_tofrom_user_nocheck((to), (__force void *)(from), (n));	\
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern inline long
copy_to_user(void __user *to, const void *from, long n)
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
	if (likely(__access_ok((unsigned long)to, n, get_fs())))
		n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
	return n;
	return __copy_user((__force void *)to, from, len);
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
	long res = n;
	if (likely(__access_ok((unsigned long)from, n, get_fs())))
		res = __copy_from_user_inatomic(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
	register void __user * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		__module_call(28, 2, __do_clear_user)
		: "=r"(__cl_len), "=r"(__cl_to)
		: __module_address(__do_clear_user)
		  "0"(__cl_len), "1"(__cl_to)
		: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
	return __cl_len;
}
extern long __clear_user(void __user *to, long len);

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len, get_fs()))
	if (__access_ok((unsigned long)to, len))
		len = __clear_user(to, len);
	return len;
}

#undef __module_address
#undef __module_call

#define user_addr_max() \
	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
	(uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *      lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc) \
({ \
	if ((_fixup)->fixup.bits.valreg != 31) \
		map_reg((_fixup)->fixup.bits.valreg) = 0; \
	if ((_fixup)->fixup.bits.errreg != 31) \
		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \
	(pc) + (_fixup)->fixup.bits.nextinsn; \
})

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta) \
	do { \
		(a)->fixup.unit = (b)->fixup.unit; \
		(b)->fixup.unit = (tmp).fixup.unit; \
	} while (0)

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */
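The net effect in this file: __copy_user() stops being a special-convention assembly entry (length in $0, pointers in $6/$7, return address in $28) and becomes an ordinary C-callable function, so the inline wrappers collapse to plain calls. Restated as a sketch — demo_raw_copy_from_user is a hypothetical name for what the raw_copy_from_user() in the hunk above actually does:

extern long __copy_user(void *to, const void *from, long len);

static inline unsigned long
demo_raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
	/* returns bytes left uncopied, per the raw_copy_*() contract */
	return __copy_user(to, (__force const void *)from, len);
}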
+42 -110
@@ -482,12 +482,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
	"	extwl %1,%3,%1\n"
	"	extwh %2,%3,%2\n"
	"3:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %1,3b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %2,3b-2b(%0)\n"
	".previous"
	EXC(1b,3b,%1,%0)
	EXC(2b,3b,%2,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
	: "r"(va), "0"(0));
	if (error)
@@ -502,12 +498,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
	"	extll %1,%3,%1\n"
	"	extlh %2,%3,%2\n"
	"3:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %1,3b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %2,3b-2b(%0)\n"
	".previous"
	EXC(1b,3b,%1,%0)
	EXC(2b,3b,%2,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
	: "r"(va), "0"(0));
	if (error)
@@ -522,12 +514,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
	"	extql %1,%3,%1\n"
	"	extqh %2,%3,%2\n"
	"3:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %1,3b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %2,3b-2b(%0)\n"
	".previous"
	EXC(1b,3b,%1,%0)
	EXC(2b,3b,%2,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
	: "r"(va), "0"(0));
	if (error)
@@ -551,16 +539,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
	"3:	stq_u %2,1(%5)\n"
	"4:	stq_u %1,0(%5)\n"
	"5:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %2,5b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %1,5b-2b(%0)\n"
	"	.long 3b - .\n"
	"	lda $31,5b-3b(%0)\n"
	"	.long 4b - .\n"
	"	lda $31,5b-4b(%0)\n"
	".previous"
	EXC(1b,5b,%2,%0)
	EXC(2b,5b,%1,%0)
	EXC(3b,5b,$31,%0)
	EXC(4b,5b,$31,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
	  "=&r"(tmp3), "=&r"(tmp4)
	: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -581,16 +563,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
	"3:	stq_u %2,3(%5)\n"
	"4:	stq_u %1,0(%5)\n"
	"5:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %2,5b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %1,5b-2b(%0)\n"
	"	.long 3b - .\n"
	"	lda $31,5b-3b(%0)\n"
	"	.long 4b - .\n"
	"	lda $31,5b-4b(%0)\n"
	".previous"
	EXC(1b,5b,%2,%0)
	EXC(2b,5b,%1,%0)
	EXC(3b,5b,$31,%0)
	EXC(4b,5b,$31,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
	  "=&r"(tmp3), "=&r"(tmp4)
	: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -611,16 +587,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
	"3:	stq_u %2,7(%5)\n"
	"4:	stq_u %1,0(%5)\n"
	"5:\n"
	".section __ex_table,\"a\"\n\t"
	"	.long 1b - .\n"
	"	lda %2,5b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %1,5b-2b(%0)\n"
	"	.long 3b - .\n"
	"	lda $31,5b-3b(%0)\n"
	"	.long 4b - .\n"
	"	lda $31,5b-4b(%0)\n"
	".previous"
	EXC(1b,5b,%2,%0)
	EXC(2b,5b,%1,%0)
	EXC(3b,5b,$31,%0)
	EXC(4b,5b,$31,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
	  "=&r"(tmp3), "=&r"(tmp4)
	: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -802,7 +772,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that
	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
	if (!__access_ok((unsigned long)va, 0, USER_DS))
	if ((unsigned long)va >= TASK_SIZE)
		goto give_sigsegv;

	++unaligned[1].count;
@@ -835,12 +805,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	"	extwl %1,%3,%1\n"
	"	extwh %2,%3,%2\n"
	"3:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %1,3b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %2,3b-2b(%0)\n"
	".previous"
	EXC(1b,3b,%1,%0)
	EXC(2b,3b,%2,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
	: "r"(va), "0"(0));
	if (error)
@@ -855,12 +821,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	"	extll %1,%3,%1\n"
	"	extlh %2,%3,%2\n"
	"3:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %1,3b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %2,3b-2b(%0)\n"
	".previous"
	EXC(1b,3b,%1,%0)
	EXC(2b,3b,%2,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
	: "r"(va), "0"(0));
	if (error)
@@ -875,12 +837,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	"	extql %1,%3,%1\n"
	"	extqh %2,%3,%2\n"
	"3:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %1,3b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %2,3b-2b(%0)\n"
	".previous"
	EXC(1b,3b,%1,%0)
	EXC(2b,3b,%2,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
	: "r"(va), "0"(0));
	if (error)
@@ -895,12 +853,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	"	extll %1,%3,%1\n"
	"	extlh %2,%3,%2\n"
	"3:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %1,3b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %2,3b-2b(%0)\n"
	".previous"
	EXC(1b,3b,%1,%0)
	EXC(2b,3b,%2,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
	: "r"(va), "0"(0));
	if (error)
@@ -915,12 +869,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	"	extql %1,%3,%1\n"
	"	extqh %2,%3,%2\n"
	"3:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %1,3b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %2,3b-2b(%0)\n"
	".previous"
	EXC(1b,3b,%1,%0)
	EXC(2b,3b,%2,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
	: "r"(va), "0"(0));
	if (error)
@@ -944,16 +894,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	"3:	stq_u %2,1(%5)\n"
	"4:	stq_u %1,0(%5)\n"
	"5:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %2,5b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %1,5b-2b(%0)\n"
	"	.long 3b - .\n"
	"	lda $31,5b-3b(%0)\n"
	"	.long 4b - .\n"
	"	lda $31,5b-4b(%0)\n"
	".previous"
	EXC(1b,5b,%2,%0)
	EXC(2b,5b,%1,%0)
	EXC(3b,5b,$31,%0)
	EXC(4b,5b,$31,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
	  "=&r"(tmp3), "=&r"(tmp4)
	: "r"(va), "r"(*reg_addr), "0"(0));
@@ -978,16 +922,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	"3:	stq_u %2,3(%5)\n"
	"4:	stq_u %1,0(%5)\n"
	"5:\n"
	".section __ex_table,\"a\"\n"
	"	.long 1b - .\n"
	"	lda %2,5b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %1,5b-2b(%0)\n"
	"	.long 3b - .\n"
	"	lda $31,5b-3b(%0)\n"
	"	.long 4b - .\n"
	"	lda $31,5b-4b(%0)\n"
	".previous"
	EXC(1b,5b,%2,%0)
	EXC(2b,5b,%1,%0)
	EXC(3b,5b,$31,%0)
	EXC(4b,5b,$31,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
	  "=&r"(tmp3), "=&r"(tmp4)
	: "r"(va), "r"(*reg_addr), "0"(0));
@@ -1012,16 +950,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
	"3:	stq_u %2,7(%5)\n"
	"4:	stq_u %1,0(%5)\n"
	"5:\n"
	".section __ex_table,\"a\"\n\t"
	"	.long 1b - .\n"
	"	lda %2,5b-1b(%0)\n"
	"	.long 2b - .\n"
	"	lda %1,5b-2b(%0)\n"
	"	.long 3b - .\n"
	"	lda $31,5b-3b(%0)\n"
	"	.long 4b - .\n"
	"	lda $31,5b-4b(%0)\n"
	".previous"
	EXC(1b,5b,%2,%0)
	EXC(2b,5b,%1,%0)
	EXC(3b,5b,$31,%0)
	EXC(4b,5b,$31,%0)
	: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
	  "=&r"(tmp3), "=&r"(tmp4)
	: "r"(va), "r"(*reg_addr), "0"(0));
@@ -1047,7 +979,7 @@ give_sigsegv:
	/* We need to replicate some of the logic in mm/fault.c,
	   since we don't have access to the fault code in the
	   exception handling return path. */
	if (!__access_ok((unsigned long)va, 0, USER_DS))
	if ((unsigned long)va >= TASK_SIZE)
		info.si_code = SEGV_ACCERR;
	else {
		struct mm_struct *mm = current->mm;

+26 -40
@@ -8,21 +8,6 @@
 * right "bytes left to zero" value (and that it is updated only _after_
 * a successful copy).  There is also some rather minor exception setup
 * stuff.
 *
 * NOTE! This is not directly C-callable, because the calling semantics
 * are different:
 *
 * Inputs:
 *	length in $0
 *	destination address in $6
 *	exception pointer in $7
 *	return address in $28 (exceptions expect it there)
 *
 * Outputs:
 *	bytes left to copy in $0
 *
 * Clobbers:
 *	$1,$2,$3,$4,$5,$6
 */
#include <asm/export.h>

@@ -38,62 +23,63 @@
	.set noreorder
	.align 4

	.globl __do_clear_user
	.ent __do_clear_user
	.frame $30, 0, $28
	.globl __clear_user
	.ent __clear_user
	.frame $30, 0, $26
	.prologue 0

$loop:
	and $1, 3, $4 # e0 :
	beq $4, 1f # .. e1 :

0:	EX( stq_u $31, 0($6) ) # e0 : zero one word
0:	EX( stq_u $31, 0($16) ) # e0 : zero one word
	subq $0, 8, $0 # .. e1 :
	subq $4, 1, $4 # e0 :
	addq $6, 8, $6 # .. e1 :
	addq $16, 8, $16 # .. e1 :
	bne $4, 0b # e1 :
	unop # :

1:	bic $1, 3, $1 # e0 :
	beq $1, $tail # .. e1 :

2:	EX( stq_u $31, 0($6) ) # e0 : zero four words
2:	EX( stq_u $31, 0($16) ) # e0 : zero four words
	subq $0, 8, $0 # .. e1 :
	EX( stq_u $31, 8($6) ) # e0 :
	EX( stq_u $31, 8($16) ) # e0 :
	subq $0, 8, $0 # .. e1 :
	EX( stq_u $31, 16($6) ) # e0 :
	EX( stq_u $31, 16($16) ) # e0 :
	subq $0, 8, $0 # .. e1 :
	EX( stq_u $31, 24($6) ) # e0 :
	EX( stq_u $31, 24($16) ) # e0 :
	subq $0, 8, $0 # .. e1 :
	subq $1, 4, $1 # e0 :
	addq $6, 32, $6 # .. e1 :
	addq $16, 32, $16 # .. e1 :
	bne $1, 2b # e1 :

$tail:
	bne $2, 1f # e1 : is there a tail to do?
	ret $31, ($28), 1 # .. e1 :
	ret $31, ($26), 1 # .. e1 :

1:	EX( ldq_u $5, 0($6) ) # e0 :
1:	EX( ldq_u $5, 0($16) ) # e0 :
	clr $0 # .. e1 :
	nop # e1 :
	mskqh $5, $0, $5 # e0 :
	EX( stq_u $5, 0($6) ) # e0 :
	ret $31, ($28), 1 # .. e1 :
	EX( stq_u $5, 0($16) ) # e0 :
	ret $31, ($26), 1 # .. e1 :

__do_clear_user:
	and $6, 7, $4 # e0 : find dest misalignment
__clear_user:
	and $17, $17, $0
	and $16, 7, $4 # e0 : find dest misalignment
	beq $0, $zerolength # .. e1 :
	addq $0, $4, $1 # e0 : bias counter
	and $1, 7, $2 # e1 : number of bytes in tail
	srl $1, 3, $1 # e0 :
	beq $4, $loop # .. e1 :

	EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in
	EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in
	beq $1, $oneword # .. e1 : sub-word store?

	mskql $5, $6, $5 # e0 : take care of misaligned head
	addq $6, 8, $6 # .. e1 :
	EX( stq_u $5, -8($6) ) # e0 :
	mskql $5, $16, $5 # e0 : take care of misaligned head
	addq $16, 8, $16 # .. e1 :
	EX( stq_u $5, -8($16) ) # e0 :
	addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment
	subq $1, 1, $1 # e0 :
	subq $0, 8, $0 # .. e1 :
@@ -101,15 +87,15 @@ __do_clear_user:
	unop # :

$oneword:
	mskql $5, $6, $4 # e0 :
	mskql $5, $16, $4 # e0 :
	mskqh $5, $2, $5 # e0 :
	or $5, $4, $5 # e1 :
	EX( stq_u $5, 0($6) ) # e0 :
	EX( stq_u $5, 0($16) ) # e0 :
	clr $0 # .. e1 :

$zerolength:
$exception:
	ret $31, ($28), 1 # .. e1 :
	ret $31, ($26), 1 # .. e1 :

	.end __do_clear_user
	EXPORT_SYMBOL(__do_clear_user)
	.end __clear_user
	EXPORT_SYMBOL(__clear_user)

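The wholesale register renumbering above follows from the same change: called as normal C, __clear_user() receives its arguments in $16/$17 and returns through $26 under the standard Alpha calling convention — hence every $6/$28 becoming $16/$26, with `and $17, $17, $0` seeding the byte count. Its C-level contract, restated as a sketch (demo_clear_user is a hypothetical name):

extern long __clear_user(void __user *to, long len);

static inline long demo_clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len))	/* two-argument form from this series */
		len = __clear_user(to, len);
	return len;					/* bytes NOT cleared */
}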
+34 -48
@@ -9,21 +9,6 @@
 * contains the right "bytes left to copy" value (and that it is updated
 * only _after_ a successful copy).  There is also some rather minor
 * exception setup stuff..
 *
 * NOTE! This is not directly C-callable, because the calling semantics are
 * different:
 *
 * Inputs:
 *	length in $0
 *	destination address in $6
 *	source address in $7
 *	return address in $28
 *
 * Outputs:
 *	bytes left to copy in $0
 *
 * Clobbers:
 *	$1,$2,$3,$4,$5,$6,$7
 */

#include <asm/export.h>
@@ -49,58 +34,59 @@
	.ent __copy_user
__copy_user:
	.prologue 0
	and $6,7,$3
	and $18,$18,$0
	and $16,7,$3
	beq $0,$35
	beq $3,$36
	subq $3,8,$3
	.align 4
$37:
	EXI( ldq_u $1,0($7) )
	EXO( ldq_u $2,0($6) )
	extbl $1,$7,$1
	mskbl $2,$6,$2
	insbl $1,$6,$1
	EXI( ldq_u $1,0($17) )
	EXO( ldq_u $2,0($16) )
	extbl $1,$17,$1
	mskbl $2,$16,$2
	insbl $1,$16,$1
	addq $3,1,$3
	bis $1,$2,$1
	EXO( stq_u $1,0($6) )
	EXO( stq_u $1,0($16) )
	subq $0,1,$0
	addq $6,1,$6
	addq $7,1,$7
	addq $16,1,$16
	addq $17,1,$17
	beq $0,$41
	bne $3,$37
$36:
	and $7,7,$1
	and $17,7,$1
	bic $0,7,$4
	beq $1,$43
	beq $4,$48
	EXI( ldq_u $3,0($7) )
	EXI( ldq_u $3,0($17) )
	.align 4
$50:
	EXI( ldq_u $2,8($7) )
	EXI( ldq_u $2,8($17) )
	subq $4,8,$4
	extql $3,$7,$3
	extqh $2,$7,$1
	extql $3,$17,$3
	extqh $2,$17,$1
	bis $3,$1,$1
	EXO( stq $1,0($6) )
	addq $7,8,$7
	EXO( stq $1,0($16) )
	addq $17,8,$17
	subq $0,8,$0
	addq $6,8,$6
	addq $16,8,$16
	bis $2,$2,$3
	bne $4,$50
$48:
	beq $0,$41
	.align 4
$57:
	EXI( ldq_u $1,0($7) )
	EXO( ldq_u $2,0($6) )
	extbl $1,$7,$1
	mskbl $2,$6,$2
	insbl $1,$6,$1
	EXI( ldq_u $1,0($17) )
	EXO( ldq_u $2,0($16) )
	extbl $1,$17,$1
	mskbl $2,$16,$2
	insbl $1,$16,$1
	bis $1,$2,$1
	EXO( stq_u $1,0($6) )
	EXO( stq_u $1,0($16) )
	subq $0,1,$0
	addq $6,1,$6
	addq $7,1,$7
	addq $16,1,$16
	addq $17,1,$17
	bne $0,$57
	br $31,$41
	.align 4
@@ -108,27 +94,27 @@ $43:
	beq $4,$65
	.align 4
$66:
	EXI( ldq $1,0($7) )
	EXI( ldq $1,0($17) )
	subq $4,8,$4
	EXO( stq $1,0($6) )
	addq $7,8,$7
	EXO( stq $1,0($16) )
	addq $17,8,$17
	subq $0,8,$0
	addq $6,8,$6
	addq $16,8,$16
	bne $4,$66
$65:
	beq $0,$41
	EXI( ldq $2,0($7) )
	EXO( ldq $1,0($6) )
	EXI( ldq $2,0($17) )
	EXO( ldq $1,0($16) )
	mskql $2,$0,$2
	mskqh $1,$0,$1
	bis $2,$1,$2
	EXO( stq $2,0($6) )
	EXO( stq $2,0($16) )
	bis $31,$31,$0
$41:
$35:
$exitin:
$exitout:
	ret $31,($28),1
	ret $31,($26),1

	.end __copy_user
	EXPORT_SYMBOL(__copy_user)

@@ -45,10 +45,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
	__asm__ __volatile__( \
	"1:	ldq_u %0,%2\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	"	.long 1b - .\n" \
	"	lda %0,2b-1b(%1)\n" \
	".previous" \
	EXC(1b,2b,%0,%1) \
		: "=r"(x), "=r"(__guu_err) \
		: "m"(__m(ptr)), "1"(0)); \
	__guu_err; \
@@ -60,10 +57,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
	__asm__ __volatile__( \
	"1:	stq_u %2,%1\n" \
	"2:\n" \
	".section __ex_table,\"a\"\n" \
	"	.long 1b - ." \
	"	lda $31,2b-1b(%0)\n" \
	".previous" \
	EXC(1b,2b,$31,%0) \
		: "=r"(__puu_err) \
		: "m"(__m(addr)), "rJ"(x), "0"(0)); \
	__puu_err; \

@@ -9,21 +9,6 @@
 * a successful copy).  There is also some rather minor exception setup
 * stuff.
 *
 * NOTE! This is not directly C-callable, because the calling semantics
 * are different:
 *
 * Inputs:
 *	length in $0
 *	destination address in $6
 *	exception pointer in $7
 *	return address in $28 (exceptions expect it there)
 *
 * Outputs:
 *	bytes left to copy in $0
 *
 * Clobbers:
 *	$1,$2,$3,$4,$5,$6
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
@@ -56,14 +41,15 @@
	.set noreorder
	.align 4

	.globl __do_clear_user
	.ent __do_clear_user
	.frame $30, 0, $28
	.globl __clear_user
	.ent __clear_user
	.frame $30, 0, $26
	.prologue 0

				# Pipeline info : Slotting & Comments
__do_clear_user:
	and $6, 7, $4 # .. E .. .. : find dest head misalignment
__clear_user:
	and $17, $17, $0
	and $16, 7, $4 # .. E .. .. : find dest head misalignment
	beq $0, $zerolength # U .. .. .. : U L U L

	addq $0, $4, $1 # .. .. .. E : bias counter
@@ -75,14 +61,14 @@ __do_clear_user:

	/*
	 * Head is not aligned.  Write (8 - $4) bytes to head of destination
	 * This means $6 is known to be misaligned
	 * This means $16 is known to be misaligned
	 */
	EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in
	EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in
	beq $1, $onebyte # .. .. U .. : sub-word store?
	mskql $5, $6, $5 # .. U .. .. : take care of misaligned head
	addq $6, 8, $6 # E .. .. .. : L U U L
	mskql $5, $16, $5 # .. U .. .. : take care of misaligned head
	addq $16, 8, $16 # E .. .. .. : L U U L

	EX( stq_u $5, -8($6) ) # .. .. .. L :
	EX( stq_u $5, -8($16) ) # .. .. .. L :
	subq $1, 1, $1 # .. .. E .. :
	addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment
	subq $0, 8, $0 # E .. .. .. : U L U L
@@ -93,11 +79,11 @@ __do_clear_user:
	 * values upon initial entry to the loop
	 * $1 is number of quadwords to clear (zero is a valid value)
	 * $2 is number of trailing bytes (0..7) ($2 never used...)
	 * $6 is known to be aligned 0mod8
	 * $16 is known to be aligned 0mod8
	 */
$headalign:
	subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop
	and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop
	and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop
	subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop)
	blt $4, $trailquad # U .. .. .. : U L U L

@@ -114,21 +100,21 @@ $headalign:
	beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64

$alignmod64:
	EX( stq_u $31, 0($6) ) # .. .. .. L
	EX( stq_u $31, 0($16) ) # .. .. .. L
	addq $3, 8, $3 # .. .. E ..
	subq $0, 8, $0 # .. E .. ..
	nop # E .. .. .. : U L U L

	nop # .. .. .. E
	subq $1, 1, $1 # .. .. E ..
	addq $6, 8, $6 # .. E .. ..
	addq $16, 8, $16 # .. E .. ..
	blt $3, $alignmod64 # U .. .. .. : U L U L

$bigalign:
	/*
	 * $0 is the number of bytes left
	 * $1 is the number of quads left
	 * $6 is aligned 0mod64
	 * $16 is aligned 0mod64
	 * we know that we'll be taking a minimum of one trip through
	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
	 * We are _not_ going to update $0 after every single store.  That
@@ -145,39 +131,39 @@ $bigalign:
	nop # E :
	nop # E :
	nop # E :
	bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest
	bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest
	/* This might actually help for the current trip... */

$do_wh64:
	wh64 ($3) # .. .. .. L1 : memory subsystem hint
	subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop?
	EX( stq_u $31, 0($6) ) # .. L .. ..
	EX( stq_u $31, 0($16) ) # .. L .. ..
	subq $0, 8, $0 # E .. .. .. : U L U L

	addq $6, 128, $3 # E : Target address of wh64
	EX( stq_u $31, 8($6) ) # L :
	EX( stq_u $31, 16($6) ) # L :
	addq $16, 128, $3 # E : Target address of wh64
	EX( stq_u $31, 8($16) ) # L :
	EX( stq_u $31, 16($16) ) # L :
	subq $0, 16, $0 # E : U L L U

	nop # E :
	EX( stq_u $31, 24($6) ) # L :
	EX( stq_u $31, 32($6) ) # L :
	EX( stq_u $31, 24($16) ) # L :
	EX( stq_u $31, 32($16) ) # L :
	subq $0, 168, $5 # E : U L L U : two trips through the loop left?
	/* 168 = 192 - 24, since we've already completed some stores */

	subq $0, 16, $0 # E :
	EX( stq_u $31, 40($6) ) # L :
	EX( stq_u $31, 48($6) ) # L :
	cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle
	EX( stq_u $31, 40($16) ) # L :
	EX( stq_u $31, 48($16) ) # L :
	cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle

	subq $1, 8, $1 # E :
	subq $0, 16, $0 # E :
	EX( stq_u $31, 56($6) ) # L :
	EX( stq_u $31, 56($16) ) # L :
	nop # E : U L U L

	nop # E :
	subq $0, 8, $0 # E :
	addq $6, 64, $6 # E :
	addq $16, 64, $16 # E :
	bge $4, $do_wh64 # U : U L U L

$trailquad:
@@ -190,14 +176,14 @@ $trailquad:
	beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go

$onequad:
	EX( stq_u $31, 0($6) ) # .. .. .. L
	EX( stq_u $31, 0($16) ) # .. .. .. L
	subq $1, 1, $1 # .. .. E ..
	subq $0, 8, $0 # .. E .. ..
	nop # E .. .. .. : U L U L

	nop # .. .. .. E
	nop # .. .. E ..
	addq $6, 8, $6 # .. E .. ..
	addq $16, 8, $16 # .. E .. ..
	bgt $1, $onequad # U .. .. .. : U L U L

	# We have an unknown number of bytes left to go.
@@ -211,9 +197,9 @@ $trailbytes:
	# so we will use $0 as the loop counter
	# We know for a fact that $0 > 0 zero due to previous context
$onebyte:
	EX( stb $31, 0($6) ) # .. .. .. L
	EX( stb $31, 0($16) ) # .. .. .. L
	subq $0, 1, $0 # .. .. E .. :
	addq $6, 1, $6 # .. E .. .. :
	addq $16, 1, $16 # .. E .. .. :
	bgt $0, $onebyte # U .. .. .. : U L U L

$zerolength:
@@ -221,6 +207,6 @@ $exception:			# Destination for exception recovery(?)
	nop # .. .. .. E :
	nop # .. .. E .. :
	nop # .. E .. .. :
	ret $31, ($28), 1 # L0 .. .. .. : L U L U
	.end __do_clear_user
	EXPORT_SYMBOL(__do_clear_user)
	ret $31, ($26), 1 # L0 .. .. .. : L U L U
	.end __clear_user
	EXPORT_SYMBOL(__clear_user)

@@ -12,21 +12,6 @@
 * only _after_ a successful copy).  There is also some rather minor
 * exception setup stuff..
 *
 * NOTE! This is not directly C-callable, because the calling semantics are
 * different:
 *
 * Inputs:
 *	length in $0
 *	destination address in $6
 *	source address in $7
 *	return address in $28
 *
 * Outputs:
 *	bytes left to copy in $0
 *
 * Clobbers:
 *	$1,$2,$3,$4,$5,$6,$7
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
@@ -60,10 +45,11 @@
				# Pipeline info: Slotting & Comments
__copy_user:
	.prologue 0
	subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy?
	andq $18, $18, $0
	subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy?
	beq $0, $zerolength # U .. .. .. : U L U L

	and $6,7,$3 # .. .. .. E : is leading dest misalignment
	and $16,7,$3 # .. .. .. E : is leading dest misalignment
	ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data
	beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall)
	subq $3, 8, $3 # E .. .. .. : L U U L : trip counter
@@ -73,17 +59,17 @@ __copy_user:
	 * We know we have at least one trip through this loop
	 */
$aligndest:
	EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores
	addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG
	EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores
	addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG
	addq $3,1,$3 # .. E .. .. :
	nop # E .. .. .. : U L U L

	/*
	 * the -1 is to compensate for the inc($6) done in a previous quadpack
	 * the -1 is to compensate for the inc($16) done in a previous quadpack
	 * which allows us zero dependencies within either quadpack in the loop
	 */
	EXO( stb $1,-1($6) ) # .. .. .. L :
	addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG
	EXO( stb $1,-1($16) ) # .. .. .. L :
	addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG
	subq $0,1,$0 # .. E .. .. :
	bne $3, $aligndest # U .. .. .. : U L U L

@@ -92,29 +78,29 @@ $aligndest:
	 * If we arrived via branch, we have a minimum of 32 bytes
	 */
$destaligned:
	and $7,7,$1 # .. .. .. E : Check _current_ source alignment
	and $17,7,$1 # .. .. .. E : Check _current_ source alignment
	bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop
	EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code
	EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code
	beq $1,$quadaligned # U .. .. .. : U L U L

	/*
	 * In the worst case, we've just executed an ldq_u here from 0($7)
	 * In the worst case, we've just executed an ldq_u here from 0($17)
	 * and we'll repeat it once if we take the branch
	 */

	/* Misaligned quadword loop - not unrolled.  Leave it that way. */
$misquad:
	EXI( ldq_u $2,8($7) ) # .. .. .. L :
	EXI( ldq_u $2,8($17) ) # .. .. .. L :
	subq $4,8,$4 # .. .. E .. :
	extql $3,$7,$3 # .. U .. .. :
	extqh $2,$7,$1 # U .. .. .. : U U L L
	extql $3,$17,$3 # .. U .. .. :
	extqh $2,$17,$1 # U .. .. .. : U U L L

	bis $3,$1,$1 # .. .. .. E :
	EXO( stq $1,0($6) ) # .. .. L .. :
	addq $7,8,$7 # .. E .. .. :
	EXO( stq $1,0($16) ) # .. .. L .. :
	addq $17,8,$17 # .. E .. .. :
	subq $0,8,$0 # E .. .. .. : U L L U

	addq $6,8,$6 # .. .. .. E :
	addq $16,8,$16 # .. .. .. E :
	bis $2,$2,$3 # .. .. E .. :
	nop # .. E .. .. :
	bne $4,$misquad # U .. .. .. : U L U L
@@ -125,8 +111,8 @@ $misquad:
	beq $0,$zerolength # U .. .. .. : U L U L

	/* We know we have at least one trip through the byte loop */
	EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
	addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG)
	EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
	addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG)
	nop # .. E .. .. :
	br $31, $dirtyentry # L0 .. .. .. : L U U L
	/* Do the trailing byte loop load, then hop into the store part of the loop */
@@ -136,8 +122,8 @@ $misquad:
	 * Based upon the usage context, it's worth the effort to unroll this loop
	 * $0 - number of bytes to be moved
	 * $4 - number of bytes to move as quadwords
	 * $6 is current destination address
	 * $7 is current source address
	 * $16 is current destination address
	 * $17 is current source address
	 */
$quadaligned:
	subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff
@@ -155,29 +141,29 @@ $quadaligned:
	 * instruction memory hint instruction).
	 */
$unroll4:
	EXI( ldq $1,0($7) ) # .. .. .. L
	EXI( ldq $2,8($7) ) # .. .. L ..
	EXI( ldq $1,0($17) ) # .. .. .. L
	EXI( ldq $2,8($17) ) # .. .. L ..
	subq $4,32,$4 # .. E .. ..
	nop # E .. .. .. : U U L L

	addq $7,16,$7 # .. .. .. E
	EXO( stq $1,0($6) ) # .. .. L ..
	EXO( stq $2,8($6) ) # .. L .. ..
	addq $17,16,$17 # .. .. .. E
	EXO( stq $1,0($16) ) # .. .. L ..
	EXO( stq $2,8($16) ) # .. L .. ..
	subq $0,16,$0 # E .. .. .. : U L L U

	addq $6,16,$6 # .. .. .. E
	EXI( ldq $1,0($7) ) # .. .. L ..
	EXI( ldq $2,8($7) ) # .. L .. ..
	addq $16,16,$16 # .. .. .. E
	EXI( ldq $1,0($17) ) # .. .. L ..
	EXI( ldq $2,8($17) ) # .. L .. ..
	subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip?

	EXO( stq $1,0($6) ) # .. .. .. L
	EXO( stq $2,8($6) ) # .. .. L ..
	EXO( stq $1,0($16) ) # .. .. .. L
	EXO( stq $2,8($16) ) # .. .. L ..
	subq $0,16,$0 # .. E .. ..
	addq $7,16,$7 # E .. .. .. : U L L U
	addq $17,16,$17 # E .. .. .. : U L L U

	nop # .. .. .. E
	nop # .. .. E ..
	addq $6,16,$6 # .. E .. ..
	addq $16,16,$16 # .. E .. ..
	bgt $3,$unroll4 # U .. .. .. : U L U L

	nop
@@ -186,14 +172,14 @@ $unroll4:
	beq $4, $noquads

$onequad:
	EXI( ldq $1,0($7) )
	EXI( ldq $1,0($17) )
	subq $4,8,$4
	addq $7,8,$7
	addq $17,8,$17
	nop

	EXO( stq $1,0($6) )
	EXO( stq $1,0($16) )
	subq $0,8,$0
	addq $6,8,$6
	addq $16,8,$16
	bne $4,$onequad

$noquads:
@@ -207,23 +193,23 @@ $noquads:
	 * There's no point in doing a lot of complex alignment calculations to try to
	 * to quadword stuff for a small amount of data.
	 * $0 - remaining number of bytes left to copy
	 * $6 - current dest addr
	 * $7 - current source addr
	 * $16 - current dest addr
	 * $17 - current source addr
	 */

$onebyteloop:
	EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
	addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG)
	EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
	addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG)
	nop # .. E .. .. :
	nop # E .. .. .. : U L U L

$dirtyentry:
	/*
	 * the -1 is to compensate for the inc($6) done in a previous quadpack
	 * the -1 is to compensate for the inc($16) done in a previous quadpack
	 * which allows us zero dependencies within either quadpack in the loop
	 */
	EXO ( stb $2,-1($6) ) # .. .. .. L :
	addq $7,1,$7 # .. .. E .. : quadpack as the load
	EXO ( stb $2,-1($16) ) # .. .. .. L :
	addq $17,1,$17 # .. .. E .. : quadpack as the load
	subq $0,1,$0 # .. E .. .. : change count _after_ copy
	bgt $0,$onebyteloop # U .. .. .. : U L U L

@@ -233,7 +219,7 @@ $exitout:			# Destination for exception recovery(?)
	nop # .. .. .. E
	nop # .. .. E ..
	nop # .. E .. ..
	ret $31,($28),1 # L0 .. .. .. : L U L U
	ret $31,($26),1 # L0 .. .. .. : L U L U

	.end __copy_user
	EXPORT_SYMBOL(__copy_user)

@@ -6,6 +6,7 @@ generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += extable.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h

@@ -24,12 +24,10 @@
#ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H

#include <linux/sched.h>
#include <asm/errno.h>
#include <linux/string.h>	/* for generic string functions */


#define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
#define __kernel_ok		(uaccess_kernel())

/*
 * Algorithmically, for __user_ok() we want do:
@@ -170,7 +168,7 @@


static inline unsigned long
__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	long res = 0;
	char val;
@@ -396,11 +394,8 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
	return res;
}

extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
	unsigned long n);

static inline unsigned long
__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	long res = 0;
	char val;
@@ -726,24 +721,20 @@ static inline long __arc_strnlen_user(const char __user *s, long n)
}

#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#define __copy_from_user(t, f, n)	__arc_copy_from_user(t, f, n)
#define __copy_to_user(t, f, n)	__arc_copy_to_user(t, f, n)

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

#define __clear_user(d, n)		__arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n)		__arc_strnlen_user(s, n)
#else
extern long arc_copy_from_user_noinline(void *to, const void __user * from,
	unsigned long n);
extern long arc_copy_to_user_noinline(void __user *to, const void *from,
	unsigned long n);
extern unsigned long arc_clear_user_noinline(void __user *to,
	unsigned long n);
extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
	long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n);

#define __copy_from_user(t, f, n)	arc_copy_from_user_noinline(t, f, n)
#define __copy_to_user(t, f, n)	arc_copy_to_user_noinline(t, f, n)
#define __clear_user(d, n)		arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n)	arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n)		arc_strnlen_user_noinline(s, n)
@@ -752,6 +743,4 @@ extern long arc_strnlen_user_noinline(const char __user *src, long n);

#include <asm-generic/uaccess.h>

extern int fixup_exception(struct pt_regs *regs);

#endif
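INLINE_COPY_TO_USER/INLINE_COPY_FROM_USER ask the generic uaccess layer to expand copy_{to,from}_user() inline on top of ARC's raw copies instead of routing through the single out-of-line copy in lib/usercopy.c; the caller-visible contract is the same either way. A hypothetical caller:

static long demo_read_from_user(void *dst, const void __user *src, unsigned long n)
{
	unsigned long left = copy_from_user(dst, src, n);

	return left ? -EFAULT : 0;	/* non-zero means bytes were left uncopied */
}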
@@ -28,20 +28,6 @@ int fixup_exception(struct pt_regs *regs)

#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE

long arc_copy_from_user_noinline(void *to, const void __user *from,
	unsigned long n)
{
	return __arc_copy_from_user(to, from, n);
}
EXPORT_SYMBOL(arc_copy_from_user_noinline);

long arc_copy_to_user_noinline(void __user *to, const void *from,
	unsigned long n)
{
	return __arc_copy_to_user(to, from, n);
}
EXPORT_SYMBOL(arc_copy_to_user_noinline);

unsigned long arc_clear_user_noinline(void __user *to,
	unsigned long n)
{

@@ -41,7 +41,6 @@ config ARM
	select HARDIRQS_SW_RESEND
	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
	select HAVE_ARCH_HARDENED_USERCOPY
	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
	select HAVE_ARCH_MMAP_RND_BITS if MMU
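With HAVE_ARCH_HARDENED_USERCOPY now assumed everywhere, per-arch select lines like the one dropped from ARM's Kconfig here disappear, and the copy helpers call check_object_size() unconditionally — as the __copy_from_user() at the end of this page does. A sketch of where the check sits (demo_copy_from_user is a hypothetical name):

static inline unsigned long
demo_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	check_object_size(to, n, false);	/* false: copying into a kernel object */
	return raw_copy_from_user(to, from, n);
}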
@@ -7,6 +7,7 @@ generic-y += early_ioremap.h
|
||||
generic-y += emergency-restart.h
|
||||
generic-y += errno.h
|
||||
generic-y += exec.h
|
||||
generic-y += extable.h
|
||||
generic-y += ioctl.h
|
||||
generic-y += ipcbuf.h
|
||||
generic-y += irq_regs.h
|
||||
|
||||
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -12,8 +12,6 @@
  * User space memory access functions
  */
 #include <linux/string.h>
-#include <linux/thread_info.h>
-#include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/domain.h>
 #include <asm/unified.h>
@@ -26,28 +24,7 @@
 #define __put_user_unaligned __put_user
 #endif
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 /*
  * These two functions allow hooking accesses to userspace to increase
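The consumer of those address pairs is untouched by this cleanup: on a fault in a whitelisted instruction, the trap handler looks the faulting PC up in the sorted table and resumes at the fixup stub. arm's version in arch/arm/mm/extable.c is essentially:

    int fixup_exception(struct pt_regs *regs)
    {
    	const struct exception_table_entry *fixup;
    
    	fixup = search_exception_tables(instruction_pointer(regs));
    	if (fixup)
    		regs->ARM_pc = fixup->fixup;	/* resume at out-of-line stub */
    
    	return fixup != NULL;
    }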
@@ -271,7 +248,7 @@ static inline void set_fs(mm_segment_t fs)
 #define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)
 
 #define user_addr_max() \
-	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
+	(uaccess_kernel() ? ~0UL : get_fs())
 
 /*
  * The "__xxx" versions of the user access functions do not verify the
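uaccess_kernel() is the helper this series introduces for "is this a set_fs(KERNEL_DS) context?"; include/linux/uaccess.h defines it as

    #define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

and the open-coded segment_eq() tests in the library code below are converted to it.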
@@ -478,7 +455,7 @@ extern unsigned long __must_check
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
-__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned int __ua_flags;
 
@@ -494,7 +471,7 @@ extern unsigned long __must_check
 __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 
 static inline unsigned long __must_check
-__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags;
@@ -522,54 +499,22 @@ __clear_user(void __user *addr, unsigned long n)
 }
 
 #else
-#define __arch_copy_from_user(to, from, n)	\
-	(memcpy(to, (void __force *)from, n), 0)
-#define __arch_copy_to_user(to, from, n)	\
-	(memcpy((void __force *)to, from, n), 0)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	memcpy(to, (const void __force *)from, n);
+	return 0;
+}
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	memcpy((void __force *)to, from, n);
+	return 0;
+}
 #define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
 #endif
 
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	check_object_size(to, n, false);
-	return __arch_copy_from_user(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long res = n;
-
-	check_object_size(to, n, false);
-
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = __arch_copy_from_user(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	check_object_size(from, n, true);
-
-	return __arch_copy_to_user(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	check_object_size(from, n, true);
-
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __arch_copy_to_user(to, from, n);
-	return n;
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
 
 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {
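None of this plumbing is visible to callers; the usual idiom keeps working, now with identical fault semantics on every architecture. A hypothetical caller (uarg and struct foo_args are invented for illustration):

    struct foo_args args;	/* hypothetical structure */
    
    if (copy_from_user(&args, uarg, sizeof(args)))
    	return -EFAULT;	/* short copy: the uncopied tail was zeroed */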
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -90,7 +90,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	unsigned long ua_flags;
 	int atomic;
 
-	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+	if (uaccess_kernel()) {
 		memcpy((void *)to, from, n);
 		return 0;
 	}
@@ -162,7 +162,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 {
 	unsigned long ua_flags;
 
-	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+	if (uaccess_kernel()) {
 		memset((void *)addr, 0, n);
 		return 0;
 	}
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -60,7 +60,6 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
-	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
--- /dev/null
+++ b/arch/arm64/include/asm/extable.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	int insn, fixup;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+extern int fixup_exception(struct pt_regs *regs);
+#endif
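With ARCH_HAS_RELATIVE_EXTABLE, each entry stores two 32-bit offsets relative to the entry itself instead of two 64-bit absolute addresses, which halves the table and keeps it position-independent. arm64's fixup path in arch/arm64/mm/extable.c then resolves the fixup target as field address plus stored offset, essentially:

    int fixup_exception(struct pt_regs *regs)
    {
    	const struct exception_table_entry *fixup;
    
    	fixup = search_exception_tables(instruction_pointer(regs));
    	if (fixup)
    		regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
    
    	return fixup != NULL;
    }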
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -28,38 +28,12 @@
 #include <linux/bitops.h>
 #include <linux/kasan-checks.h>
 #include <linux/string.h>
-#include <linux/thread_info.h>
 
 #include <asm/cpufeature.h>
 #include <asm/ptrace.h>
-#include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The exception table consists of pairs of relative offsets: the first
- * is the relative offset to an instruction that is allowed to fault,
- * and the second is the relative offset at which the program should
- * continue. No registers are modified, so it is entirely up to the
- * continuation code to figure out what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	int insn, fixup;
-};
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 #define KERNEL_DS	(-1UL)
 #define get_ds()	(KERNEL_DS)
@@ -357,58 +331,13 @@ do { \
 })
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define raw_copy_from_user __arch_copy_from_user
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_to_user __arch_copy_to_user
+extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 
-static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	kasan_check_write(to, n);
-	check_object_size(to, n, false);
-	return __arch_copy_from_user(to, from, n);
-}
-
-static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	kasan_check_read(from, n);
-	check_object_size(from, n, true);
-	return __arch_copy_to_user(to, from, n);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long res = n;
-	kasan_check_write(to, n);
-	check_object_size(to, n, false);
-
-	if (access_ok(VERIFY_READ, from, n)) {
-		res = __arch_copy_from_user(to, from, n);
-	}
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	kasan_check_read(from, n);
-	check_object_size(from, n, true);
-
-	if (access_ok(VERIFY_WRITE, to, n)) {
-		n = __arch_copy_to_user(to, from, n);
-	}
-	return n;
-}
-
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
-		n = __copy_in_user(to, from, n);
-	return n;
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
 
 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {
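copy_in_user() also moves to generic code: the architecture now supplies and exports only raw_copy_in_user() (see the arm64ksyms.c hunk below), and include/linux/uaccess.h wraps it along these lines (a sketch; the real 4.12-era wrapper sits under CONFIG_COMPAT and adds might_fault()):

    static inline unsigned long __must_check
    copy_in_user(void __user *to, const void __user *from, unsigned long n)
    {
    	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
    		n = raw_copy_in_user(to, from, n);
    	return n;
    }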
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);
 
 /* physical memory */
 EXPORT_SYMBOL(memstart_addr);
Some files were not shown because too many files have changed in this diff.