author     Marcel Ziswiler <marcel.ziswiler@toradex.com>   2021-07-07 01:19:43 +0200
committer  Marcel Ziswiler <marcel.ziswiler@toradex.com>   2021-07-07 01:19:43 +0200
commit     d900385139e5aa8d584dee92c87bb85d0226253e (patch)
tree       26aa082f242221c535f2d8aa03b0c314c713e8ea /arch/alpha
parent     56168452b2a2fa8b4efc664d9fcb08536486a1ba (diff)
parent     200ecf5055dfba12b9bff6984830a7cdddee8ab1 (diff)
Merge tag 'v4.4.274' into toradex_vf_4.4-next
Linux 4.4.274
Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/include/asm/Kbuild            1
-rw-r--r--  arch/alpha/include/asm/uaccess.h        76
-rw-r--r--  arch/alpha/kernel/Makefile               2
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c        102
-rw-r--r--  arch/alpha/kernel/machvec_impl.h         6
-rw-r--r--  arch/alpha/kernel/setup.c                1
-rw-r--r--  arch/alpha/lib/Makefile                 33
-rw-r--r--  arch/alpha/lib/callback_srm.S            5
-rw-r--r--  arch/alpha/lib/checksum.c                3
-rw-r--r--  arch/alpha/lib/clear_page.S              3
-rw-r--r--  arch/alpha/lib/clear_user.S             66
-rw-r--r--  arch/alpha/lib/copy_page.S               3
-rw-r--r--  arch/alpha/lib/copy_user.S             101
-rw-r--r--  arch/alpha/lib/csum_ipv6_magic.S         2
-rw-r--r--  arch/alpha/lib/csum_partial_copy.c       2
-rw-r--r--  arch/alpha/lib/dec_and_lock.c            2
-rw-r--r--  arch/alpha/lib/divide.S                  3
-rw-r--r--  arch/alpha/lib/ev6-clear_page.S          3
-rw-r--r--  arch/alpha/lib/ev6-clear_user.S         85
-rw-r--r--  arch/alpha/lib/ev6-copy_page.S           3
-rw-r--r--  arch/alpha/lib/ev6-copy_user.S         130
-rw-r--r--  arch/alpha/lib/ev6-csum_ipv6_magic.S     2
-rw-r--r--  arch/alpha/lib/ev6-divide.S              3
-rw-r--r--  arch/alpha/lib/ev6-memchr.S              3
-rw-r--r--  arch/alpha/lib/ev6-memcpy.S              3
-rw-r--r--  arch/alpha/lib/ev6-memset.S              7
-rw-r--r--  arch/alpha/lib/ev67-strcat.S             3
-rw-r--r--  arch/alpha/lib/ev67-strchr.S             3
-rw-r--r--  arch/alpha/lib/ev67-strlen.S             3
-rw-r--r--  arch/alpha/lib/ev67-strncat.S            3
-rw-r--r--  arch/alpha/lib/ev67-strrchr.S            3
-rw-r--r--  arch/alpha/lib/fpreg.c                   7
-rw-r--r--  arch/alpha/lib/memchr.S                  3
-rw-r--r--  arch/alpha/lib/memcpy.c                  5
-rw-r--r--  arch/alpha/lib/memmove.S                 3
-rw-r--r--  arch/alpha/lib/memset.S                  7
-rw-r--r--  arch/alpha/lib/strcat.S                  2
-rw-r--r--  arch/alpha/lib/strchr.S                  3
-rw-r--r--  arch/alpha/lib/strcpy.S                  3
-rw-r--r--  arch/alpha/lib/strlen.S                  3
-rw-r--r--  arch/alpha/lib/strncat.S                 3
-rw-r--r--  arch/alpha/lib/strncpy.S                 3
-rw-r--r--  arch/alpha/lib/strrchr.S                 3
43 files changed, 274 insertions, 436 deletions
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index ffd9cf5ec8c4..bf8475ce85ee 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,6 +3,7 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
+generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index c0ddbbf73400..89413a29cb07 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -341,45 +341,17 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
* Complex access routines
*/
-/* This little bit of silliness is to get the GP loaded for a function
- that ordinarily wouldn't. Otherwise we could have it done by the macro
- directly, which can be optimized the linker. */
-#ifdef MODULE
-#define __module_address(sym) "r"(sym),
-#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
-#else
-#define __module_address(sym)
-#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
-#endif
-
-extern void __copy_user(void);
-
-extern inline long
-__copy_tofrom_user_nocheck(void *to, const void *from, long len)
-{
- register void * __cu_to __asm__("$6") = to;
- register const void * __cu_from __asm__("$7") = from;
- register long __cu_len __asm__("$0") = len;
-
- __asm__ __volatile__(
- __module_call(28, 3, __copy_user)
- : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
- : __module_address(__copy_user)
- "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
- : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
-
- return __cu_len;
-}
+extern long __copy_user(void *to, const void *from, long len);
-#define __copy_to_user(to, from, n) \
-({ \
- __chk_user_ptr(to); \
- __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \
+#define __copy_to_user(to, from, n) \
+({ \
+ __chk_user_ptr(to); \
+ __copy_user((__force void *)(to), (from), (n)); \
})
-#define __copy_from_user(to, from, n) \
-({ \
- __chk_user_ptr(from); \
- __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \
+#define __copy_from_user(to, from, n) \
+({ \
+ __chk_user_ptr(from); \
+ __copy_user((to), (__force void *)(from), (n)); \
})
#define __copy_to_user_inatomic __copy_to_user
@@ -389,35 +361,22 @@ extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
if (likely(__access_ok((unsigned long)to, n, get_fs())))
- n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
+ n = __copy_user((__force void *)to, from, n);
return n;
}
extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
+ long res = n;
if (likely(__access_ok((unsigned long)from, n, get_fs())))
- n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
- else
- memset(to, 0, n);
- return n;
+ res = __copy_from_user_inatomic(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
-extern void __do_clear_user(void);
-
-extern inline long
-__clear_user(void __user *to, long len)
-{
- register void __user * __cl_to __asm__("$6") = to;
- register long __cl_len __asm__("$0") = len;
- __asm__ __volatile__(
- __module_call(28, 2, __do_clear_user)
- : "=r"(__cl_len), "=r"(__cl_to)
- : __module_address(__do_clear_user)
- "0"(__cl_len), "1"(__cl_to)
- : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
- return __cl_len;
-}
+extern long __clear_user(void __user *to, long len);
extern inline long
clear_user(void __user *to, long len)
@@ -427,9 +386,6 @@ clear_user(void __user *to, long len)
return len;
}
-#undef __module_address
-#undef __module_call
-
#define user_addr_max() \
(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
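
With the byte-by-byte zeroing loop dropped from the assembly fixup paths (see copy_user.S and ev6-copy_user.S below), the zero-fill of any uncopied tail now happens entirely in the C copy_from_user() above. A minimal caller sketch of the guarantee this preserves (hypothetical names, not part of this patch):

/* Sketch only: demo_req and demo_read_req are made-up names. */
#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_req {
	unsigned long flags;
	char name[32];
};

static long demo_read_req(struct demo_req *dst, const void __user *src)
{
	/* copy_from_user() returns the number of bytes NOT copied. */
	long left = copy_from_user(dst, src, sizeof(*dst));

	if (left) {
		/* The last 'left' bytes of *dst were already zeroed by
		 * copy_from_user(), so no stale kernel data survives even
		 * if this error path were mishandled by a caller. */
		return -EFAULT;
	}
	return 0;
}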
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 3ecac0106c8a..8ce13d7a2ad3 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -8,7 +8,7 @@ ccflags-y := -Wno-sign-compare
obj-y := entry.o traps.o process.o osf_sys.o irq.o \
irq_alpha.o signal.o setup.o ptrace.o time.o \
- alpha_ksyms.o systbls.o err_common.o io.o
+ systbls.o err_common.o io.o
obj-$(CONFIG_VGA_HOSE) += console.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
deleted file mode 100644
index f4c7ab6f43b0..000000000000
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * linux/arch/alpha/kernel/alpha_ksyms.c
- *
- * Export the alpha-specific functions that are needed for loadable
- * modules.
- */
-
-#include <linux/module.h>
-#include <asm/console.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/fpu.h>
-#include <asm/machvec.h>
-
-#include <linux/syscalls.h>
-
-/* these are C runtime functions with special calling conventions: */
-extern void __divl (void);
-extern void __reml (void);
-extern void __divq (void);
-extern void __remq (void);
-extern void __divlu (void);
-extern void __remlu (void);
-extern void __divqu (void);
-extern void __remqu (void);
-
-EXPORT_SYMBOL(alpha_mv);
-EXPORT_SYMBOL(callback_getenv);
-EXPORT_SYMBOL(callback_setenv);
-EXPORT_SYMBOL(callback_save_env);
-
-/* platform dependent support */
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(___memset);
-EXPORT_SYMBOL(__memsetw);
-EXPORT_SYMBOL(__constant_c_memset);
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
-EXPORT_SYMBOL(alpha_read_fp_reg);
-EXPORT_SYMBOL(alpha_read_fp_reg_s);
-EXPORT_SYMBOL(alpha_write_fp_reg);
-EXPORT_SYMBOL(alpha_write_fp_reg_s);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_tcpudp_magic);
-EXPORT_SYMBOL(ip_compute_csum);
-EXPORT_SYMBOL(ip_fast_csum);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_ipv6_magic);
-
-#ifdef CONFIG_MATHEMU_MODULE
-extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
-extern long (*alpha_fp_emul) (unsigned long pc);
-EXPORT_SYMBOL(alpha_fp_emul_imprecise);
-EXPORT_SYMBOL(alpha_fp_emul);
-#endif
-
-/*
- * The following are specially called from the uaccess assembly stubs.
- */
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__do_clear_user);
-
-/*
- * SMP-specific symbols.
- */
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* CONFIG_SMP */
-
-/*
- * The following are special because they're not called
- * explicitly (the C compiler or assembler generates them in
- * response to division operations). Fortunately, their
- * interface isn't gonna change any time soon now, so it's OK
- * to leave it out of version control.
- */
-# undef memcpy
-# undef memset
-EXPORT_SYMBOL(__divl);
-EXPORT_SYMBOL(__divlu);
-EXPORT_SYMBOL(__divq);
-EXPORT_SYMBOL(__divqu);
-EXPORT_SYMBOL(__reml);
-EXPORT_SYMBOL(__remlu);
-EXPORT_SYMBOL(__remq);
-EXPORT_SYMBOL(__remqu);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memchr);
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index f54bdf658cd0..8c6516025efb 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -144,9 +144,11 @@
else beforehand. Fine. We'll do it ourselves. */
#if 0
#define ALIAS_MV(system) \
- struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
+ struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); \
+ EXPORT_SYMBOL(alpha_mv);
#else
#define ALIAS_MV(system) \
- asm(".global alpha_mv\nalpha_mv = " #system "_mv");
+ asm(".global alpha_mv\nalpha_mv = " #system "_mv"); \
+ EXPORT_SYMBOL(alpha_mv);
#endif
#endif /* GENERIC */
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index b20af76f12c1..4811e54069fc 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -115,6 +115,7 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;
+EXPORT_SYMBOL(alpha_mv);
#endif
#ifndef alpha_using_srm
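
With alpha_ksyms.c removed, the export of alpha_mv moves next to its definition: here in setup.c for CONFIG_ALPHA_GENERIC kernels, and into the ALIAS_MV() macro (machvec_impl.h above) for machine-specific builds. A hedged sketch of why modules still need the symbol on a generic kernel, where the machvec-based helpers dereference alpha_mv at run time (hypothetical module, not part of this patch):

/* Sketch only: mv_demo is a made-up module name. */
#include <linux/module.h>
#include <asm/machvec.h>

static int __init mv_demo_init(void)
{
	pr_info("mv_demo: running on machine vector %s\n",
		alpha_mv.vector_name);
	return 0;
}

static void __exit mv_demo_exit(void)
{
}

module_init(mv_demo_init);
module_exit(mv_demo_exit);
MODULE_LICENSE("GPL");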
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 59660743237c..a80815960364 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -20,12 +20,8 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
checksum.o \
csum_partial_copy.o \
$(ev67-y)strlen.o \
- $(ev67-y)strcat.o \
- strcpy.o \
- $(ev67-y)strncat.o \
- strncpy.o \
- $(ev6-y)stxcpy.o \
- $(ev6-y)stxncpy.o \
+ stycpy.o \
+ styncpy.o \
$(ev67-y)strchr.o \
$(ev67-y)strrchr.o \
$(ev6-y)memchr.o \
@@ -46,11 +42,20 @@ AFLAGS___remqu.o = -DREM
AFLAGS___divlu.o = -DDIV -DINTSIZE
AFLAGS___remlu.o = -DREM -DINTSIZE
-$(obj)/__divqu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__remqu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__divlu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__remlu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
+$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
+ $(src)/$(ev6-y)divide.S FORCE
+ $(call if_changed_rule,as_o_S)
+
+# There are direct branches between {str*cpy,str*cat} and stx*cpy.
+# Ensure the branches are within range by merging these objects.
+
+LDFLAGS_stycpy.o := -r
+LDFLAGS_styncpy.o := -r
+
+$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \
+ $(obj)/$(ev6-y)stxcpy.o FORCE
+ $(call if_changed,ld)
+
+$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \
+ $(obj)/$(ev6-y)stxncpy.o FORCE
+ $(call if_changed,ld)
diff --git a/arch/alpha/lib/callback_srm.S b/arch/alpha/lib/callback_srm.S
index 8804bec2c644..6093addc931a 100644
--- a/arch/alpha/lib/callback_srm.S
+++ b/arch/alpha/lib/callback_srm.S
@@ -3,6 +3,7 @@
*/
#include <asm/console.h>
+#include <asm/export.h>
.text
#define HWRPB_CRB_OFFSET 0xc0
@@ -92,6 +93,10 @@ CALLBACK(reset_env, CCB_RESET_ENV, 4)
CALLBACK(save_env, CCB_SAVE_ENV, 1)
CALLBACK(pswitch, CCB_PSWITCH, 3)
CALLBACK(bios_emul, CCB_BIOS_EMUL, 5)
+
+EXPORT_SYMBOL(callback_getenv)
+EXPORT_SYMBOL(callback_setenv)
+EXPORT_SYMBOL(callback_save_env)
.data
__alpha_using_srm: # For use by bootpheader
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index 199f6efa83fa..65197c3c0845 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -50,6 +50,7 @@ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
(__force u64)saddr + (__force u64)daddr +
(__force u64)sum + ((len + proto) << 8));
}
+EXPORT_SYMBOL(csum_tcpudp_magic);
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
@@ -148,6 +149,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return (__force __sum16)~do_csum(iph,ihl*4);
}
+EXPORT_SYMBOL(ip_fast_csum);
/*
* computes the checksum of a memory block at buff, length len,
@@ -182,3 +184,4 @@ __sum16 ip_compute_csum(const void *buff, int len)
{
return (__force __sum16)~from64to16(do_csum(buff,len));
}
+EXPORT_SYMBOL(ip_compute_csum);
diff --git a/arch/alpha/lib/clear_page.S b/arch/alpha/lib/clear_page.S
index a221ae266e29..263d7393c0e7 100644
--- a/arch/alpha/lib/clear_page.S
+++ b/arch/alpha/lib/clear_page.S
@@ -3,7 +3,7 @@
*
* Zero an entire page.
*/
-
+#include <asm/export.h>
.text
.align 4
.global clear_page
@@ -37,3 +37,4 @@ clear_page:
nop
.end clear_page
+ EXPORT_SYMBOL(clear_page)
diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S
index 8860316c1957..006f469fef73 100644
--- a/arch/alpha/lib/clear_user.S
+++ b/arch/alpha/lib/clear_user.S
@@ -8,22 +8,8 @@
* right "bytes left to zero" value (and that it is updated only _after_
* a successful copy). There is also some rather minor exception setup
* stuff.
- *
- * NOTE! This is not directly C-callable, because the calling semantics
- * are different:
- *
- * Inputs:
- * length in $0
- * destination address in $6
- * exception pointer in $7
- * return address in $28 (exceptions expect it there)
- *
- * Outputs:
- * bytes left to copy in $0
- *
- * Clobbers:
- * $1,$2,$3,$4,$5,$6
*/
+#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EX(x,y...) \
@@ -37,62 +23,63 @@
.set noreorder
.align 4
- .globl __do_clear_user
- .ent __do_clear_user
- .frame $30, 0, $28
+ .globl __clear_user
+ .ent __clear_user
+ .frame $30, 0, $26
.prologue 0
$loop:
and $1, 3, $4 # e0 :
beq $4, 1f # .. e1 :
-0: EX( stq_u $31, 0($6) ) # e0 : zero one word
+0: EX( stq_u $31, 0($16) ) # e0 : zero one word
subq $0, 8, $0 # .. e1 :
subq $4, 1, $4 # e0 :
- addq $6, 8, $6 # .. e1 :
+ addq $16, 8, $16 # .. e1 :
bne $4, 0b # e1 :
unop # :
1: bic $1, 3, $1 # e0 :
beq $1, $tail # .. e1 :
-2: EX( stq_u $31, 0($6) ) # e0 : zero four words
+2: EX( stq_u $31, 0($16) ) # e0 : zero four words
subq $0, 8, $0 # .. e1 :
- EX( stq_u $31, 8($6) ) # e0 :
+ EX( stq_u $31, 8($16) ) # e0 :
subq $0, 8, $0 # .. e1 :
- EX( stq_u $31, 16($6) ) # e0 :
+ EX( stq_u $31, 16($16) ) # e0 :
subq $0, 8, $0 # .. e1 :
- EX( stq_u $31, 24($6) ) # e0 :
+ EX( stq_u $31, 24($16) ) # e0 :
subq $0, 8, $0 # .. e1 :
subq $1, 4, $1 # e0 :
- addq $6, 32, $6 # .. e1 :
+ addq $16, 32, $16 # .. e1 :
bne $1, 2b # e1 :
$tail:
bne $2, 1f # e1 : is there a tail to do?
- ret $31, ($28), 1 # .. e1 :
+ ret $31, ($26), 1 # .. e1 :
-1: EX( ldq_u $5, 0($6) ) # e0 :
+1: EX( ldq_u $5, 0($16) ) # e0 :
clr $0 # .. e1 :
nop # e1 :
mskqh $5, $0, $5 # e0 :
- EX( stq_u $5, 0($6) ) # e0 :
- ret $31, ($28), 1 # .. e1 :
+ EX( stq_u $5, 0($16) ) # e0 :
+ ret $31, ($26), 1 # .. e1 :
-__do_clear_user:
- and $6, 7, $4 # e0 : find dest misalignment
+__clear_user:
+ and $17, $17, $0
+ and $16, 7, $4 # e0 : find dest misalignment
beq $0, $zerolength # .. e1 :
addq $0, $4, $1 # e0 : bias counter
and $1, 7, $2 # e1 : number of bytes in tail
srl $1, 3, $1 # e0 :
beq $4, $loop # .. e1 :
- EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in
+ EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in
beq $1, $oneword # .. e1 : sub-word store?
- mskql $5, $6, $5 # e0 : take care of misaligned head
- addq $6, 8, $6 # .. e1 :
- EX( stq_u $5, -8($6) ) # e0 :
+ mskql $5, $16, $5 # e0 : take care of misaligned head
+ addq $16, 8, $16 # .. e1 :
+ EX( stq_u $5, -8($16) ) # e0 :
addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment
subq $1, 1, $1 # e0 :
subq $0, 8, $0 # .. e1 :
@@ -100,14 +87,15 @@ __do_clear_user:
unop # :
$oneword:
- mskql $5, $6, $4 # e0 :
+ mskql $5, $16, $4 # e0 :
mskqh $5, $2, $5 # e0 :
or $5, $4, $5 # e1 :
- EX( stq_u $5, 0($6) ) # e0 :
+ EX( stq_u $5, 0($16) ) # e0 :
clr $0 # .. e1 :
$zerolength:
$exception:
- ret $31, ($28), 1 # .. e1 :
+ ret $31, ($26), 1 # .. e1 :
- .end __do_clear_user
+ .end __clear_user
+ EXPORT_SYMBOL(__clear_user)
diff --git a/arch/alpha/lib/copy_page.S b/arch/alpha/lib/copy_page.S
index 9f3b97459cc6..2ee0bd0508c5 100644
--- a/arch/alpha/lib/copy_page.S
+++ b/arch/alpha/lib/copy_page.S
@@ -3,7 +3,7 @@
*
* Copy an entire page.
*/
-
+#include <asm/export.h>
.text
.align 4
.global copy_page
@@ -47,3 +47,4 @@ copy_page:
nop
.end copy_page
+ EXPORT_SYMBOL(copy_page)
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index 6f3fab9eb434..159f1b7e6e49 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -9,23 +9,10 @@
* contains the right "bytes left to copy" value (and that it is updated
* only _after_ a successful copy). There is also some rather minor
* exception setup stuff..
- *
- * NOTE! This is not directly C-callable, because the calling semantics are
- * different:
- *
- * Inputs:
- * length in $0
- * destination address in $6
- * source address in $7
- * return address in $28
- *
- * Outputs:
- * bytes left to copy in $0
- *
- * Clobbers:
- * $1,$2,$3,$4,$5,$6,$7
*/
+#include <asm/export.h>
+
/* Allow an exception for an insn; exit if we get one. */
#define EXI(x,y...) \
99: x,##y; \
@@ -47,58 +34,59 @@
.ent __copy_user
__copy_user:
.prologue 0
- and $6,7,$3
+ and $18,$18,$0
+ and $16,7,$3
beq $0,$35
beq $3,$36
subq $3,8,$3
.align 4
$37:
- EXI( ldq_u $1,0($7) )
- EXO( ldq_u $2,0($6) )
- extbl $1,$7,$1
- mskbl $2,$6,$2
- insbl $1,$6,$1
+ EXI( ldq_u $1,0($17) )
+ EXO( ldq_u $2,0($16) )
+ extbl $1,$17,$1
+ mskbl $2,$16,$2
+ insbl $1,$16,$1
addq $3,1,$3
bis $1,$2,$1
- EXO( stq_u $1,0($6) )
+ EXO( stq_u $1,0($16) )
subq $0,1,$0
- addq $6,1,$6
- addq $7,1,$7
+ addq $16,1,$16
+ addq $17,1,$17
beq $0,$41
bne $3,$37
$36:
- and $7,7,$1
+ and $17,7,$1
bic $0,7,$4
beq $1,$43
beq $4,$48
- EXI( ldq_u $3,0($7) )
+ EXI( ldq_u $3,0($17) )
.align 4
$50:
- EXI( ldq_u $2,8($7) )
+ EXI( ldq_u $2,8($17) )
subq $4,8,$4
- extql $3,$7,$3
- extqh $2,$7,$1
+ extql $3,$17,$3
+ extqh $2,$17,$1
bis $3,$1,$1
- EXO( stq $1,0($6) )
- addq $7,8,$7
+ EXO( stq $1,0($16) )
+ addq $17,8,$17
subq $0,8,$0
- addq $6,8,$6
+ addq $16,8,$16
bis $2,$2,$3
bne $4,$50
$48:
beq $0,$41
.align 4
$57:
- EXI( ldq_u $1,0($7) )
- EXO( ldq_u $2,0($6) )
- extbl $1,$7,$1
- mskbl $2,$6,$2
- insbl $1,$6,$1
+ EXI( ldq_u $1,0($17) )
+ EXO( ldq_u $2,0($16) )
+ extbl $1,$17,$1
+ mskbl $2,$16,$2
+ insbl $1,$16,$1
bis $1,$2,$1
- EXO( stq_u $1,0($6) )
+ EXO( stq_u $1,0($16) )
subq $0,1,$0
- addq $6,1,$6
- addq $7,1,$7
+ addq $16,1,$16
+ addq $17,1,$17
bne $0,$57
br $31,$41
.align 4
@@ -106,40 +94,27 @@ $43:
beq $4,$65
.align 4
$66:
- EXI( ldq $1,0($7) )
+ EXI( ldq $1,0($17) )
subq $4,8,$4
- EXO( stq $1,0($6) )
- addq $7,8,$7
+ EXO( stq $1,0($16) )
+ addq $17,8,$17
subq $0,8,$0
- addq $6,8,$6
+ addq $16,8,$16
bne $4,$66
$65:
beq $0,$41
- EXI( ldq $2,0($7) )
- EXO( ldq $1,0($6) )
+ EXI( ldq $2,0($17) )
+ EXO( ldq $1,0($16) )
mskql $2,$0,$2
mskqh $1,$0,$1
bis $2,$1,$2
- EXO( stq $2,0($6) )
+ EXO( stq $2,0($16) )
bis $31,$31,$0
$41:
$35:
-$exitout:
- ret $31,($28),1
-
$exitin:
- /* A stupid byte-by-byte zeroing of the rest of the output
- buffer. This cures security holes by never leaving
- random kernel data around to be copied elsewhere. */
-
- mov $0,$1
-$101:
- EXO ( ldq_u $2,0($6) )
- subq $1,1,$1
- mskbl $2,$6,$2
- EXO ( stq_u $2,0($6) )
- addq $6,1,$6
- bgt $1,$101
- ret $31,($28),1
+$exitout:
+ ret $31,($26),1
.end __copy_user
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/alpha/lib/csum_ipv6_magic.S b/arch/alpha/lib/csum_ipv6_magic.S
index 2c2acb96deb6..e74b4544b0cc 100644
--- a/arch/alpha/lib/csum_ipv6_magic.S
+++ b/arch/alpha/lib/csum_ipv6_magic.S
@@ -12,6 +12,7 @@
* added by Ivan Kokshaysky <ink@jurassic.park.msu.ru>
*/
+#include <asm/export.h>
.globl csum_ipv6_magic
.align 4
.ent csum_ipv6_magic
@@ -113,3 +114,4 @@ csum_ipv6_magic:
ret # .. e1 :
.end csum_ipv6_magic
+ EXPORT_SYMBOL(csum_ipv6_magic)
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 5675dca8dbb1..b4ff3b683bcd 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -374,6 +374,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
}
return (__force __wsum)checksum;
}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
@@ -386,3 +387,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
set_fs(oldfs);
return checksum;
}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
index f9f5fe830e9f..4221b40167ee 100644
--- a/arch/alpha/lib/dec_and_lock.c
+++ b/arch/alpha/lib/dec_and_lock.c
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#include <linux/export.h>
asm (".text \n\
.global _atomic_dec_and_lock \n\
@@ -39,3 +40,4 @@ static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
spin_unlock(lock);
return 0;
}
+EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/alpha/lib/divide.S b/arch/alpha/lib/divide.S
index 2d1a0484a99e..1e33bd127621 100644
--- a/arch/alpha/lib/divide.S
+++ b/arch/alpha/lib/divide.S
@@ -45,6 +45,7 @@
* $28 - compare status
*/
+#include <asm/export.h>
#define halt .long 0
/*
@@ -151,6 +152,7 @@ ufunction:
addq $30,STACK,$30
ret $31,($23),1
.end ufunction
+EXPORT_SYMBOL(ufunction)
/*
* Uhh.. Ugly signed division. I'd rather not have it at all, but
@@ -193,3 +195,4 @@ sfunction:
addq $30,STACK,$30
ret $31,($23),1
.end sfunction
+EXPORT_SYMBOL(sfunction)
diff --git a/arch/alpha/lib/ev6-clear_page.S b/arch/alpha/lib/ev6-clear_page.S
index adf4f7be0e2b..abe99e69a194 100644
--- a/arch/alpha/lib/ev6-clear_page.S
+++ b/arch/alpha/lib/ev6-clear_page.S
@@ -3,7 +3,7 @@
*
* Zero an entire page.
*/
-
+#include <asm/export.h>
.text
.align 4
.global clear_page
@@ -52,3 +52,4 @@ clear_page:
nop
.end clear_page
+ EXPORT_SYMBOL(clear_page)
diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S
index 4f42a16b7f53..e179e4757ef8 100644
--- a/arch/alpha/lib/ev6-clear_user.S
+++ b/arch/alpha/lib/ev6-clear_user.S
@@ -9,21 +9,6 @@
* a successful copy). There is also some rather minor exception setup
* stuff.
*
- * NOTE! This is not directly C-callable, because the calling semantics
- * are different:
- *
- * Inputs:
- * length in $0
- * destination address in $6
- * exception pointer in $7
- * return address in $28 (exceptions expect it there)
- *
- * Outputs:
- * bytes left to copy in $0
- *
- * Clobbers:
- * $1,$2,$3,$4,$5,$6
- *
* Much of the information about 21264 scheduling/coding comes from:
* Compiler Writer's Guide for the Alpha 21264
* abbreviated as 'CWG' in other comments here
@@ -43,6 +28,7 @@
* want to leave a hole (and we also want to avoid repeating lots of work)
*/
+#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EX(x,y...) \
99: x,##y; \
@@ -55,14 +41,15 @@
.set noreorder
.align 4
- .globl __do_clear_user
- .ent __do_clear_user
- .frame $30, 0, $28
+ .globl __clear_user
+ .ent __clear_user
+ .frame $30, 0, $26
.prologue 0
# Pipeline info : Slotting & Comments
-__do_clear_user:
- and $6, 7, $4 # .. E .. .. : find dest head misalignment
+__clear_user:
+ and $17, $17, $0
+ and $16, 7, $4 # .. E .. .. : find dest head misalignment
beq $0, $zerolength # U .. .. .. : U L U L
addq $0, $4, $1 # .. .. .. E : bias counter
@@ -74,14 +61,14 @@ __do_clear_user:
/*
* Head is not aligned. Write (8 - $4) bytes to head of destination
- * This means $6 is known to be misaligned
+ * This means $16 is known to be misaligned
*/
- EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in
+ EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in
beq $1, $onebyte # .. .. U .. : sub-word store?
- mskql $5, $6, $5 # .. U .. .. : take care of misaligned head
- addq $6, 8, $6 # E .. .. .. : L U U L
+ mskql $5, $16, $5 # .. U .. .. : take care of misaligned head
+ addq $16, 8, $16 # E .. .. .. : L U U L
- EX( stq_u $5, -8($6) ) # .. .. .. L :
+ EX( stq_u $5, -8($16) ) # .. .. .. L :
subq $1, 1, $1 # .. .. E .. :
addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment
subq $0, 8, $0 # E .. .. .. : U L U L
@@ -92,11 +79,11 @@ __do_clear_user:
* values upon initial entry to the loop
* $1 is number of quadwords to clear (zero is a valid value)
* $2 is number of trailing bytes (0..7) ($2 never used...)
- * $6 is known to be aligned 0mod8
+ * $16 is known to be aligned 0mod8
*/
$headalign:
subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop
- and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop
+ and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop
subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop)
blt $4, $trailquad # U .. .. .. : U L U L
@@ -113,21 +100,21 @@ $headalign:
beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64
$alignmod64:
- EX( stq_u $31, 0($6) ) # .. .. .. L
+ EX( stq_u $31, 0($16) ) # .. .. .. L
addq $3, 8, $3 # .. .. E ..
subq $0, 8, $0 # .. E .. ..
nop # E .. .. .. : U L U L
nop # .. .. .. E
subq $1, 1, $1 # .. .. E ..
- addq $6, 8, $6 # .. E .. ..
+ addq $16, 8, $16 # .. E .. ..
blt $3, $alignmod64 # U .. .. .. : U L U L
$bigalign:
/*
* $0 is the number of bytes left
* $1 is the number of quads left
- * $6 is aligned 0mod64
+ * $16 is aligned 0mod64
* we know that we'll be taking a minimum of one trip through
* CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
* We are _not_ going to update $0 after every single store. That
@@ -144,39 +131,39 @@ $bigalign:
nop # E :
nop # E :
nop # E :
- bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest
+ bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest
/* This might actually help for the current trip... */
$do_wh64:
wh64 ($3) # .. .. .. L1 : memory subsystem hint
subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop?
- EX( stq_u $31, 0($6) ) # .. L .. ..
+ EX( stq_u $31, 0($16) ) # .. L .. ..
subq $0, 8, $0 # E .. .. .. : U L U L
- addq $6, 128, $3 # E : Target address of wh64
- EX( stq_u $31, 8($6) ) # L :
- EX( stq_u $31, 16($6) ) # L :
+ addq $16, 128, $3 # E : Target address of wh64
+ EX( stq_u $31, 8($16) ) # L :
+ EX( stq_u $31, 16($16) ) # L :
subq $0, 16, $0 # E : U L L U
nop # E :
- EX( stq_u $31, 24($6) ) # L :
- EX( stq_u $31, 32($6) ) # L :
+ EX( stq_u $31, 24($16) ) # L :
+ EX( stq_u $31, 32($16) ) # L :
subq $0, 168, $5 # E : U L L U : two trips through the loop left?
/* 168 = 192 - 24, since we've already completed some stores */
subq $0, 16, $0 # E :
- EX( stq_u $31, 40($6) ) # L :
- EX( stq_u $31, 48($6) ) # L :
- cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle
+ EX( stq_u $31, 40($16) ) # L :
+ EX( stq_u $31, 48($16) ) # L :
+ cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle
subq $1, 8, $1 # E :
subq $0, 16, $0 # E :
- EX( stq_u $31, 56($6) ) # L :
+ EX( stq_u $31, 56($16) ) # L :
nop # E : U L U L
nop # E :
subq $0, 8, $0 # E :
- addq $6, 64, $6 # E :
+ addq $16, 64, $16 # E :
bge $4, $do_wh64 # U : U L U L
$trailquad:
@@ -189,14 +176,14 @@ $trailquad:
beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go
$onequad:
- EX( stq_u $31, 0($6) ) # .. .. .. L
+ EX( stq_u $31, 0($16) ) # .. .. .. L
subq $1, 1, $1 # .. .. E ..
subq $0, 8, $0 # .. E .. ..
nop # E .. .. .. : U L U L
nop # .. .. .. E
nop # .. .. E ..
- addq $6, 8, $6 # .. E .. ..
+ addq $16, 8, $16 # .. E .. ..
bgt $1, $onequad # U .. .. .. : U L U L
# We have an unknown number of bytes left to go.
@@ -210,9 +197,9 @@ $trailbytes:
# so we will use $0 as the loop counter
# We know for a fact that $0 > 0 zero due to previous context
$onebyte:
- EX( stb $31, 0($6) ) # .. .. .. L
+ EX( stb $31, 0($16) ) # .. .. .. L
subq $0, 1, $0 # .. .. E .. :
- addq $6, 1, $6 # .. E .. .. :
+ addq $16, 1, $16 # .. E .. .. :
bgt $0, $onebyte # U .. .. .. : U L U L
$zerolength:
@@ -220,6 +207,6 @@ $exception: # Destination for exception recovery(?)
nop # .. .. .. E :
nop # .. .. E .. :
nop # .. E .. .. :
- ret $31, ($28), 1 # L0 .. .. .. : L U L U
- .end __do_clear_user
-
+ ret $31, ($26), 1 # L0 .. .. .. : L U L U
+ .end __clear_user
+ EXPORT_SYMBOL(__clear_user)
diff --git a/arch/alpha/lib/ev6-copy_page.S b/arch/alpha/lib/ev6-copy_page.S
index b789db192754..77935061bddb 100644
--- a/arch/alpha/lib/ev6-copy_page.S
+++ b/arch/alpha/lib/ev6-copy_page.S
@@ -56,7 +56,7 @@
destination pages are in the dcache, but it is my guess that this is
less important than the dcache miss case. */
-
+#include <asm/export.h>
.text
.align 4
.global copy_page
@@ -201,3 +201,4 @@ copy_page:
nop
.end copy_page
+ EXPORT_SYMBOL(copy_page)
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
index db42ffe9c350..35e6710d0700 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -12,21 +12,6 @@
* only _after_ a successful copy). There is also some rather minor
* exception setup stuff..
*
- * NOTE! This is not directly C-callable, because the calling semantics are
- * different:
- *
- * Inputs:
- * length in $0
- * destination address in $6
- * source address in $7
- * return address in $28
- *
- * Outputs:
- * bytes left to copy in $0
- *
- * Clobbers:
- * $1,$2,$3,$4,$5,$6,$7
- *
* Much of the information about 21264 scheduling/coding comes from:
* Compiler Writer's Guide for the Alpha 21264
* abbreviated as 'CWG' in other comments here
@@ -37,6 +22,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
+#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EXI(x,y...) \
99: x,##y; \
@@ -59,10 +45,11 @@
# Pipeline info: Slotting & Comments
__copy_user:
.prologue 0
- subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy?
+ andq $18, $18, $0
+ subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy?
beq $0, $zerolength # U .. .. .. : U L U L
- and $6,7,$3 # .. .. .. E : is leading dest misalignment
+ and $16,7,$3 # .. .. .. E : is leading dest misalignment
ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data
beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall)
subq $3, 8, $3 # E .. .. .. : L U U L : trip counter
@@ -72,17 +59,17 @@ __copy_user:
* We know we have at least one trip through this loop
*/
$aligndest:
- EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores
- addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG
+ EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores
+ addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG
addq $3,1,$3 # .. E .. .. :
nop # E .. .. .. : U L U L
/*
- * the -1 is to compensate for the inc($6) done in a previous quadpack
+ * the -1 is to compensate for the inc($16) done in a previous quadpack
* which allows us zero dependencies within either quadpack in the loop
*/
- EXO( stb $1,-1($6) ) # .. .. .. L :
- addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG
+ EXO( stb $1,-1($16) ) # .. .. .. L :
+ addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG
subq $0,1,$0 # .. E .. .. :
bne $3, $aligndest # U .. .. .. : U L U L
@@ -91,29 +78,29 @@ $aligndest:
* If we arrived via branch, we have a minimum of 32 bytes
*/
$destaligned:
- and $7,7,$1 # .. .. .. E : Check _current_ source alignment
+ and $17,7,$1 # .. .. .. E : Check _current_ source alignment
bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop
- EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code
+ EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code
beq $1,$quadaligned # U .. .. .. : U L U L
/*
- * In the worst case, we've just executed an ldq_u here from 0($7)
+ * In the worst case, we've just executed an ldq_u here from 0($17)
* and we'll repeat it once if we take the branch
*/
/* Misaligned quadword loop - not unrolled. Leave it that way. */
$misquad:
- EXI( ldq_u $2,8($7) ) # .. .. .. L :
+ EXI( ldq_u $2,8($17) ) # .. .. .. L :
subq $4,8,$4 # .. .. E .. :
- extql $3,$7,$3 # .. U .. .. :
- extqh $2,$7,$1 # U .. .. .. : U U L L
+ extql $3,$17,$3 # .. U .. .. :
+ extqh $2,$17,$1 # U .. .. .. : U U L L
bis $3,$1,$1 # .. .. .. E :
- EXO( stq $1,0($6) ) # .. .. L .. :
- addq $7,8,$7 # .. E .. .. :
+ EXO( stq $1,0($16) ) # .. .. L .. :
+ addq $17,8,$17 # .. E .. .. :
subq $0,8,$0 # E .. .. .. : U L L U
- addq $6,8,$6 # .. .. .. E :
+ addq $16,8,$16 # .. .. .. E :
bis $2,$2,$3 # .. .. E .. :
nop # .. E .. .. :
bne $4,$misquad # U .. .. .. : U L U L
@@ -124,8 +111,8 @@ $misquad:
beq $0,$zerolength # U .. .. .. : U L U L
/* We know we have at least one trip through the byte loop */
- EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
- addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG)
+ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
+ addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG)
nop # .. E .. .. :
br $31, $dirtyentry # L0 .. .. .. : L U U L
/* Do the trailing byte loop load, then hop into the store part of the loop */
@@ -135,8 +122,8 @@ $misquad:
* Based upon the usage context, it's worth the effort to unroll this loop
* $0 - number of bytes to be moved
* $4 - number of bytes to move as quadwords
- * $6 is current destination address
- * $7 is current source address
+ * $16 is current destination address
+ * $17 is current source address
*/
$quadaligned:
subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff
@@ -154,29 +141,29 @@ $quadaligned:
* instruction memory hint instruction).
*/
$unroll4:
- EXI( ldq $1,0($7) ) # .. .. .. L
- EXI( ldq $2,8($7) ) # .. .. L ..
+ EXI( ldq $1,0($17) ) # .. .. .. L
+ EXI( ldq $2,8($17) ) # .. .. L ..
subq $4,32,$4 # .. E .. ..
nop # E .. .. .. : U U L L
- addq $7,16,$7 # .. .. .. E
- EXO( stq $1,0($6) ) # .. .. L ..
- EXO( stq $2,8($6) ) # .. L .. ..
+ addq $17,16,$17 # .. .. .. E
+ EXO( stq $1,0($16) ) # .. .. L ..
+ EXO( stq $2,8($16) ) # .. L .. ..
subq $0,16,$0 # E .. .. .. : U L L U
- addq $6,16,$6 # .. .. .. E
- EXI( ldq $1,0($7) ) # .. .. L ..
- EXI( ldq $2,8($7) ) # .. L .. ..
+ addq $16,16,$16 # .. .. .. E
+ EXI( ldq $1,0($17) ) # .. .. L ..
+ EXI( ldq $2,8($17) ) # .. L .. ..
subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip?
- EXO( stq $1,0($6) ) # .. .. .. L
- EXO( stq $2,8($6) ) # .. .. L ..
+ EXO( stq $1,0($16) ) # .. .. .. L
+ EXO( stq $2,8($16) ) # .. .. L ..
subq $0,16,$0 # .. E .. ..
- addq $7,16,$7 # E .. .. .. : U L L U
+ addq $17,16,$17 # E .. .. .. : U L L U
nop # .. .. .. E
nop # .. .. E ..
- addq $6,16,$6 # .. E .. ..
+ addq $16,16,$16 # .. E .. ..
bgt $3,$unroll4 # U .. .. .. : U L U L
nop
@@ -185,14 +172,14 @@ $unroll4:
beq $4, $noquads
$onequad:
- EXI( ldq $1,0($7) )
+ EXI( ldq $1,0($17) )
subq $4,8,$4
- addq $7,8,$7
+ addq $17,8,$17
nop
- EXO( stq $1,0($6) )
+ EXO( stq $1,0($16) )
subq $0,8,$0
- addq $6,8,$6
+ addq $16,8,$16
bne $4,$onequad
$noquads:
@@ -206,54 +193,33 @@ $noquads:
* There's no point in doing a lot of complex alignment calculations to try to
* to quadword stuff for a small amount of data.
* $0 - remaining number of bytes left to copy
- * $6 - current dest addr
- * $7 - current source addr
+ * $16 - current dest addr
+ * $17 - current source addr
*/
$onebyteloop:
- EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
- addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG)
+ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
+ addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG)
nop # .. E .. .. :
nop # E .. .. .. : U L U L
$dirtyentry:
/*
- * the -1 is to compensate for the inc($6) done in a previous quadpack
+ * the -1 is to compensate for the inc($16) done in a previous quadpack
* which allows us zero dependencies within either quadpack in the loop
*/
- EXO ( stb $2,-1($6) ) # .. .. .. L :
- addq $7,1,$7 # .. .. E .. : quadpack as the load
+ EXO ( stb $2,-1($16) ) # .. .. .. L :
+ addq $17,1,$17 # .. .. E .. : quadpack as the load
subq $0,1,$0 # .. E .. .. : change count _after_ copy
bgt $0,$onebyteloop # U .. .. .. : U L U L
$zerolength:
+$exitin:
$exitout: # Destination for exception recovery(?)
nop # .. .. .. E
nop # .. .. E ..
nop # .. E .. ..
- ret $31,($28),1 # L0 .. .. .. : L U L U
-
-$exitin:
-
- /* A stupid byte-by-byte zeroing of the rest of the output
- buffer. This cures security holes by never leaving
- random kernel data around to be copied elsewhere. */
-
- nop
- nop
- nop
- mov $0,$1
-
-$101:
- EXO ( stb $31,0($6) ) # L
- subq $1,1,$1 # E
- addq $6,1,$6 # E
- bgt $1,$101 # U
-
- nop
- nop
- nop
- ret $31,($28),1 # L0
+ ret $31,($26),1 # L0 .. .. .. : L U L U
.end __copy_user
-
+ EXPORT_SYMBOL(__copy_user)
diff --git a/arch/alpha/lib/ev6-csum_ipv6_magic.S b/arch/alpha/lib/ev6-csum_ipv6_magic.S
index fc0bc399f872..de62627ac4fe 100644
--- a/arch/alpha/lib/ev6-csum_ipv6_magic.S
+++ b/arch/alpha/lib/ev6-csum_ipv6_magic.S
@@ -52,6 +52,7 @@
* may cause additional delay in rare cases (load-load replay traps).
*/
+#include <asm/export.h>
.globl csum_ipv6_magic
.align 4
.ent csum_ipv6_magic
@@ -148,3 +149,4 @@ csum_ipv6_magic:
ret # L0 : L U L U
.end csum_ipv6_magic
+ EXPORT_SYMBOL(csum_ipv6_magic)
diff --git a/arch/alpha/lib/ev6-divide.S b/arch/alpha/lib/ev6-divide.S
index 2a82b9be93fa..d18dc0e96e3d 100644
--- a/arch/alpha/lib/ev6-divide.S
+++ b/arch/alpha/lib/ev6-divide.S
@@ -55,6 +55,7 @@
* Try not to change the actual algorithm if possible for consistency.
*/
+#include <asm/export.h>
#define halt .long 0
/*
@@ -205,6 +206,7 @@ ufunction:
addq $30,STACK,$30 # E :
ret $31,($23),1 # L0 : L U U L
.end ufunction
+EXPORT_SYMBOL(ufunction)
/*
* Uhh.. Ugly signed division. I'd rather not have it at all, but
@@ -257,3 +259,4 @@ sfunction:
addq $30,STACK,$30 # E :
ret $31,($23),1 # L0 : L U U L
.end sfunction
+EXPORT_SYMBOL(sfunction)
diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S
index 1a5f71b9d8b1..419adc53ccb4 100644
--- a/arch/alpha/lib/ev6-memchr.S
+++ b/arch/alpha/lib/ev6-memchr.S
@@ -27,7 +27,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
* Try not to change the actual algorithm if possible for consistency.
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -189,3 +189,4 @@ $not_found:
ret # L0 :
.end memchr
+ EXPORT_SYMBOL(memchr)
diff --git a/arch/alpha/lib/ev6-memcpy.S b/arch/alpha/lib/ev6-memcpy.S
index 52b37b0f2af5..b19798b2efc0 100644
--- a/arch/alpha/lib/ev6-memcpy.S
+++ b/arch/alpha/lib/ev6-memcpy.S
@@ -19,7 +19,7 @@
* Temp usage notes:
* $1,$2, - scratch
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -242,6 +242,7 @@ $nomoredata:
nop # E :
.end memcpy
+ EXPORT_SYMBOL(memcpy)
/* For backwards module compatibility. */
__memcpy = memcpy
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 356bb2fdd705..fed21c6893e8 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -26,7 +26,7 @@
* as fixes will need to be made in multiple places. The performance gain
* is worth it.
*/
-
+#include <asm/export.h>
.set noat
.set noreorder
.text
@@ -229,6 +229,7 @@ end_b:
nop
ret $31,($26),1 # L0 :
.end ___memset
+ EXPORT_SYMBOL(___memset)
/*
* This is the original body of code, prior to replication and
@@ -406,6 +407,7 @@ end:
nop
ret $31,($26),1 # L0 :
.end __constant_c_memset
+ EXPORT_SYMBOL(__constant_c_memset)
/*
* This is a replicant of the __constant_c_memset code, rescheduled
@@ -594,6 +596,9 @@ end_w:
ret $31,($26),1 # L0 :
.end __memsetw
+ EXPORT_SYMBOL(__memsetw)
memset = ___memset
__memset = ___memset
+ EXPORT_SYMBOL(memset)
+ EXPORT_SYMBOL(__memset)
diff --git a/arch/alpha/lib/ev67-strcat.S b/arch/alpha/lib/ev67-strcat.S
index c426fe3ed72f..b69f60419be1 100644
--- a/arch/alpha/lib/ev67-strcat.S
+++ b/arch/alpha/lib/ev67-strcat.S
@@ -19,7 +19,7 @@
* string once.
*/
-
+#include <asm/export.h>
.text
.align 4
@@ -52,3 +52,4 @@ $found: cttz $2, $3 # U0 :
br __stxcpy # L0 :
.end strcat
+ EXPORT_SYMBOL(strcat)
diff --git a/arch/alpha/lib/ev67-strchr.S b/arch/alpha/lib/ev67-strchr.S
index fbb7b4ffade9..ea8f2f35db9c 100644
--- a/arch/alpha/lib/ev67-strchr.S
+++ b/arch/alpha/lib/ev67-strchr.S
@@ -15,7 +15,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
* Try not to change the actual algorithm if possible for consistency.
*/
-
+#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
@@ -86,3 +86,4 @@ $found: negq t0, t1 # E : clear all but least set bit
ret # L0 :
.end strchr
+ EXPORT_SYMBOL(strchr)
diff --git a/arch/alpha/lib/ev67-strlen.S b/arch/alpha/lib/ev67-strlen.S
index 503928072523..736fd41884a8 100644
--- a/arch/alpha/lib/ev67-strlen.S
+++ b/arch/alpha/lib/ev67-strlen.S
@@ -17,7 +17,7 @@
* U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -47,3 +47,4 @@ $found:
ret $31, ($26) # L0 :
.end strlen
+ EXPORT_SYMBOL(strlen)
diff --git a/arch/alpha/lib/ev67-strncat.S b/arch/alpha/lib/ev67-strncat.S
index 4ae716cd2bfb..cd35cbade73a 100644
--- a/arch/alpha/lib/ev67-strncat.S
+++ b/arch/alpha/lib/ev67-strncat.S
@@ -20,7 +20,7 @@
* Try not to change the actual algorithm if possible for consistency.
*/
-
+#include <asm/export.h>
.text
.align 4
@@ -92,3 +92,4 @@ $zerocount:
ret # L0 :
.end strncat
+ EXPORT_SYMBOL(strncat)
diff --git a/arch/alpha/lib/ev67-strrchr.S b/arch/alpha/lib/ev67-strrchr.S
index dd0d8c6b9f59..747455f0328c 100644
--- a/arch/alpha/lib/ev67-strrchr.S
+++ b/arch/alpha/lib/ev67-strrchr.S
@@ -18,7 +18,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
-
+#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
@@ -107,3 +107,4 @@ $eos:
nop
.end strrchr
+ EXPORT_SYMBOL(strrchr)
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 05017ba34c3c..4aa6dbfa14ee 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -4,6 +4,9 @@
* (C) Copyright 1998 Linus Torvalds
*/
+#include <linux/compiler.h>
+#include <linux/export.h>
+
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
#else
@@ -52,6 +55,7 @@ alpha_read_fp_reg (unsigned long reg)
}
return val;
}
+EXPORT_SYMBOL(alpha_read_fp_reg);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val));
@@ -97,6 +101,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val)
case 31: LDT(31, val); break;
}
}
+EXPORT_SYMBOL(alpha_write_fp_reg);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val));
@@ -146,6 +151,7 @@ alpha_read_fp_reg_s (unsigned long reg)
}
return val;
}
+EXPORT_SYMBOL(alpha_read_fp_reg_s);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val));
@@ -191,3 +197,4 @@ alpha_write_fp_reg_s (unsigned long reg, unsigned long val)
case 31: LDS(31, val); break;
}
}
+EXPORT_SYMBOL(alpha_write_fp_reg_s);
diff --git a/arch/alpha/lib/memchr.S b/arch/alpha/lib/memchr.S
index 14427eeb555e..c13d3eca2e05 100644
--- a/arch/alpha/lib/memchr.S
+++ b/arch/alpha/lib/memchr.S
@@ -31,7 +31,7 @@ For correctness consider that:
- only minimum number of quadwords may be accessed
- the third argument is an unsigned long
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -162,3 +162,4 @@ $not_found:
ret # .. e1 :
.end memchr
+ EXPORT_SYMBOL(memchr)
diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c
index 64083fc73238..57d9291ad172 100644
--- a/arch/alpha/lib/memcpy.c
+++ b/arch/alpha/lib/memcpy.c
@@ -16,6 +16,7 @@
*/
#include <linux/types.h>
+#include <linux/export.h>
/*
* This should be done in one go with ldq_u*2/mask/stq_u. Do it
@@ -158,6 +159,4 @@ void * memcpy(void * dest, const void *src, size_t n)
__memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
return dest;
}
-
-/* For backward modules compatibility, define __memcpy. */
-asm("__memcpy = memcpy; .globl __memcpy");
+EXPORT_SYMBOL(memcpy);
diff --git a/arch/alpha/lib/memmove.S b/arch/alpha/lib/memmove.S
index eb3b6e02242f..ff6a39d38385 100644
--- a/arch/alpha/lib/memmove.S
+++ b/arch/alpha/lib/memmove.S
@@ -6,7 +6,7 @@
* This is hand-massaged output from the original memcpy.c. We defer to
* memcpy whenever possible; the backwards copy loops are not unrolled.
*/
-
+#include <asm/export.h>
.set noat
.set noreorder
.text
@@ -179,3 +179,4 @@ $egress:
nop
.end memmove
+ EXPORT_SYMBOL(memmove)
diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S
index 76ccc6d1f364..89a26f5e89de 100644
--- a/arch/alpha/lib/memset.S
+++ b/arch/alpha/lib/memset.S
@@ -13,7 +13,7 @@
* The scheduling comments are according to the EV5 documentation (and done by
* hand, so they might well be incorrect, please do tell me about it..)
*/
-
+#include <asm/export.h>
.set noat
.set noreorder
.text
@@ -106,6 +106,8 @@ within_one_quad:
end:
ret $31,($26),1 /* E1 */
.end ___memset
+EXPORT_SYMBOL(___memset)
+EXPORT_SYMBOL(__constant_c_memset)
.align 5
.ent __memsetw
@@ -122,6 +124,9 @@ __memsetw:
br __constant_c_memset /* .. E1 */
.end __memsetw
+EXPORT_SYMBOL(__memsetw)
memset = ___memset
__memset = ___memset
+ EXPORT_SYMBOL(memset)
+ EXPORT_SYMBOL(__memset)
diff --git a/arch/alpha/lib/strcat.S b/arch/alpha/lib/strcat.S
index 393f50384878..249837b03d4b 100644
--- a/arch/alpha/lib/strcat.S
+++ b/arch/alpha/lib/strcat.S
@@ -4,6 +4,7 @@
*
* Append a null-terminated string from SRC to DST.
*/
+#include <asm/export.h>
.text
@@ -50,3 +51,4 @@ $found: negq $2, $3 # clear all but least set bit
br __stxcpy
.end strcat
+EXPORT_SYMBOL(strcat);
diff --git a/arch/alpha/lib/strchr.S b/arch/alpha/lib/strchr.S
index 011a175e8329..7412a173ea39 100644
--- a/arch/alpha/lib/strchr.S
+++ b/arch/alpha/lib/strchr.S
@@ -5,7 +5,7 @@
* Return the address of a given character within a null-terminated
* string, or null if it is not found.
*/
-
+#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
@@ -68,3 +68,4 @@ $retnull:
ret # .. e1 :
.end strchr
+ EXPORT_SYMBOL(strchr)
diff --git a/arch/alpha/lib/strcpy.S b/arch/alpha/lib/strcpy.S
index e0728e4ad21f..98deae1e4d08 100644
--- a/arch/alpha/lib/strcpy.S
+++ b/arch/alpha/lib/strcpy.S
@@ -5,7 +5,7 @@
* Copy a null-terminated string from SRC to DST. Return a pointer
* to the null-terminator in the source.
*/
-
+#include <asm/export.h>
.text
.align 3
@@ -21,3 +21,4 @@ strcpy:
br __stxcpy # do the copy
.end strcpy
+ EXPORT_SYMBOL(strcpy)
diff --git a/arch/alpha/lib/strlen.S b/arch/alpha/lib/strlen.S
index fe63353de152..79c416f71bac 100644
--- a/arch/alpha/lib/strlen.S
+++ b/arch/alpha/lib/strlen.S
@@ -11,7 +11,7 @@
* do this instead of the 9 instructions that
* binary search needs).
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -55,3 +55,4 @@ done: subq $0, $16, $0
ret $31, ($26)
.end strlen
+ EXPORT_SYMBOL(strlen)
diff --git a/arch/alpha/lib/strncat.S b/arch/alpha/lib/strncat.S
index a8278163c972..6c29ea60869a 100644
--- a/arch/alpha/lib/strncat.S
+++ b/arch/alpha/lib/strncat.S
@@ -9,7 +9,7 @@
* past count, whereas libc may write to count+1. This follows the generic
* implementation in lib/string.c and is, IMHO, more sensible.
*/
-
+#include <asm/export.h>
.text
.align 3
@@ -82,3 +82,4 @@ $zerocount:
ret
.end strncat
+ EXPORT_SYMBOL(strncat)
diff --git a/arch/alpha/lib/strncpy.S b/arch/alpha/lib/strncpy.S
index a46f7f3ad8c7..e102cf1567dd 100644
--- a/arch/alpha/lib/strncpy.S
+++ b/arch/alpha/lib/strncpy.S
@@ -10,7 +10,7 @@
* version has cropped that bit o' nastiness as well as assuming that
* __stxncpy is in range of a branch.
*/
-
+#include <asm/export.h>
.set noat
.set noreorder
@@ -79,3 +79,4 @@ $zerolen:
ret
.end strncpy
+ EXPORT_SYMBOL(strncpy)
diff --git a/arch/alpha/lib/strrchr.S b/arch/alpha/lib/strrchr.S
index 1970dc07cfd1..4bc6cb4b9812 100644
--- a/arch/alpha/lib/strrchr.S
+++ b/arch/alpha/lib/strrchr.S
@@ -5,7 +5,7 @@
* Return the address of the last occurrence of a given character
* within a null-terminated string, or null if it is not found.
*/
-
+#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
@@ -85,3 +85,4 @@ $retnull:
ret # .. e1 :
.end strrchr
+ EXPORT_SYMBOL(strrchr)