author		Paul Mundt <lethal@linux-sh.org>	2011-02-15 16:48:28 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2011-02-15 16:48:28 +0900
commit		18e9550273b8a4d28044202f51cb2c3c9254d7c6 (patch)
tree		a162ad088c712bd6dc35c7013cd30999731823c2 /arch/sh
parent		17292ecc07857bb16737c340dda289ab9d219e05 (diff)
parent		d4f7e513234019a005c4d33477189f2a4e53bb9c (diff)
Merge branch 'sh/st-integration' into sh-latest
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/boot/compressed/Makefile	 2
-rw-r--r--	arch/sh/include/asm/sections.h		 2
-rw-r--r--	arch/sh/lib/delay.c			10
-rw-r--r--	arch/sh/mm/Makefile			 2
-rw-r--r--	arch/sh/mm/cache.c			 3
5 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index e0b0293bae63..780e083e4d17 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -11,6 +11,8 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz \
 
 OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
 
+GCOV_PROFILE := n
+
 #
 # IMAGE_OFFSET is the load offset of the compression loader
 #
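The compressed boot stub is freestanding and links no kernel runtime, so gcov-instrumented objects would reference profiling symbols that nothing provides; GCOV_PROFILE := n switches instrumentation off for every object built by this Makefile. A minimal sketch of the kind of reference that -fprofile-arcs adds per translation unit (the __gcov_init entry point and the opaque gcov_info record are assumptions based on GCC's profiling ABI):

	/* Sketch: what a gcov-instrumented object conceptually carries.
	 * __gcov_init() and struct gcov_info are assumed from GCC's
	 * profiling ABI; the freestanding boot stub links neither. */
	struct gcov_info;                           /* per-object counter record */
	extern void __gcov_init(struct gcov_info *info);

	static struct gcov_info this_object;        /* emitted by -fprofile-arcs */

	static void __attribute__((constructor)) gcov_register(void)
	{
		__gcov_init(&this_object);          /* unresolvable in the stub */
	}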
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index a78701da775b..4a5350037c8f 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,7 +3,7 @@
 
 #include <asm-generic/sections.h>
 
-extern void __nosave_begin, __nosave_end;
+extern long __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
 extern char _ebss[];
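Declaring __nosave_begin and __nosave_end as void objects was never valid C (it compiled only as a GCC extension), and these symbols are linker-script markers that are meaningful by address alone, so long brings them in line with __machvec_start on the next line. A sketch of a typical consumer, assuming the generic hibernation pattern (pfn_is_nosave() and its use of __pa() are an illustrative example, not part of this patch):

	/* Sketch: section boundary symbols are used by address only.
	 * pfn_is_nosave() follows the common hibernation pattern and
	 * is an assumed example. */
	extern long __nosave_begin, __nosave_end;

	static int pfn_is_nosave(unsigned long pfn)
	{
		unsigned long begin = __pa(&__nosave_begin) >> PAGE_SHIFT;
		unsigned long end   = __pa(&__nosave_end)   >> PAGE_SHIFT;

		return pfn >= begin && pfn < end;
	}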
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index faa8f86c0db4..0901b2f14e15 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -10,6 +10,16 @@
 void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
+		/*
+		 * ST40-300 appears to have an issue with this code,
+		 * normally taking two cycles each loop, as with all
+		 * other SH variants. If, however, the branch and the
+		 * delay slot straddle an 8-byte boundary, this increases
+		 * to 3 cycles.
+		 * This align directive ensures that this doesn't occur.
+		 */
+		".balign 8\n\t"
+
 		"tst	%0, %0\n\t"
 		"1:\t"
 		"bf/s	1b\n\t"
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 150aa326afff..2228c8cee4d6 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -42,6 +42,8 @@ obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
 obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
 obj-$(CONFIG_HAVE_SRAM_POOL)	+= sram.o
 
+GCOV_PROFILE_pmb.o := n
+
 # Special flags for fault_64.o. This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
 # This is required because the code is called from a context in entry.S where
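GCOV_PROFILE_pmb.o := n is the per-object form of the switch used in the boot Makefile above: only pmb.o loses instrumentation. pmb.c reprograms the Privileged space Mapping Buffer, so a profiling store issued mid-update could target an address whose translation is in flux; a rough sketch of the hazard, with purely illustrative names:

	/* Sketch of the hazard, names illustrative: a gcov counter bump
	 * is an ordinary memory store, and between the two PMB register
	 * writes below the mapping it lands in may be stale. */
	static unsigned long long edge_counter;	/* gcov-style counter */

	static void pmb_rewrite_entry_sketch(void)
	{
		edge_counter++;		/* instrumented store: unsafe here */
		/* ... write PMB address register ... */
		/* ... write PMB data register ... */
	}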
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 88d3dc3d30d5..5a580ea04429 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
 		kunmap_atomic(vfrom, KM_USER0);
 	}
 
-	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
+	    (vma->vm_flags & VM_EXEC))
 		__flush_purge_region(vto, PAGE_SIZE);
 
 	kunmap_atomic(vto, KM_USER1);
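The extra VM_EXEC test purges the kernel-side copy even when the two mappings do not alias: SH instruction caches do not snoop the data cache, so instructions copied into an executable page must be written back before the I-cache can fetch them. For reference, pages_do_alias() on SH conventionally compares cache index bits; a sketch under that assumption (the alias mask variable is illustrative):

	/* Sketch: with a virtually indexed D-cache, two addresses alias
	 * when they differ in the cache index bits. The mask comparison
	 * is assumed from the usual SH definition; names illustrative. */
	static unsigned long dcache_alias_mask;	/* from probed cache geometry */

	static inline int pages_do_alias(unsigned long a, unsigned long b)
	{
		return (a ^ b) & dcache_alias_mask;	/* index bits differ? */
	}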