author     Paul Mundt <lethal@linux-sh.org>  2010-02-17 13:23:00 +0900
committer  Paul Mundt <lethal@linux-sh.org>  2010-02-17 13:23:00 +0900
commit     7bdda6209f224aa784a036df54b22cb338d2e859 (patch)
tree       2c2ce99f0ec55386246379ffb8412b3a893402b0 /arch/sh/include/asm/mmu.h
parent     49f3bfe9334a4cf86079d2ee1d08e674b58862a9 (diff)
sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.
Both the store queue API and the PMB remapping take unsigned long for their pgprot flags, which cuts off the extended protection bits. In the case of the PMB this isn't really a problem since the cache attribute bits that we care about are all in the lower 32 bits, but we do it just to be safe. The store queue remapping on the other hand depends on the extended prot bits for enabling userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
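To make the truncation concrete, here is a minimal standalone sketch (ordinary C, not kernel code): the pgprot_t struct, remap_old/remap_new, and the bit pattern are all illustrative, with uint32_t standing in for sh32's 32-bit unsigned long.

#include <stdint.h>
#include <stdio.h>

/* Models the SH-X2 TLB case: pgprot_t carries a 64-bit value. */
typedef struct { uint64_t pgprot; } pgprot_t;

/* Old-style parameter: uint32_t models sh32's 32-bit unsigned long. */
static void remap_old(uint32_t flags)
{
	printf("old API sees: 0x%08x\n", (unsigned)flags);
}

/* Fixed parameter: the full pgprot_t survives the call boundary. */
static void remap_new(pgprot_t prot)
{
	printf("new API sees: 0x%016llx\n", (unsigned long long)prot.pgprot);
}

int main(void)
{
	/* Hypothetical pgprot with an extended protection bit above bit 31. */
	pgprot_t prot = { .pgprot = 0x0000000100000163ULL };

	remap_old((uint32_t)prot.pgprot);	/* upper 32 bits silently dropped */
	remap_new(prot);			/* extended bits preserved */
	return 0;
}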
Diffstat (limited to 'arch/sh/include/asm/mmu.h')
-rw-r--r--  arch/sh/include/asm/mmu.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 2fcbedb55002..151bc922701b 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -33,6 +33,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/errno.h>
 #include <linux/threads.h>
+#include <asm/page.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -71,13 +72,13 @@ struct pmb_entry {
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
-	       unsigned long size, unsigned long flags);
+	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
 int pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
-			     unsigned long size, unsigned long flags)
+			     unsigned long size, pgprot_t prot)
 {
 	return -EINVAL;
 }
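For callers, the upshot of the new signature is that the pgprot_t is passed through unmodified instead of being narrowed with pgprot_val(). A hedged sketch of an updated caller follows; map_window and the choice of PAGE_SHARED are illustrative, not taken from this patch.

#include <asm/mmu.h>
#include <asm/pgtable.h>

static long map_window(unsigned long virt, unsigned long phys,
		       unsigned long size)
{
	/*
	 * The pre-patch call would have been
	 *   pmb_remap(virt, phys, size, pgprot_val(PAGE_SHARED));
	 * where pgprot_val() yields an unsigned long and thus truncates
	 * the extended protection bits of a 64-bit pgprot on SH-X2.
	 */
	return pmb_remap(virt, phys, size, PAGE_SHARED);
}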