Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-l2x0.c  | 81
-rw-r--r--  arch/arm/mm/flush.c       |  9
-rw-r--r--  arch/arm/mm/ioremap.c     | 57
-rw-r--r--  arch/arm/mm/mmu.c         |  5
-rw-r--r--  arch/arm/mm/nommu.c       | 12
-rw-r--r--  arch/arm/mm/proc-macros.S |  4
-rw-r--r--  arch/arm/mm/proc-v6.S     | 16
7 files changed, 158 insertions(+), 26 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b480f1d3591f..6688b9bd17ec 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/io.h>
@@ -24,8 +25,15 @@
#include <asm/hardware/cache-l2x0.h>
#define CACHE_LINE_SIZE 32
+#ifdef CONFIG_OPROFILE_ARM11_EVTMON
+#define L2_ENABLE_BIT 0x1
+#define L2_EVTBUS_BIT 0x100000
+#define L2_CTL_REG (l2x0_base + L2X0_CTRL)
+#define L2_AUX_REG (l2x0_base + L2X0_AUX_CTRL)
+#endif
static void __iomem *l2x0_base;
+static unsigned long l2x0_aux;
static DEFINE_SPINLOCK(l2x0_lock);
static inline void sync_writel(unsigned long val, unsigned long reg,
@@ -53,6 +61,13 @@ static inline void l2x0_inv_all(void)
cache_sync();
}
+static void l2x0_flush_all(void)
+{
+ /* clean and invalidate all ways */
+ sync_writel(0xff, L2X0_CLEAN_INV_WAY, 0xff);
+ cache_sync();
+}
+
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -93,6 +108,49 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
cache_sync();
}
+#ifdef CONFIG_OPROFILE_ARM11_EVTMON
+/*
+ * Enable the EVTBUS to monitor L2 cache events
+ */
+void l2x0_evtbus_enable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* If the L2 cache is enabled: disable it, set the EVTBUS bit in the
+  * auxiliary control register, then re-enable the cache. */
+ if ((readl(L2_CTL_REG) & L2_ENABLE_BIT) != 0) {
+ writel(0, L2_CTL_REG);
+ writel(readl(L2_AUX_REG) | L2_EVTBUS_BIT, L2_AUX_REG);
+ writel(L2_ENABLE_BIT, L2_CTL_REG);
+ } else {
+ writel(readl(L2_AUX_REG) | L2_EVTBUS_BIT, L2_AUX_REG);
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Disable the EVTBUS
+ */
+void l2x0_evtbus_disable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* If the L2 cache is enabled: disable it, clear the EVTBUS bit in the
+  * auxiliary control register, then re-enable the cache. */
+ if ((readl(L2_CTL_REG) & L2_ENABLE_BIT) != 0) {
+ writel(0, L2_CTL_REG);
+ writel(readl(L2_AUX_REG) & ~L2_EVTBUS_BIT, L2_AUX_REG);
+ writel(L2_ENABLE_BIT, L2_CTL_REG);
+ } else {
+ writel(readl(L2_AUX_REG) & ~L2_EVTBUS_BIT, L2_AUX_REG);
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(l2x0_evtbus_enable);
+EXPORT_SYMBOL(l2x0_evtbus_disable);
+#endif
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
@@ -105,6 +163,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
aux = readl(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
+ l2x0_aux = aux;
writel(aux, l2x0_base + L2X0_AUX_CTRL);
l2x0_inv_all();
@@ -115,6 +174,28 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
+ outer_cache.flush_all = l2x0_flush_all;
printk(KERN_INFO "L2X0 cache controller enabled\n");
}
+EXPORT_SYMBOL(outer_cache);
+
+void l2x0_disable(void)
+{
+ if (readl(l2x0_base + L2X0_CTRL)
+ && !(readl(l2x0_base + L2X0_DEBUG_CTRL) & 0x2)) {
+ l2x0_flush_all();
+ writel(0, l2x0_base + L2X0_CTRL);
+ l2x0_flush_all();
+ }
+}
+
+void l2x0_enable(void)
+{
+ if (!readl(l2x0_base + L2X0_CTRL)) {
+ writel(l2x0_aux, l2x0_base + L2X0_AUX_CTRL);
+ l2x0_inv_all();
+ /* enable L2X0 */
+ writel(1, l2x0_base + L2X0_CTRL);
+ }
+}
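The new l2x0_enable()/l2x0_disable() pair is aimed at power-management code that must shut the outer cache down cleanly before a low-power state and bring it back afterwards. A minimal usage sketch, assuming the prototypes are exposed in a header and that mach_do_lowpower() is a hypothetical platform entry point, not part of this patch:

#include <linux/suspend.h>

extern void l2x0_disable(void);	/* assumed declared in a header */
extern void l2x0_enable(void);

static int mach_pm_enter(suspend_state_t state)
{
	l2x0_disable();		/* clean+invalidate all ways, then turn L2 off */
	mach_do_lowpower();	/* hypothetical platform low-power entry */
	l2x0_enable();		/* rewrite saved aux ctrl, invalidate, enable */
	return 0;
}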
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c07222eb5ce0..575f3ad722e7 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -144,7 +144,14 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
* page. This ensures that data in the physical page is mutually
* coherent with the kernel's mapping.
*/
- __cpuc_flush_dcache_page(page_address(page));
+#ifdef CONFIG_HIGHMEM
+ /*
+ * kmap_atomic() doesn't set the page virtual address, and
+ * kunmap_atomic() takes care of cache flushing already.
+ */
+ if (page_address(page))
+#endif
+ __cpuc_flush_dcache_page(page_address(page));
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
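The guard added above exists because a highmem page may have no kernel virtual address at all: page_address() then returns NULL, and kunmap_atomic() already performs the flush when the temporary mapping is torn down. A standalone illustration of the same check (not code from this patch):

#include <linux/mm.h>
#include <asm/cacheflush.h>

static void flush_page_if_mapped(struct page *page)
{
	void *kaddr = page_address(page);	/* NULL for unmapped highmem */

	if (kaddr)				/* flush only via a real mapping */
		__cpuc_flush_dcache_page(kaddr);
}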
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ab75c60f7cf..28c8b950ef04 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm)
* which requires the new ioremap'd region to be referenced, the CPU will
* reference the _old_ region.
*
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
*/
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
}
#endif
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
- unsigned int mtype)
+void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+ unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
int err;
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
*/
size = PAGE_ALIGN(offset + size);
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
flush_cache_vmap(addr, addr + size);
return (void __iomem *) (offset + addr);
}
-EXPORT_SYMBOL(__arm_ioremap_pfn);
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
unsigned long last_addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
if (!size || last_addr < phys_addr)
return NULL;
- return __arm_ioremap_pfn(pfn, offset, size, mtype);
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned int mtype)
+{
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
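The point of the new *_caller variants is that the outermost wrapper captures its own return address and threads it down to get_vm_area_caller(), so /proc/vmallocinfo can attribute each mapping to the driver that created it rather than to the ioremap helper itself. A hedged sketch of the same pattern in a driver-local wrapper (my_ioremap() is hypothetical; noinline keeps the captured address meaningful):

#include <linux/compiler.h>
#include <asm/io.h>
#include <asm/mach/map.h>	/* MT_DEVICE */

static noinline void __iomem *my_ioremap(unsigned long phys, size_t size)
{
	/* __builtin_return_address(0) is the call site *of this wrapper* */
	return __arm_ioremap_caller(phys, size, MT_DEVICE,
				    __builtin_return_address(0));
}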
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4426ee67ceca..3d4411322bd3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -936,7 +936,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
+#ifdef CONFIG_VECTORS_PHY_ADDR
+ /* use OCRAM as the vector page (CQ116049) */
+ map.pfn = __phys_to_pfn(CONFIG_VECTORS_PHY_ADDR);
+#else
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+#endif
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
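For reference, the pfn conversion used in the hunk above is just a page shift; the generic ARM helper in asm/memory.h amounts to:

/* page frame number = physical address divided by the page size */
#define __phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)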
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index ad7bacc693b2..5c50cd729555 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -73,6 +73,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
+ size_t size, unsigned int mtype, void *caller)
+{
+ return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
unsigned int mtype)
{
@@ -80,6 +86,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ return __arm_ioremap(phys_addr, size, mtype);
+}
+
void __iounmap(volatile void __iomem *addr)
{
}
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 54b1f721dec8..2d9211930dc1 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -119,8 +119,8 @@
.long 0x00 @ unused
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
.long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
- .long 0x00 @ unused
- .long 0x00 @ unused
+ .long PTE_EXT_TEX(4) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_OUTER_UNCACHED
+ .long PTE_EXT_TEX(6) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_OUTER_WRITETHRU
.long 0x00 @ unused
.endm
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 524ddae92595..deef0921f494 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -176,6 +176,22 @@ __v6_setup:
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear them
orr r0, r0, r6 @ set them
+#ifdef CONFIG_ARM_ERRATA_364296
+ /* Workaround for the 364296 ARM1136 r0pX erratum (possible cache data
+ * corruption with hit-under-miss enabled). The conditional code below
+ * (setting the undocumented bit 31 in the auxiliary control register
+ * and the FI bit in the control register) disables hit-under-miss
+ * without putting the processor into full low interrupt latency mode.
+ */
+ ldr r6, =0x4107b360 @ id for ARM1136 r0pX
+ mrc p15, 0, r5, c0, c0, 0 @ get processor id
+ bic r5, r5, #0xf @ mask out revision bits [3:0]
+ teq r5, r6 @ check for the faulty core
+ mrceq p15, 0, r5, c1, c0, 1 @ load aux control reg
+ orreq r5, r5, #(1 << 31) @ set the undocumented bit 31
+ mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg
+ orreq r0, r0, #(1 << 21) @ low interrupt latency configuration
+#endif
mov pc, lr @ return to head.S:__ret
/*
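The workaround above identifies affected parts by masking the revision field out of the main ID register and comparing against the ARM1136 r0pX value. The same check expressed in C, as an illustrative sketch only (the assembly remains the authoritative version):

/* Non-zero on an ARM1136 r0pX core affected by erratum 364296. */
static inline int arm1136_r0_affected(void)
{
	unsigned int midr;

	asm("mrc p15, 0, %0, c0, c0, 0" : "=r" (midr));	/* read MIDR */

	return (midr & ~0xfU) == 0x4107b360;	/* drop revision bits [3:0] */
}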