diff options
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-l2x0.c |  8
-rw-r--r--  arch/arm/mm/init.c       |  6
-rw-r--r--  arch/arm/mm/mmu.c        |  2
-rw-r--r--  arch/arm/mm/pageattr.c   | 72
4 files changed, 62 insertions, 26 deletions
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index e6871a3e4d1c..972c7bd08a83 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -83,7 +83,8 @@ static inline void l2x0_inv_line(unsigned long addr) writel_relaxed(addr, base + L2X0_INV_LINE_PA); } -#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) +#if !defined(CONFIG_TRUSTED_FOUNDATIONS) && \ + (defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)) #define debug_writel(val) outer_cache.set_debug(val) @@ -161,7 +162,7 @@ static void l2x0_flush_all(void) unsigned long flags; #ifdef CONFIG_PL310_ERRATA_727915 - if (is_pl310_rev(REV_PL310_R2P0)) { + if (is_pl310_rev(REV_PL310_R2P0) || is_pl310_rev(REV_PL310_R3P1_50)) { l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX); return; } @@ -178,7 +179,7 @@ static void l2x0_clean_all(void) unsigned long flags; #ifdef CONFIG_PL310_ERRATA_727915 - if (is_pl310_rev(REV_PL310_R2P0)) { + if (is_pl310_rev(REV_PL310_R2P0) || is_pl310_rev(REV_PL310_R3P1_50)) { l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX); return; } @@ -418,6 +419,7 @@ void l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) outer_cache.flush_range = l2x0_flush_range; outer_cache.sync = l2x0_cache_sync; outer_cache.flush_all = l2x0_flush_all; + outer_cache.clean_all = l2x0_clean_all; outer_cache.inv_all = l2x0_inv_all; outer_cache.disable = l2x0_disable; outer_cache.set_debug = l2x0_set_debug; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 6f81c8e05c3a..cb4e96e1019d 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -339,7 +339,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) #ifdef CONFIG_XIP_KERNEL memblock_reserve(__pa(_sdata), _end - _sdata); #else - memblock_reserve(__pa(_stext), _end - _stext); + memblock_reserve(__pa(_stext), ALIGN(_end - _stext, PMD_SIZE)); #endif #ifdef CONFIG_BLK_DEV_INITRD if (phys_initrd_size && @@ -734,6 +734,7 @@ void 
__init mem_init(void) void free_initmem(void) { +#ifndef CONFIG_CPA #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; @@ -748,6 +749,7 @@ void free_initmem(void) totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), __phys_to_pfn(__pa(__init_end)), "init"); +#endif } #ifdef CONFIG_BLK_DEV_INITRD @@ -756,12 +758,14 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { +#ifndef CONFIG_CPA if (!keep_initrd) { poison_init_mem((void *)start, PAGE_ALIGN(end) - start); totalram_pages += free_area(__phys_to_pfn(__pa(start)), __phys_to_pfn(__pa(end)), "initrd"); } +#endif } static int __init keepinitrd_setup(char *__unused) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index bb80555edac9..992bbc52938c 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -942,7 +942,7 @@ void __init arm_mm_memblock_reserve(void) */ memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); -#ifdef CONFIG_SA1111 +#if defined(CONFIG_SA1111) || defined(CONFIG_CPA) /* * Because of the SA1111 DMA bug, we want to preserve our * precious DMA-able memory... diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index b7ff7f19b541..c11064517f56 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c @@ -39,6 +39,19 @@ static void inner_flush_cache_all(void) } #if defined(CONFIG_CPA) + +/* + * The arm kernel uses different cache policies(CPOLICY_WRITEBACK, + * CPOLICY_WRITEALLOC, CPOLICY_WRITETHROUGH) based on architecture version + * and smp mode. Using L_PTE_MT_WRITEALLOC or L_PTE_MT_WRITEBACK or + * L_PTE_MT_WRITETHROUGH directly in CPA code can result in restoring incorrect + * PTE attributes. + * pgprot_kernel would always have PTE attributes based on the cache policy + * in use for kernel cache memory. Use this to set the correct PTE attributes + * for kernel cache memory. 
+ * */ +#define L_PTE_MT_KERNEL (pgprot_kernel & L_PTE_MT_MASK) + /* * The current flushing context - we pass it instead of 5 arguments: */ @@ -224,7 +237,8 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte, { pgprot_t ref_prot; - ref_prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; + ref_prot = PMD_TYPE_SECT | PMD_DOMAIN(DOMAIN_KERNEL) | + PMD_SECT_AP_WRITE; if (pte & L_PTE_MT_BUFFERABLE) ref_prot |= PMD_SECT_BUFFERABLE; @@ -232,17 +246,23 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte, if (pte & L_PTE_MT_WRITETHROUGH) ref_prot |= PMD_SECT_CACHEABLE; - if (pte & L_PTE_SHARED) - ref_prot |= PMD_SECT_S; - if (pte & L_PTE_XN) ref_prot |= PMD_SECT_XN; + if (pte & L_PTE_USER) + ref_prot |= PMD_SECT_AP_READ; + + if (pte & (1 << 4)) + ref_prot |= PMD_SECT_TEX(1); + if (pte & L_PTE_RDONLY) - ref_prot &= ~PMD_SECT_AP_WRITE; + ref_prot |= PMD_SECT_APX; + + if (pte & L_PTE_SHARED) + ref_prot |= PMD_SECT_S; - ref_prot |= (ext_prot & (PTE_EXT_AP0 | PTE_EXT_AP1 | PTE_EXT_APX | - PTE_EXT_NG | (7 << 6))) << 6; + if (pte & PTE_EXT_NG) + ref_prot |= PMD_SECT_nG; return ref_prot; } @@ -250,9 +270,10 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte, static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd, unsigned long *ext_prot) { - pgprot_t ref_prot = 0; + pgprot_t ref_prot; - ref_prot |= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY; + *ext_prot = 0; + ref_prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY; if (pmd & PMD_SECT_BUFFERABLE) ref_prot |= L_PTE_MT_BUFFERABLE; @@ -260,18 +281,23 @@ static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd, if (pmd & PMD_SECT_CACHEABLE) ref_prot |= L_PTE_MT_WRITETHROUGH; - if (pmd & PMD_SECT_S) - ref_prot |= L_PTE_SHARED; - if (pmd & PMD_SECT_XN) ref_prot |= L_PTE_XN; - if (pmd & PMD_SECT_AP_WRITE) - ref_prot &= ~L_PTE_RDONLY; + if (pmd & PMD_SECT_AP_READ) + ref_prot |= L_PTE_USER; + + if (pmd & PMD_SECT_TEX(1)) + ref_prot |= (1 << 4); - /* AP/APX/TEX bits */ - *ext_prot = (pmd & 
(PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | - PMD_SECT_APX | PMD_SECT_nG | (7 << 12))) >> 6; + if (pmd & PMD_SECT_APX) + ref_prot |= L_PTE_RDONLY; + + if (pmd & PMD_SECT_S) + ref_prot |= L_PTE_SHARED; + + if (pmd & PMD_SECT_nG) + ref_prot |= PTE_EXT_NG; return ref_prot; } @@ -395,6 +421,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, if (numpages < cpa->numpages) cpa->numpages = numpages; + old_pte = *kpte; old_prot = new_prot = req_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot); @@ -476,6 +503,8 @@ static int split_large_page(pte_t *kpte, unsigned long address) pgprot_t ref_prot = 0, ext_prot = 0; int ret = 0; + BUG_ON((address & PMD_MASK) < __pa(_end)); + pbase = pte_alloc_one_kernel(&init_mm, address); if (!pbase) return -ENOMEM; @@ -502,6 +531,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) ref_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot); + BUG_ON(ref_prot != pgprot_kernel); /* * Get the target pfn from the original entry: */ @@ -760,7 +790,7 @@ static inline int cache_attr(pgprot_t attr) * We need to flush the cache for all memory type changes * except when a page is being marked write back cacheable */ - return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK); + return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_KERNEL); } static int change_page_attr_set_clr(unsigned long *addr, int numpages, @@ -916,7 +946,7 @@ EXPORT_SYMBOL(set_memory_wc); int set_memory_wb(unsigned long addr, int numpages) { return change_page_attr_set_clr(&addr, numpages, - __pgprot(L_PTE_MT_WRITEBACK), + __pgprot(L_PTE_MT_KERNEL), __pgprot(L_PTE_MT_MASK), 0, 0, NULL); } @@ -934,7 +964,7 @@ EXPORT_SYMBOL(set_memory_iwb); int set_memory_array_wb(unsigned long *addr, int addrinarray) { return change_page_attr_set_clr(addr, addrinarray, - __pgprot(L_PTE_MT_WRITEBACK), + __pgprot(L_PTE_MT_KERNEL), __pgprot(L_PTE_MT_MASK), 0, CPA_ARRAY, NULL); @@ -1017,7 +1047,7 @@ EXPORT_SYMBOL(set_pages_array_wc); int set_pages_array_wb(struct 
page **pages, int addrinarray) { return _set_pages_array(pages, addrinarray, - L_PTE_MT_WRITEBACK, L_PTE_MT_MASK); + L_PTE_MT_KERNEL, L_PTE_MT_MASK); } EXPORT_SYMBOL(set_pages_array_wb);