diff options
author | Krishna Reddy <vdumpa@nvidia.com> | 2012-08-09 11:18:21 -0700 |
---|---|---|
committer | Simone Willett <swillett@nvidia.com> | 2012-08-15 12:42:26 -0700 |
commit | 18156565232996364bbeef583db5fee07940ca7d (patch) | |
tree | e41b7215b0da1d094609f0612d3e51fd2a31a487 | |
parent | c921a79e0a8834dc697a2afa8e920311340447c6 (diff) |
mm: Fix incorrect cache attribute restoration.
armv7 uses the WRITEALLOC cache policy, but CPA
was restoring the cache attributes to WRITEBACK during
set_pages_array_wb().
Fixed issues in the pmd-to-pte prot translation and vice versa.
Change-Id: I8406b784f62d559f657ef7bc08e77c83ac6e5690
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/122472
Reviewed-by: Simone Willett <swillett@nvidia.com>
Tested-by: Simone Willett <swillett@nvidia.com>
-rw-r--r-- | arch/arm/mm/pageattr.c | 70 |
1 file changed, 49 insertions, 21 deletions
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index b7ff7f19b541..526bf59e61cc 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c @@ -39,6 +39,19 @@ static void inner_flush_cache_all(void) } #if defined(CONFIG_CPA) + +/* + * The arm kernel uses different cache policies(CPOLICY_WRITEBACK, + * CPOLICY_WRITEALLOC, CPOLICY_WRITETHROUGH) based on architecture version + * and smp mode. Using L_PTE_MT_WRITEALLOC or L_PTE_MT_WRITEBACK or + * L_PTE_MT_WRITETHROUGH directly in CPA code can result in restoring incorrect + * PTE attributes. + * pgprot_kernel would always have PTE attributes based on the cache policy + * in use for kernel cache memory. Use this to set the correct PTE attributes + * for kernel cache memory. + * */ +#define L_PTE_MT_KERNEL (pgprot_kernel & L_PTE_MT_MASK) + /* * The current flushing context - we pass it instead of 5 arguments: */ @@ -224,7 +237,8 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte, { pgprot_t ref_prot; - ref_prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; + ref_prot = PMD_TYPE_SECT | PMD_DOMAIN(DOMAIN_KERNEL) | + PMD_SECT_AP_WRITE; if (pte & L_PTE_MT_BUFFERABLE) ref_prot |= PMD_SECT_BUFFERABLE; @@ -232,17 +246,23 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte, if (pte & L_PTE_MT_WRITETHROUGH) ref_prot |= PMD_SECT_CACHEABLE; - if (pte & L_PTE_SHARED) - ref_prot |= PMD_SECT_S; - if (pte & L_PTE_XN) ref_prot |= PMD_SECT_XN; + if (pte & L_PTE_USER) + ref_prot |= PMD_SECT_AP_READ; + + if (pte & (1 << 4)) + ref_prot |= PMD_SECT_TEX(1); + if (pte & L_PTE_RDONLY) - ref_prot &= ~PMD_SECT_AP_WRITE; + ref_prot |= PMD_SECT_APX; + + if (pte & L_PTE_SHARED) + ref_prot |= PMD_SECT_S; - ref_prot |= (ext_prot & (PTE_EXT_AP0 | PTE_EXT_AP1 | PTE_EXT_APX | - PTE_EXT_NG | (7 << 6))) << 6; + if (pte & PTE_EXT_NG) + ref_prot |= PMD_SECT_nG; return ref_prot; } @@ -250,9 +270,10 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte, static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd, unsigned 
long *ext_prot) { - pgprot_t ref_prot = 0; + pgprot_t ref_prot; - ref_prot |= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY; + *ext_prot = 0; + ref_prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY; if (pmd & PMD_SECT_BUFFERABLE) ref_prot |= L_PTE_MT_BUFFERABLE; @@ -260,18 +281,23 @@ static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd, if (pmd & PMD_SECT_CACHEABLE) ref_prot |= L_PTE_MT_WRITETHROUGH; - if (pmd & PMD_SECT_S) - ref_prot |= L_PTE_SHARED; - if (pmd & PMD_SECT_XN) ref_prot |= L_PTE_XN; - if (pmd & PMD_SECT_AP_WRITE) - ref_prot &= ~L_PTE_RDONLY; + if (pmd & PMD_SECT_AP_READ) + ref_prot |= L_PTE_USER; + + if (pmd & PMD_SECT_TEX(1)) + ref_prot |= (1 << 4); - /* AP/APX/TEX bits */ - *ext_prot = (pmd & (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | - PMD_SECT_APX | PMD_SECT_nG | (7 << 12))) >> 6; + if (pmd & PMD_SECT_APX) + ref_prot |= L_PTE_RDONLY; + + if (pmd & PMD_SECT_S) + ref_prot |= L_PTE_SHARED; + + if (pmd & PMD_SECT_nG) + ref_prot |= PTE_EXT_NG; return ref_prot; } @@ -395,6 +421,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, if (numpages < cpa->numpages) cpa->numpages = numpages; + old_pte = *kpte; old_prot = new_prot = req_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot); @@ -502,6 +529,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) ref_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot); + BUG_ON(ref_prot != pgprot_kernel); /* * Get the target pfn from the original entry: */ @@ -760,7 +788,7 @@ static inline int cache_attr(pgprot_t attr) * We need to flush the cache for all memory type changes * except when a page is being marked write back cacheable */ - return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK); + return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_KERNEL); } static int change_page_attr_set_clr(unsigned long *addr, int numpages, @@ -916,7 +944,7 @@ EXPORT_SYMBOL(set_memory_wc); int set_memory_wb(unsigned long addr, int numpages) { return 
change_page_attr_set_clr(&addr, numpages, - __pgprot(L_PTE_MT_WRITEBACK), + __pgprot(L_PTE_MT_KERNEL), __pgprot(L_PTE_MT_MASK), 0, 0, NULL); } @@ -934,7 +962,7 @@ EXPORT_SYMBOL(set_memory_iwb); int set_memory_array_wb(unsigned long *addr, int addrinarray) { return change_page_attr_set_clr(addr, addrinarray, - __pgprot(L_PTE_MT_WRITEBACK), + __pgprot(L_PTE_MT_KERNEL), __pgprot(L_PTE_MT_MASK), 0, CPA_ARRAY, NULL); @@ -1017,7 +1045,7 @@ EXPORT_SYMBOL(set_pages_array_wc); int set_pages_array_wb(struct page **pages, int addrinarray) { return _set_pages_array(pages, addrinarray, - L_PTE_MT_WRITEBACK, L_PTE_MT_MASK); + L_PTE_MT_KERNEL, L_PTE_MT_MASK); } EXPORT_SYMBOL(set_pages_array_wb); |