Diffstat (limited to 'arch/arm/mm/pageattr.c')
-rw-r--r--  arch/arm/mm/pageattr.c | 72
1 file changed, 51 insertions(+), 21 deletions(-)
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index b7ff7f19b541..c11064517f56 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -39,6 +39,19 @@ static void inner_flush_cache_all(void)
}
#if defined(CONFIG_CPA)
+
+/*
+ * The ARM kernel uses different cache policies (CPOLICY_WRITEBACK,
+ * CPOLICY_WRITEALLOC, CPOLICY_WRITETHROUGH) depending on the architecture
+ * version and SMP mode. Using L_PTE_MT_WRITEALLOC, L_PTE_MT_WRITEBACK or
+ * L_PTE_MT_WRITETHROUGH directly in the CPA code can therefore restore
+ * incorrect PTE attributes.
+ * pgprot_kernel always carries the PTE attributes that match the cache
+ * policy in use for kernel cached memory, so use it to derive the correct
+ * attributes here.
+ */
+#define L_PTE_MT_KERNEL (pgprot_kernel & L_PTE_MT_MASK)
+
/*
* The current flushing context - we pass it instead of 5 arguments:
*/
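
[Sketch, not part of the patch] L_PTE_MT_KERNEL above simply masks the memory-type bits out of the kernel's default protection value. A minimal illustration of what it resolves to, written with the type-safe pgprot_val() spelling; the constant names come from arch/arm/include/asm/pgtable-2level.h and the policy mapping described in the comment is the usual one, not something this patch defines:

    /* Memory-type bits the kernel itself uses for cached lowmem mappings. */
    unsigned long kernel_mt = pgprot_val(pgprot_kernel) & L_PTE_MT_MASK;

    /*
     * kernel_mt is typically L_PTE_MT_WRITEALLOC on v6K/v7 SMP kernels and
     * L_PTE_MT_WRITEBACK on most UP builds, which is exactly why the CPA
     * code must not hard-code either constant when restoring attributes.
     */
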
@@ -224,7 +237,8 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte,
{
pgprot_t ref_prot;
- ref_prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+ ref_prot = PMD_TYPE_SECT | PMD_DOMAIN(DOMAIN_KERNEL) |
+ PMD_SECT_AP_WRITE;
if (pte & L_PTE_MT_BUFFERABLE)
ref_prot |= PMD_SECT_BUFFERABLE;
@@ -232,17 +246,23 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte,
if (pte & L_PTE_MT_WRITETHROUGH)
ref_prot |= PMD_SECT_CACHEABLE;
- if (pte & L_PTE_SHARED)
- ref_prot |= PMD_SECT_S;
-
if (pte & L_PTE_XN)
ref_prot |= PMD_SECT_XN;
+ if (pte & L_PTE_USER)
+ ref_prot |= PMD_SECT_AP_READ;
+
+ if (pte & (1 << 4))
+ ref_prot |= PMD_SECT_TEX(1);
+
if (pte & L_PTE_RDONLY)
- ref_prot &= ~PMD_SECT_AP_WRITE;
+ ref_prot |= PMD_SECT_APX;
+
+ if (pte & L_PTE_SHARED)
+ ref_prot |= PMD_SECT_S;
- ref_prot |= (ext_prot & (PTE_EXT_AP0 | PTE_EXT_AP1 | PTE_EXT_APX |
- PTE_EXT_NG | (7 << 6))) << 6;
+ if (pte & PTE_EXT_NG)
+ ref_prot |= PMD_SECT_nG;
return ref_prot;
}
@@ -250,9 +270,10 @@ static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte,
static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd,
unsigned long *ext_prot)
{
- pgprot_t ref_prot = 0;
+ pgprot_t ref_prot;
- ref_prot |= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY;
+ *ext_prot = 0;
+ ref_prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY;
if (pmd & PMD_SECT_BUFFERABLE)
ref_prot |= L_PTE_MT_BUFFERABLE;
@@ -260,18 +281,23 @@ static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd,
if (pmd & PMD_SECT_CACHEABLE)
ref_prot |= L_PTE_MT_WRITETHROUGH;
- if (pmd & PMD_SECT_S)
- ref_prot |= L_PTE_SHARED;
-
if (pmd & PMD_SECT_XN)
ref_prot |= L_PTE_XN;
- if (pmd & PMD_SECT_AP_WRITE)
- ref_prot &= ~L_PTE_RDONLY;
+ if (pmd & PMD_SECT_AP_READ)
+ ref_prot |= L_PTE_USER;
+
+ if (pmd & PMD_SECT_TEX(1))
+ ref_prot |= (1 << 4);
- /* AP/APX/TEX bits */
- *ext_prot = (pmd & (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
- PMD_SECT_APX | PMD_SECT_nG | (7 << 12))) >> 6;
+ if (pmd & PMD_SECT_APX)
+ ref_prot |= L_PTE_RDONLY;
+
+ if (pmd & PMD_SECT_S)
+ ref_prot |= L_PTE_SHARED;
+
+ if (pmd & PMD_SECT_nG)
+ ref_prot |= PTE_EXT_NG;
return ref_prot;
}
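
[Sketch, not part of the patch] With the bit-by-bit translation above, pte_to_pmd_pgprot() and pmd_to_pte_pgprot() are expected to be exact inverses for kernel mappings; the BUG_ON() added to split_large_page() further down relies on that. A rough self-check, assuming it sits inside pageattr.c next to the two helpers and that pte_to_pmd_pgprot()'s second argument takes the extended PTE bits by value (the function name cpa_check_roundtrip is made up for illustration):

    static void cpa_check_roundtrip(void)
    {
            unsigned long ext = 0;
            pgprot_t sect, pte;

            /* Linux PTE attributes -> section (PMD) attributes ... */
            sect = pte_to_pmd_pgprot(pgprot_val(pgprot_kernel), 0);
            /* ... and back to Linux PTE attributes. */
            pte = pmd_to_pte_pgprot(pgprot_val(sect), &ext);

            /*
             * The round trip must reproduce pgprot_kernel bit for bit;
             * otherwise splitting a kernel section mapping would silently
             * change its memory type.
             */
            WARN_ON(pgprot_val(pte) != pgprot_val(pgprot_kernel));
    }
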
@@ -395,6 +421,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
if (numpages < cpa->numpages)
cpa->numpages = numpages;
+ old_pte = *kpte;
old_prot = new_prot = req_prot = pmd_to_pte_pgprot(pmd_val(*kpte),
&ext_prot);
@@ -476,6 +503,8 @@ static int split_large_page(pte_t *kpte, unsigned long address)
pgprot_t ref_prot = 0, ext_prot = 0;
int ret = 0;
+ BUG_ON((address & PMD_MASK) < __pa(_end));
+
pbase = pte_alloc_one_kernel(&init_mm, address);
if (!pbase)
return -ENOMEM;
@@ -502,6 +531,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
ref_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot);
+ BUG_ON(ref_prot != pgprot_kernel);
/*
* Get the target pfn from the original entry:
*/
@@ -760,7 +790,7 @@ static inline int cache_attr(pgprot_t attr)
* We need to flush the cache for all memory type changes
* except when a page is being marked write back cacheable
*/
- return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK);
+ return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_KERNEL);
}
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
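
[Illustration, not from the patch] With L_PTE_MT_KERNEL substituted for plain write-back, cache_attr() skips the flush only when the requested type already matches the kernel's own. The return values in the comments below are what the expression above evaluates to on a write-alloc kernel, assuming the standard L_PTE_MT_* constants:

    cache_attr(__pgprot(L_PTE_MT_KERNEL));    /* 0: type unchanged, no flush needed */
    cache_attr(__pgprot(L_PTE_MT_UNCACHED));  /* 1: memory type changes, flush */
    cache_attr(__pgprot(L_PTE_MT_WRITEBACK)); /* 1 on a write-alloc kernel: plain WB
                                               * is not the kernel type, so flush */
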
@@ -916,7 +946,7 @@ EXPORT_SYMBOL(set_memory_wc);
int set_memory_wb(unsigned long addr, int numpages)
{
return change_page_attr_set_clr(&addr, numpages,
- __pgprot(L_PTE_MT_WRITEBACK),
+ __pgprot(L_PTE_MT_KERNEL),
__pgprot(L_PTE_MT_MASK),
0, 0, NULL);
}
@@ -934,7 +964,7 @@ EXPORT_SYMBOL(set_memory_iwb);
int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
return change_page_attr_set_clr(addr, addrinarray,
- __pgprot(L_PTE_MT_WRITEBACK),
+ __pgprot(L_PTE_MT_KERNEL),
__pgprot(L_PTE_MT_MASK),
0, CPA_ARRAY, NULL);
@@ -1017,7 +1047,7 @@ EXPORT_SYMBOL(set_pages_array_wc);
int set_pages_array_wb(struct page **pages, int addrinarray)
{
return _set_pages_array(pages, addrinarray,
- L_PTE_MT_WRITEBACK, L_PTE_MT_MASK);
+ L_PTE_MT_KERNEL, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_wb);
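
[Usage sketch, not from the patch] The set_memory_*() calls adjusted above are the driver-facing side of this change; a minimal, hypothetical use with error handling omitted and 4 contiguous lowmem pages assumed:

    unsigned long buf = __get_free_pages(GFP_KERNEL, 2);  /* 4 pages, hypothetical buffer */
    int nr_pages = 4;

    set_memory_wc(buf, nr_pages);   /* write-combine while the device streams into it */
    /* ... device / DMA access ... */
    set_memory_wb(buf, nr_pages);   /* restore the kernel default cacheable type,
                                     * i.e. L_PTE_MT_KERNEL after this patch */

    free_pages(buf, 2);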