author	Vinod Rex <vrex@nvidia.com>	2011-10-07 11:30:35 -0700
committer	Dan Willemsen <dwillemsen@nvidia.com>	2011-11-30 21:49:18 -0800
commit	3e6793f5b88bdbe33294bb50f7424c0a3af70b0a (patch)
tree	472746a8ffb1d80d2ceb7376f578636d0b4ed2c2 /arch/arm/include
parent	25ff8cf8fae7de0a7a47afbe54ea4e298b93e66d (diff)
arm: mm: change_page_attr support
bug 865816

Adapted from x86 change_page_attr() implementation

Change-Id: I398c9d460b841484de4fcfcac10ffffdf49a4a5a
Reviewed-on: http://git-master/r/56769
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Tested-by: Krishna Reddy <vdumpa@nvidia.com>
Rebase-Id: Rddeccf358c948ba84af52316f084814ae53dca5e
Diffstat (limited to 'arch/arm/include')
-rw-r--r--	arch/arm/include/asm/cacheflush.h	49
-rw-r--r--	arch/arm/include/asm/page.h	2
-rw-r--r--	arch/arm/include/asm/pgtable.h	21
3 files changed, 72 insertions, 0 deletions
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c72682..c57641bf36e2 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -344,4 +344,53 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
flush_cache_all();
}
+/*
+ * The set_memory_* API can be used to change various attributes of a virtual
+ * address range. The attributes include:
+ * Cacheability : UnCached, WriteCombining, WriteBack
+ * Executability : eXecutable, NoteXecutable
+ * Read/Write : ReadOnly, ReadWrite
+ * Presence : NotPresent
+ *
+ * Within a category, the attributes are mutually exclusive.
+ *
+ * The implementation of this API will take care of various aspects that
+ * are associated with changing such attributes, such as:
+ * - Flushing TLBs
+ * - Flushing CPU caches
+ * - Making sure aliases of the memory behind the mapping don't violate
+ * coherency rules as defined by the CPU in the system.
+ *
+ * What this API does not do:
+ * - Provide exclusion between various callers - including callers that
+ * operate on other mappings of the same physical page
+ * - Restore default attributes when a page is freed
+ * - Guarantee anything about mappings other than the requested one,
+ * beyond the fact that they do not violate the coherency rules of the
+ * CPU in the system. Do not depend on any effects on other mappings;
+ * CPUs other than the one you have may have more relaxed rules.
+ * The caller is required to take care of these.
+ */
+
+int set_memory_uc(unsigned long addr, int numpages);
+int set_memory_wc(unsigned long addr, int numpages);
+int set_memory_wb(unsigned long addr, int numpages);
+int set_memory_iwb(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
+int set_memory_4k(unsigned long addr, int numpages);
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+int set_memory_array_iwb(unsigned long *addr, int addrinarray);
+
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+int set_pages_array_iwb(struct page **pages, int addrinarray);
+
#endif
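
The comment block above documents the contract of the set_memory_* interface, while this patch only adds the declarations. As a minimal, hypothetical usage sketch (not part of the patch; the function and buffer names are illustrative, and it assumes the implementation added elsewhere in this series backs these declarations), a driver could temporarily make a page-aligned kernel buffer uncached and then restore it:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>

static int example_make_uncached(void)
{
	unsigned long buf;
	int ret;

	/* One page, page aligned, so the attribute change covers whole pages. */
	buf = __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Switch the linear-map entry for this page to uncached. */
	ret = set_memory_uc(buf, 1);
	if (ret)
		goto out;

	/* ... access the buffer with caching disabled ... */

	/* Restore the default write-back attribute before freeing the page. */
	ret = set_memory_wb(buf, 1);
out:
	free_page(buf);
	return ret;
}

As the comment notes, the API handles TLB and cache maintenance itself, but the caller still owns serialization against other users of the same physical page.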
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ac75d0848889..c906a2534c88 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -201,6 +201,8 @@ typedef struct page *pgtable_t;
extern int pfn_valid(unsigned long);
#endif
+extern phys_addr_t lowmem_limit;
+
#include <asm/memory.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 6e68977529c0..e6d609c2cb9b 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -329,6 +329,24 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
clean_pmd_entry(pmdp); \
} while (0)
+extern spinlock_t pgd_lock;
+extern struct list_head pgd_list;
+
+pte_t *lookup_address(unsigned long address, unsigned int *level);
+enum {
+ PG_LEVEL_NONE,
+ PG_LEVEL_4K,
+ PG_LEVEL_2M,
+ PG_LEVEL_NUM
+};
+
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
+
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
return __va(pmd_val(pmd) & PAGE_MASK);
@@ -358,6 +376,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
+#define pmd_pfn(pmd) ((pmd_val(pmd) & SECTION_MASK) >> PAGE_SHIFT)
+#define pte_pgprot(pte) __pgprot(pte_val(pte) & ~PAGE_MASK)
+
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
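
The pgtable.h additions export the page-table walk helpers that the change_page_attr core needs: lookup_address() with the PG_LEVEL_* levels borrowed from x86, plus pmd_pfn() and pte_pgprot() for decomposing section and page entries. Below is a hypothetical sketch of how a caller might inspect a kernel mapping, assuming the ARM implementation keeps the x86 semantics (the returned pointer refers to the relevant table entry and *level reports the mapping level); the function name example_dump_mapping is illustrative only:

#include <linux/kernel.h>
#include <asm/pgtable.h>

static void example_dump_mapping(unsigned long addr)
{
	unsigned int level;
	pte_t *ptep = lookup_address(addr, &level);

	if (!ptep) {
		pr_info("%#lx: not mapped\n", addr);
		return;
	}

	if (level == PG_LEVEL_2M)
		/* Section mapping: pmd_pfn() would extract the base pfn. */
		pr_info("%#lx: section mapping\n", addr);
	else
		pr_info("%#lx: page mapping, pfn %#lx\n",
			addr, (unsigned long)pte_pfn(*ptep));
}

PG_LEVEL_2M is the name carried over from the x86 enum; on classic ARM page tables it corresponds to a section entry rather than a literal 2MB page.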