Diffstat (limited to 'arch/arm/mm/mmu.c')
 arch/arm/mm/mmu.c | 106 ++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 83 insertions(+), 23 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index daf336fe0be8..b832135ec433 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -595,11 +595,25 @@ static void __init *early_alloc(unsigned long sz)
return early_alloc_aligned(sz, sz);
}
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd)
+{
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+ return pmd_page_vaddr(*pmd);
+}
+
+static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
+{
+ __pmd_populate(pmd, __pa(pte), prot);
+ BUG_ON(pmd_bad(*pmd));
+}
+
+static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
+ unsigned long addr, unsigned long prot)
{
if (pmd_none(*pmd)) {
- pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
- __pmd_populate(pmd, __pa(pte), prot);
+ pte_t *pte = early_pte_alloc(pmd);
+ early_pte_install(pmd, pte, prot);
}
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
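The split above separates allocating a PTE table from installing it in the
PMD, so a caller can fill in every entry before the table is published. A
minimal usage sketch of the pattern, assuming the helpers from this hunk:

	/* Reuse the existing table unless the PMD is empty or holds a section. */
	pte_t *pte = early_pte_alloc(pmd);
	/* ... fill entries via set_pte_ext() ... */
	/* ... then publish it; early_pte_install() BUG()s on a bad PMD. */
	early_pte_install(pmd, pte, _PAGE_KERNEL_TABLE);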
@@ -609,11 +623,17 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
const struct mem_type *type)
{
- pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+ pte_t *start_pte = early_pte_alloc(pmd);
+ pte_t *pte = start_pte + pte_index(addr);
+
+ /* If replacing a section mapping, the whole section must be replaced */
+ BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
+
do {
set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
+ early_pte_install(pmd, start_pte, type->prot_l1);
}
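Note the ordering: the loop writes all the PTEs first and only then calls
early_pte_install() to point the PMD at the table, so an existing section
mapping is only ever replaced by a fully populated page table. The BUG_ON
guards exactly that case; a worked example, assuming the classic ARM 2 MiB
PMD_SIZE:

	/*
	 * addr = 0xc0000000, end = 0xc0200000: (addr | end) & ~PMD_MASK == 0,
	 * so a live section mapping may be replaced wholesale.
	 * addr = 0xc0000000, end = 0xc0100000: low bits survive ~PMD_MASK, and
	 * if *pmd holds a section entry (pmd_bad()), the BUG_ON fires.
	 */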
static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
@@ -645,7 +665,8 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+ const struct mem_type *type,
+ bool force_pages)
{
pmd_t *pmd = pmd_offset(pud, addr);
unsigned long next;
@@ -662,7 +683,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
* aligned to a section boundary.
*/
if (type->prot_sect &&
- ((addr | next | phys) & ~SECTION_MASK) == 0) {
+ ((addr | next | phys) & ~SECTION_MASK) == 0 &&
+ !force_pages) {
__map_init_section(pmd, addr, next, phys, type);
} else {
alloc_init_pte(pmd, addr, next,
@@ -675,14 +697,15 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys, const struct mem_type *type)
+ unsigned long end, unsigned long phys, const struct mem_type *type,
+ bool force_pages)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
- alloc_init_pmd(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type, force_pages);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
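force_pages is threaded unchanged through alloc_init_pud() into
alloc_init_pmd(), whose section-or-pages decision (in the hunk above) is its
only consumer. A sketch of that decision; the alloc_init_pte() arguments past
the diff's context cut are inferred from its signature, not shown in this
excerpt:

	if (type->prot_sect &&
	    ((addr | next | phys) & ~SECTION_MASK) == 0 && !force_pages)
		__map_init_section(pmd, addr, next, phys, type);
	else
		alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), type);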
@@ -756,7 +779,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
-static void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md, bool force_pages)
{
unsigned long addr, length, end;
phys_addr_t phys;
@@ -806,7 +829,7 @@ static void __init create_mapping(struct map_desc *md)
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, type);
+ alloc_init_pud(pgd, addr, next, phys, type, force_pages);
phys += next - addr;
addr = next;
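create_mapping() thus gains a single new knob. Every pre-existing caller in
this patch passes false; only the CONFIG_DEBUG_RODATA remap added to
map_lowmem() at the end passes true:

	create_mapping(&map, false);	/* iotable_init(), devicemaps_init(), ... */
	create_mapping(&map, true);	/* kernel text/rodata remap in map_lowmem() */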
@@ -828,7 +851,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
- create_mapping(md);
+ create_mapping(md, false);
vm = &svm->vm;
vm->addr = (void *)(md->virtual & PAGE_MASK);
@@ -949,7 +972,7 @@ void __init debug_ll_io_init(void)
map.virtual &= PAGE_MASK;
map.length = PAGE_SIZE;
map.type = MT_DEVICE;
- create_mapping(&map);
+ create_mapping(&map, false);
}
#endif
@@ -994,6 +1017,28 @@ void __init sanity_check_meminfo(void)
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
+#ifdef CONFIG_SPARSEMEM
+ if (pfn_to_section_nr(bank_pfn_start(bank)) !=
+ pfn_to_section_nr(bank_pfn_end(bank) - 1)) {
+ phys_addr_t sz;
+ unsigned long start_pfn = bank_pfn_start(bank);
+ unsigned long end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+ sz = ((phys_addr_t)(end_pfn - start_pfn) << PAGE_SHIFT);
+
+ if (meminfo.nr_banks >= NR_BANKS) {
+ pr_crit("NR_BANKS too low, ignoring %lld bytes of memory\n",
+ (unsigned long long)(bank->size - sz));
+ } else {
+ memmove(bank + 1, bank,
+ (meminfo.nr_banks - i) * sizeof(*bank));
+ meminfo.nr_banks++;
+ bank[1].size -= sz;
+ bank[1].start = __pfn_to_phys(end_pfn);
+ }
+ bank->size = sz;
+ }
+#endif
+
if (bank->start > ULONG_MAX)
highmem = 1;
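With CONFIG_SPARSEMEM, a memory bank that straddles a sparsemem section
boundary is split so each bank sits within a single section; the remainder is
revisited on a later iteration and split again if it still straddles. A
worked example, assuming a 256 MiB section (SECTION_SIZE_BITS = 28) and
4 KiB pages:

	/*
	 * bank: start = 0x20000000, size = 0x18000000 (384 MiB)
	 * start_pfn = 0x20000; end_pfn = SECTION_ALIGN_UP(0x20001) = 0x30000
	 * sz = 0x10000000 (256 MiB), so this bank keeps
	 * [0x20000000, 0x30000000) and a new bank is inserted with
	 * start = 0x30000000, size = 0x08000000 (128 MiB).
	 */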
@@ -1191,7 +1236,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.virtual = MODULES_VADDR;
map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
map.type = MT_ROM;
- create_mapping(&map);
+ create_mapping(&map, false);
#endif
/*
@@ -1202,14 +1247,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.virtual = FLUSH_BASE;
map.length = SZ_1M;
map.type = MT_CACHECLEAN;
- create_mapping(&map);
+ create_mapping(&map, false);
#endif
#ifdef FLUSH_BASE_MINICACHE
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
map.virtual = FLUSH_BASE_MINICACHE;
map.length = SZ_1M;
map.type = MT_MINICLEAN;
- create_mapping(&map);
+ create_mapping(&map, false);
#endif
/*
@@ -1225,13 +1270,13 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
#else
map.type = MT_LOW_VECTORS;
#endif
- create_mapping(&map);
+ create_mapping(&map, false);
if (!vectors_high()) {
map.virtual = 0;
map.length = PAGE_SIZE * 2;
map.type = MT_LOW_VECTORS;
- create_mapping(&map);
+ create_mapping(&map, false);
}
/* Now create a kernel read-only mapping */
@@ -1239,7 +1284,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.virtual = 0xffff0000 + PAGE_SIZE;
map.length = PAGE_SIZE;
map.type = MT_LOW_VECTORS;
- create_mapping(&map);
+ create_mapping(&map, false);
/*
* Ask the machine support to map in the statically mapped devices.
@@ -1264,20 +1309,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
- pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+ pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}
+
static void __init map_lowmem(void)
{
struct memblock_region *reg;
+ phys_addr_t start;
+ phys_addr_t end;
+ struct map_desc map;
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
- struct map_desc map;
+ start = reg->base;
+ end = start + reg->size;
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
@@ -1289,8 +1337,20 @@ static void __init map_lowmem(void)
map.length = end - start;
map.type = MT_MEMORY;
- create_mapping(&map);
+ create_mapping(&map, false);
}
+
+#ifdef CONFIG_DEBUG_RODATA
+ start = __pa(_stext) & PMD_MASK;
+ end = ALIGN(__pa(__end_rodata), PMD_SIZE);
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY;
+
+ create_mapping(&map, true);
+#endif
}
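The CONFIG_DEBUG_RODATA block remaps the kernel text and rodata with
force_pages = true, so that later permission changes can be applied per 4 KiB
page rather than per section. A worked example with hypothetical addresses,
assuming PMD_SIZE = 2 MiB, __pa(_stext) = 0x00008000 and
__pa(__end_rodata) = 0x00700000:

	start = 0x00008000 & PMD_MASK;		/* = 0x00000000 */
	end   = ALIGN(0x00700000, PMD_SIZE);	/* = 0x00800000 */
	/* the remap covers 8 MiB, built entirely from page mappings */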
/*