Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
-rw-r--r--  arch/powerpc/kernel/setup_64.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 643dcac40fcb..d135f93cb0f6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,7 +34,7 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
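
The header rename above is the visible part of a mechanical lmb_* -> memblock_* conversion; every call touched in the hunks below keeps its shape under the new name. A minimal sketch of the three renamed calls used in this file, assuming the u64-based signatures of this era of the API (the wrapper function and its name are illustrative only, not from the source):

	#include <linux/init.h>
	#include <linux/memblock.h>
	#include <asm/thread_info.h>

	/* Illustrative only: exercises the memblock calls that replace
	 * their lmb_* counterparts in the hunks below. */
	static void __init memblock_rename_sketch(u64 limit)
	{
		u64 total, pa, pa_low;

		total  = memblock_phys_mem_size();		   /* was lmb_phys_mem_size() */
		pa     = memblock_alloc(THREAD_SIZE, THREAD_SIZE); /* was lmb_alloc()	      */
		pa_low = memblock_alloc_base(THREAD_SIZE,	   /* was lmb_alloc_base()    */
					     THREAD_SIZE, limit);
	}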
@@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
* the CPU that ignores the top 2 bits of the address in real
* mode so we can access kernel globals normally provided we
* only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to set up our LMB
+ * some early parsing of the device-tree to set up our MEMBLOCK
* data structures, and allocate & initialize the hash table
* and segment tables so we can start running with translation
* enabled.
@@ -404,7 +404,7 @@ void __init setup_system(void)
printk("-----------------------------------------------------\n");
printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
- printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
+ printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
if (ppc64_caches.dline_size != 0x80)
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
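
memblock_phys_mem_size() reports the total bytes of RAM registered with the boot allocator. A minimal sketch of what the accessor returns, assuming the era's structure layout in which each region list kept a running size total (this is not the kernel's code, just the shape of it):

	/* Sketch: total of all registered RAM regions, maintained
	 * incrementally as regions are added to memblock.memory. */
	u64 memblock_phys_mem_size(void)
	{
		return memblock.memory.size;
	}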
@@ -443,10 +443,10 @@ static void __init irqstack_early_init(void)
*/
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc_base(THREAD_SIZE,
+ __va(memblock_alloc_base(THREAD_SIZE,
THREAD_SIZE, limit));
hardirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc_base(THREAD_SIZE,
+ __va(memblock_alloc_base(THREAD_SIZE,
THREAD_SIZE, limit));
}
}
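
Each per-CPU IRQ stack is carved out of memblock below `limit` so it stays addressable in real mode, and the returned physical address is converted with __va(). The same idiom, condensed into a hypothetical helper (the name alloc_irq_stack is not in the source):

	/* Hypothetical helper: a THREAD_SIZE-aligned stack below 'limit',
	 * returned as a kernel virtual address. memblock_alloc_base()
	 * panics on failure, so no error check is needed here. */
	static void * __init alloc_irq_stack(u64 limit)
	{
		return __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
	}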
@@ -458,11 +458,11 @@ static void __init exc_lvl_early_init(void)
for_each_possible_cpu(i) {
critirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
dbgirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
mcheckirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
}
}
#else
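
memblock_alloc() is the unconstrained variant: in this era it was a thin wrapper that passed an "anywhere" upper bound to the bounded allocator, so these exception-level stacks can land anywhere in registered RAM. A sketch of that relationship, assuming the post-rename spelling of the constant:

	/* Sketch: the unconstrained allocator defers to the bounded one. */
	u64 memblock_alloc(u64 size, u64 align)
	{
		return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	}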
@@ -487,11 +487,11 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(slb0_limit(), lmb.rmo_size);
+ limit = min(slb0_limit(), memblock.rmo_size);
for_each_possible_cpu(i) {
unsigned long sp;
- sp = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+ sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
sp += THREAD_SIZE;
paca[i].emergency_sp = __va(sp);
}
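
The emergency stacks must be usable before translation is on, so the allocation is capped at the RMO size (the window the CPU can reach in real mode) as well as the SLB bolt limit. Note the `sp += THREAD_SIZE` before the __va() conversion: stacks grow down on powerpc, so the paca stores the top of the allocation, not its base. A condensed sketch of the same arithmetic (the helper name set_emergency_sp is hypothetical):

	/* Sketch: emergency_sp points at the TOP of the new stack. */
	static void __init set_emergency_sp(int cpu, u64 limit)
	{
		u64 sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);

		paca[cpu].emergency_sp = __va(sp + THREAD_SIZE);
	}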