author    Yinghai Lu <yinghai@kernel.org>  2013-09-11 14:22:17 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-11 15:57:51 -0700
commit    e76b63f80d938a1319eb5fb0ae7ea69bddfbae38 (patch)
tree      4480ea31ebd4cbae35fcf7fa75c834ab06e39ffd /mm/page_alloc.c
parent    0bf598d863e3c741d47e3178d645f04c9d6c186c (diff)
memblock, numa: binary search node id
The current early_pfn_to_nid() on architectures that support memblock walks memblock.memory one entry at a time, so lookups near the end of the array take too many tries.

We can use the existing memblock_search() to find the node id for a given pfn, which saves time on bigger systems whose memblock.memory array has many entries.

Here are the timing differences for several machines.  In each case, less time was spent in __early_pfn_to_nid() with the patch.

                        3.11-rc5    with patch    difference (%)
                        --------    ----------    --------------
UV1: 256 nodes 9TB:       411.66        402.47    -9.19  (2.23%)
UV2: 255 nodes 16TB:     1141.02       1138.12    -2.90  (0.25%)
UV2: 64 nodes 2TB:        128.15        126.53    -1.62  (1.26%)
UV2: 32 nodes 2TB:        121.87        121.07    -0.80  (0.66%)
                        Time in seconds.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Acked-by: Russ Anderson <rja@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
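The idea behind the change, shown below as a standalone sketch (the struct and function names here are invented for illustration and are not the kernel's): memblock.memory keeps its regions sorted by base address and non-overlapping, so the region covering a given pfn can be found by binary search in O(log n) steps instead of walking all n entries.

	/*
	 * Illustrative only: a sorted, non-overlapping array of pfn ranges,
	 * each tagged with the NUMA node that owns it.
	 */
	struct pfn_range {
		unsigned long start_pfn;	/* first pfn of the range */
		unsigned long end_pfn;		/* one past the last pfn */
		int nid;			/* owning node id */
	};

	/* Return the node id owning @pfn, or -1 if it falls in a hole. */
	static int find_pfn_nid(const struct pfn_range *r, int nr,
				unsigned long pfn)
	{
		int lo = 0, hi = nr - 1;

		while (lo <= hi) {
			int mid = lo + (hi - lo) / 2;

			if (pfn < r[mid].start_pfn)
				hi = mid - 1;		/* look left */
			else if (pfn >= r[mid].end_pfn)
				lo = mid + 1;		/* look right */
			else
				return r[mid].nid;	/* pfn inside r[mid] */
		}
		return -1;
	}

On a machine with hundreds of memblock.memory entries this turns each lookup from up to n comparisons into roughly log2(n), which is where the savings in the table above come from.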
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f7cc08dad26a..22653e34a047 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4306,7 +4306,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
 int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	unsigned long start_pfn, end_pfn;
-	int i, nid;
+	int nid;
 	/*
 	 * NOTE: The following SMP-unsafe globals are only used early in boot
 	 * when the kernel is running single-threaded.
@@ -4317,15 +4317,14 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
 	if (last_start_pfn <= pfn && pfn < last_end_pfn)
 		return last_nid;
 
-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-		if (start_pfn <= pfn && pfn < end_pfn) {
-			last_start_pfn = start_pfn;
-			last_end_pfn = end_pfn;
-			last_nid = nid;
-			return nid;
-		}
-	/* This is a memory hole */
-	return -1;
+	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
+	if (nid != -1) {
+		last_start_pfn = start_pfn;
+		last_end_pfn = end_pfn;
+		last_nid = nid;
+	}
+
+	return nid;
 }
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 
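For context, the helper the new code calls, memblock_search_pfn_nid(), is expected to convert the pfn to a physical address, binary-search memblock.memory for the covering region, and report that region's pfn bounds plus its node id. The sketch below is an approximation under those assumptions, not the exact code from mm/memblock.c; memblock_search() is the existing binary-search helper the changelog refers to, and the field accesses are assumptions for illustration.

	/*
	 * Sketch only: resolve @pfn to a memblock.memory region by binary
	 * search, fill in the region's pfn bounds, and return its node id,
	 * or -1 if the pfn lies in a memory hole.
	 */
	int memblock_search_pfn_nid(unsigned long pfn,
				    unsigned long *start_pfn,
				    unsigned long *end_pfn)
	{
		struct memblock_type *type = &memblock.memory;
		int mid = memblock_search(type, (phys_addr_t)pfn << PAGE_SHIFT);

		if (mid == -1)
			return -1;			/* memory hole */

		*start_pfn = type->regions[mid].base >> PAGE_SHIFT;
		*end_pfn = (type->regions[mid].base + type->regions[mid].size)
				>> PAGE_SHIFT;
		return type->regions[mid].nid;		/* assumes node map support */
	}

Returning the bounds alongside the nid is what lets __early_pfn_to_nid() keep its last_start_pfn/last_end_pfn cache, so repeated lookups in the same region still short-circuit before any search.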