author		Tejun Heo <tj@kernel.org>	2009-07-04 08:10:59 +0900
committer	Tejun Heo <tj@kernel.org>	2009-07-04 08:10:59 +0900
commit		c8a51be4cabb7009db5f865169389242d49c4c60 (patch)
tree		c049dc4811a36fefa4108444a950dbf70623a420 /mm/percpu.c
parent		38a6be525460f52ac6f2de1c3f73c5615a8853cd (diff)
percpu: reorder a few functions in mm/percpu.c
The (de)populate functions are about to be reimplemented to drop the
pcpu_chunk->page array.  Move a few functions now so that the rewrite
patch isn't cluttered with code movement that would make it harder to
read.

[ Impact: code movement ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
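For readers skimming the patch: the moved helpers all revolve around one flat
array of page pointers per chunk, indexed per (cpu, page) pair.  Below is a
minimal userspace sketch (not kernel code) of that layout; the sizes NR_CPUS
and UNIT_PAGES are hypothetical, and page_idx()/chunk_pagep() are illustrative
stand-ins for pcpu_page_idx() and pcpu_chunk_pagep() from the diff.

    #include <stdio.h>

    #define NR_CPUS    4   /* hypothetical number of possible CPUs */
    #define UNIT_PAGES 8   /* hypothetical pages per per-cpu unit  */

    struct page;           /* opaque page descriptor, as in the kernel */

    struct chunk {
            /* the flat page-pointer array the rewrite is about to drop */
            struct page *page[NR_CPUS * UNIT_PAGES];
    };

    /* mirrors pcpu_page_idx(): flatten (cpu, page_idx) into one array index */
    static int page_idx(unsigned int cpu, int idx)
    {
            return cpu * UNIT_PAGES + idx;
    }

    /* mirrors pcpu_chunk_pagep(): slot holding the page of @cpu at @idx */
    static struct page **chunk_pagep(struct chunk *chunk, unsigned int cpu, int idx)
    {
            return &chunk->page[page_idx(cpu, idx)];
    }

    int main(void)
    {
            struct chunk c = { { NULL } };

            /* page 3 of cpu 2 lives at flat index 2 * 8 + 3 = 19 */
            printf("flat index: %d\n", page_idx(2, 3));
            printf("slot addr : %p\n", (void *)chunk_pagep(&c, 2, 3));
            return 0;
    }

pcpu_map() in the diff walks every possible CPU and hands the slot pointer for
page_start to __pcpu_map_pages(), which is the chunk->page dependency the
follow-up rewrite removes.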
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c  90
1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 21d938a10662..639fce4d2caf 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -181,12 +181,6 @@ static int pcpu_page_idx(unsigned int cpu, int page_idx)
return cpu * pcpu_unit_pages + page_idx;
}
-static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
- unsigned int cpu, int page_idx)
-{
- return &chunk->page[pcpu_page_idx(cpu, page_idx)];
-}
-
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
unsigned int cpu, int page_idx)
{
@@ -194,6 +188,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}
+static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
+ unsigned int cpu, int page_idx)
+{
+ return &chunk->page[pcpu_page_idx(cpu, page_idx)];
+}
+
static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
int page_idx)
{
@@ -583,6 +583,45 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
pcpu_chunk_addr(chunk, last, page_end));
}
+static int __pcpu_map_pages(unsigned long addr, struct page **pages,
+ int nr_pages)
+{
+ return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
+ PAGE_KERNEL, pages);
+}
+
+/**
+ * pcpu_map - map pages into a pcpu_chunk
+ * @chunk: chunk of interest
+ * @page_start: page index of the first page to map
+ * @page_end: page index of the last page to map + 1
+ *
+ * For each cpu, map pages [@page_start,@page_end) into @chunk.
+ * vcache is flushed afterwards.
+ */
+static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
+{
+ unsigned int last = num_possible_cpus() - 1;
+ unsigned int cpu;
+ int err;
+
+ /* map must not be done on immutable chunk */
+ WARN_ON(chunk->immutable);
+
+ for_each_possible_cpu(cpu) {
+ err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
+ pcpu_chunk_pagep(chunk, cpu, page_start),
+ page_end - page_start);
+ if (err < 0)
+ return err;
+ }
+
+ /* flush at once, please read comments in pcpu_unmap() */
+ flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
+ return 0;
+}
+
/**
* pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
* @chunk: chunk to depopulate
@@ -632,45 +671,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}
-static int __pcpu_map_pages(unsigned long addr, struct page **pages,
- int nr_pages)
-{
- return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
- PAGE_KERNEL, pages);
-}
-
-/**
- * pcpu_map - map pages into a pcpu_chunk
- * @chunk: chunk of interest
- * @page_start: page index of the first page to map
- * @page_end: page index of the last page to map + 1
- *
- * For each cpu, map pages [@page_start,@page_end) into @chunk.
- * vcache is flushed afterwards.
- */
-static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
-{
- unsigned int last = num_possible_cpus() - 1;
- unsigned int cpu;
- int err;
-
- /* map must not be done on immutable chunk */
- WARN_ON(chunk->immutable);
-
- for_each_possible_cpu(cpu) {
- err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
- pcpu_chunk_pagep(chunk, cpu, page_start),
- page_end - page_start);
- if (err < 0)
- return err;
- }
-
- /* flush at once, please read comments in pcpu_unmap() */
- flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
- return 0;
-}
-
/**
* pcpu_populate_chunk - populate and map an area of a pcpu_chunk
* @chunk: chunk of interest