author     Chris Metcalf <cmetcalf@tilera.com>   2011-02-28 16:37:34 -0500
committer  Chris Metcalf <cmetcalf@tilera.com>   2011-03-10 13:17:53 -0500
commit     76c567fbba50c3da2f4d40e2e551bab26cfd4381 (patch)
tree       6e3c92a266d0ec255e1930adf5ba5268cd71dee9 /arch/tile/kernel
parent     09c17eab075ceeafb53935d858c575b6776394d1 (diff)
arch/tile: support 4KB page size as well as 64KB
The Tilera architecture traditionally supports 64KB page sizes to improve TLB utilization and improve performance when the hardware is being used primarily to run a single application.

For more generic server scenarios, it can be beneficial to run with 4KB page sizes, so this commit allows that to be specified (by modifying the arch/tile/include/hv/pagesize.h header).

As part of this change, we also re-worked the PTE management slightly so that PTE writes all go through a __set_pte() function where we can do some additional validation. The set_pte_order() function was eliminated since the "order" argument wasn't being used.

One bug uncovered was in the PCI DMA code, which wasn't properly flushing the specified range. This was benign with 64KB pages, but with 4KB pages we were getting some larger flushes wrong.

The per-cpu memory reservation code also needed updating to conform with the newer percpu stuff; before it always chose 64KB, and that was always correct, but with 4KB granularity we now have to pay closer attention and reserve the amount of memory that will be requested when the percpu code starts allocating.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
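To make the range-splitting logic described above concrete, here is a minimal userspace sketch of walking an arbitrary (address, size) buffer one page at a time, the same arithmetic the new __dma_map_pa_range() helper in the diff below uses. It assumes 4KB pages, and flush_page() is a hypothetical stand-in for the kernel's homecache_flush_cache(); this is an illustration, not the kernel code itself.

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#define PAGE_SIZE 4096UL                  /* assumed 4KB pages for this sketch */
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Hypothetical stand-in for a per-page cache flush (the kernel uses homecache_flush_cache()). */
static void flush_page(uintptr_t page_addr)
{
	printf("flush page at 0x%lx\n", (unsigned long)page_addr);
}

/* Walk [addr, addr + size) one page at a time, flushing every page it touches. */
static void flush_range(uintptr_t addr, size_t size)
{
	size_t bytesleft = PAGE_SIZE - (addr & (PAGE_SIZE - 1));  /* bytes left on the first page */
	uintptr_t page = addr & PAGE_MASK;

	while ((ssize_t)size > 0) {
		flush_page(page);
		page += PAGE_SIZE;
		size -= bytesleft;        /* may go "negative" on the last page, ending the loop */
		bytesleft = PAGE_SIZE;    /* every later page is a full page */
	}
}

int main(void)
{
	/* A 10000-byte buffer starting 100 bytes into a page spans three 4KB pages. */
	flush_range(0x2000 + 100, 10000);
	return 0;
}

This also illustrates why the bug was benign with 64KB pages: for example, the old dma_map_sg() path flushed only the first page of each scatterlist entry, which usually covered the whole buffer at 64KB granularity but misses pages at 4KB granularity.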
Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/intvec_32.S        16
-rw-r--r--  arch/tile/kernel/machine_kexec.c     7
-rw-r--r--  arch/tile/kernel/pci-dma.c          38
-rw-r--r--  arch/tile/kernel/process.c           2
-rw-r--r--  arch/tile/kernel/setup.c            20
5 files changed, 50 insertions, 33 deletions
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index eabf1ef02cb2..fffcfa6b3a62 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1556,7 +1556,10 @@ STD_ENTRY(_sys_clone)
.align 64
/* Align much later jump on the start of a cache line. */
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
- nop; nop
+ nop
+#if PAGE_SIZE >= 0x10000
+ nop
+#endif
#endif
ENTRY(sys_cmpxchg)
@@ -1587,6 +1590,10 @@ ENTRY(sys_cmpxchg)
* NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
*/
+#if (PAGE_OFFSET & 0xffff) != 0
+# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
+#endif
+
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
{
/* Check for unaligned input. */
@@ -1679,11 +1686,14 @@ ENTRY(sys_cmpxchg)
lw r26, r0
}
{
- /* atomic_locks is page aligned so this suffices to get its addr. */
- auli r21, zero, hi16(atomic_locks)
+ auli r21, zero, ha16(atomic_locks)
bbns r23, .Lcmpxchg_badaddr
}
+#if PAGE_SIZE < 0x10000
+ /* atomic_locks is page-aligned so for big pages we don't need this. */
+ addli r21, r21, lo16(atomic_locks)
+#endif
{
/*
* Insert the hash bits into the page-aligned pointer.
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 0d8b9e933487..e00d7179989e 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -240,8 +240,11 @@ static void setup_quasi_va_is_pa(void)
pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
- for (i = 0; i < pgd_index(PAGE_OFFSET); i++)
- pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte);
+ for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+ unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
+ if (pfn_valid(pfn))
+ __set_pte(&pgtable[i], pfn_pte(pfn, pte));
+ }
}
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 5ad5e13b0fa6..658752b2835e 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -86,6 +86,21 @@ EXPORT_SYMBOL(dma_free_coherent);
* can count on nothing having been touched.
*/
+/* Flush a PA range from cache page by page. */
+static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
+{
+ struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
+ size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));
+
+ while ((ssize_t)size > 0) {
+ /* Flush the page. */
+ homecache_flush_cache(page++, 0);
+
+ /* Figure out if we need to continue on the next page. */
+ size -= bytesleft;
+ bytesleft = PAGE_SIZE;
+ }
+}
/*
* dma_map_single can be passed any memory address, and there appear
@@ -97,26 +112,12 @@ EXPORT_SYMBOL(dma_free_coherent);
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
- struct page *page;
- dma_addr_t dma_addr;
- int thispage;
+ dma_addr_t dma_addr = __pa(ptr);
BUG_ON(!valid_dma_direction(direction));
WARN_ON(size == 0);
- dma_addr = __pa(ptr);
-
- /* We might have been handed a buffer that wraps a page boundary */
- while ((int)size > 0) {
- /* The amount to flush that's on this page */
- thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1));
- thispage = min((int)thispage, (int)size);
- /* Is this valid for any page we could be handed? */
- page = pfn_to_page(kaddr_to_pfn(ptr));
- homecache_flush_cache(page, 0);
- ptr += thispage;
- size -= thispage;
- }
+ __dma_map_pa_range(dma_addr, size);
return dma_addr;
}
@@ -140,10 +141,8 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
WARN_ON(nents == 0 || sglist->length == 0);
for_each_sg(sglist, sg, nents, i) {
- struct page *page;
sg->dma_address = sg_phys(sg);
- page = pfn_to_page(sg->dma_address >> PAGE_SHIFT);
- homecache_flush_cache(page, 0);
+ __dma_map_pa_range(sg->dma_address, sg->length);
}
return nents;
@@ -163,6 +162,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
{
BUG_ON(!valid_dma_direction(direction));
+ BUG_ON(offset + size > PAGE_SIZE);
homecache_flush_cache(page, 0);
return page_to_pa(page) + offset;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 5db8b5b63cea..b9cd962e1d30 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -165,7 +165,7 @@ void free_thread_info(struct thread_info *info)
kfree(step_state);
}
- free_page((unsigned long)info);
+ free_pages((unsigned long)info, THREAD_SIZE_ORDER);
}
static void save_arch_state(struct thread_struct *t);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index f18573643ed1..3696b1832566 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -59,6 +59,8 @@ unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];
+static unsigned long __initdata node_percpu[MAX_NUMNODES];
+
#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
@@ -554,7 +556,6 @@ static void __init setup_bootmem_allocator(void)
reserve_bootmem(crashk_res.start,
crashk_res.end - crashk_res.start + 1, 0);
#endif
-
}
void *__init alloc_remap(int nid, unsigned long size)
@@ -568,11 +569,13 @@ void *__init alloc_remap(int nid, unsigned long size)
static int __init percpu_size(void)
{
- int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-#ifdef CONFIG_MODULES
- if (size < PERCPU_ENOUGH_ROOM)
- size = PERCPU_ENOUGH_ROOM;
-#endif
+ int size = __per_cpu_end - __per_cpu_start;
+ size += PERCPU_MODULE_RESERVE;
+ size += PERCPU_DYNAMIC_EARLY_SIZE;
+ if (size < PCPU_MIN_UNIT_SIZE)
+ size = PCPU_MIN_UNIT_SIZE;
+ size = roundup(size, PAGE_SIZE);
+
/* In several places we assume the per-cpu data fits on a huge page. */
BUG_ON(kdata_huge && size > HPAGE_SIZE);
return size;
@@ -589,7 +592,6 @@ static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
static void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = { 0 };
- unsigned long node_percpu[MAX_NUMNODES] = { 0 };
int size = percpu_size();
int num_cpus = smp_height * smp_width;
int i;
@@ -674,7 +676,7 @@ static void __init zone_sizes_init(void)
NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
free_area_init_node(i, zones_size, start, NULL);
- printk(KERN_DEBUG " DMA zone: %ld per-cpu pages\n",
+ printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
PFN_UP(node_percpu[i]));
/* Track the type of memory on each node */
@@ -1312,6 +1314,8 @@ static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
BUG_ON(size % PAGE_SIZE != 0);
pfn_offset[nid] += size / PAGE_SIZE;
+ BUG_ON(node_percpu[nid] < size);
+ node_percpu[nid] -= size;
if (percpu_pfn[cpu] == 0)
percpu_pfn[cpu] = pfn;
return pfn_to_kaddr(pfn);