author     Chris Metcalf <cmetcalf@tilera.com>    2010-06-25 17:04:17 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>    2010-07-06 13:41:51 -0400
commit     0707ad30d10110aebc01a5a64fb63f4b32d20b73
tree       64d8ba73e605ac26e56808d1d77701b3f83cf8b2 /arch/tile/mm
parent     c78095bd8c77fca2619769ff8efb639fd100e373
arch/tile: Miscellaneous cleanup changes.
This commit is primarily changes caused by reviewing "sparse" and "checkpatch" output on our sources, so is somewhat noisy, since things like "printk() -> pr_err()" (or whatever) throughout the codebase tend to get tedious to read. Rather than trying to tease apart precisely which things changed due to which type of code review, this commit includes various cleanups in the code:

- sparse: Add declarations in headers for globals.
- sparse: Fix __user annotations.
- sparse: Use gfp_t consistently instead of int.
- sparse: Remove functions not actually used.
- checkpatch: Clean up printk() warnings by using pr_info(), etc.; also avoid partial-line printks except in bootup code.
- checkpatch: Use exposed structs rather than typedefs.
- checkpatch: Change some C99 comments to C89 comments.

In addition, a couple of minor other changes are rolled into this commit:

- Add support for a "raise" instruction to cause SIGFPE, etc., to be raised.
- Remove some compat code that is unnecessary when we fully eliminate some of the deprecated syscalls from the generic syscall ABI.
- Update the tile_defconfig to reflect current config contents.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
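For readers skimming the hunks below, a minimal before/after sketch of the checkpatch-driven printk() conversion (illustrative only, following the pattern in the hugetlbpage.c hunk; pr_err() and friends are kernel macros that wrap printk() with the matching KERN_* level and any pr_fmt() prefix defined in the file):

	/* Before: log level passed explicitly to printk() */
	printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
	       ps >> 20);

	/* After: pr_err() supplies KERN_ERR and honors pr_fmt() */
	pr_err("hugepagesz: Unsupported page size %lu M\n",
	       ps >> 20);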
Diffstat (limited to 'arch/tile/mm')
-rw-r--r--    arch/tile/mm/elf.c             4
-rw-r--r--    arch/tile/mm/fault.c          64
-rw-r--r--    arch/tile/mm/highmem.c         2
-rw-r--r--    arch/tile/mm/homecache.c      18
-rw-r--r--    arch/tile/mm/hugetlbpage.c     2
-rw-r--r--    arch/tile/mm/init.c           99
-rw-r--r--    arch/tile/mm/pgtable.c        46
7 files changed, 76 insertions, 159 deletions
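The "gfp_t consistently instead of int" item shows up in the pgtable.c hunk below; as a sketch of why sparse cares, gfp_t is declared __bitwise, so holding GFP flags in a plain int silently discards that type checking:

	/* gfp_t is a sparse __bitwise type; a plain int loses the checking */
	gfp_t flags = GFP_KERNEL | __GFP_ZERO;	/* OK: sparse-checked */
	int wrong = GFP_KERNEL | __GFP_ZERO;	/* sparse warns: restricted gfp_t */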
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index 818c9bef060c..55e58e93bfc5 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -20,6 +20,7 @@
#include <linux/elf.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/sections.h>
/* Notify a running simulator, if any, that an exec just occurred. */
static void sim_notify_exec(const char *binary_name)
@@ -77,9 +78,8 @@ static void *vdso_page;
/* One-entry array used for install_special_mapping. */
static struct page *vdso_pages[1];
-int __init vdso_setup(void)
+static int __init vdso_setup(void)
{
- extern char __rt_sigreturn[], __rt_sigreturn_end[];
vdso_page = (void *)get_zeroed_page(GFP_ATOMIC);
memcpy(vdso_page, __rt_sigreturn, __rt_sigreturn_end - __rt_sigreturn);
vdso_pages[0] = virt_to_page(vdso_page);
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 9b6b92f07def..0011f06b4fe2 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -39,32 +39,11 @@
#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
+#include <asm/traps.h>
+#include <asm/syscalls.h>
#include <arch/interrupts.h>
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out
- */
-void bust_spinlocks(int yes)
-{
- int loglevel_save = console_loglevel;
-
- if (yes) {
- oops_in_progress = 1;
- return;
- }
- oops_in_progress = 0;
- /*
- * OK, the message is on the console. Now we call printk()
- * without oops_in_progress set so that printk will give klogd
- * a poke. Hold onto your hats...
- */
- console_loglevel = 15; /* NMI oopser may have shut the console up */
- printk(" ");
- console_loglevel = loglevel_save;
-}
-
static noinline void force_sig_info_fault(int si_signo, int si_code,
unsigned long address, int fault_num, struct task_struct *tsk)
{
@@ -301,10 +280,10 @@ static int handle_page_fault(struct pt_regs *regs,
*/
stack_offset = stack_pointer & (THREAD_SIZE-1);
if (stack_offset < THREAD_SIZE / 8) {
- printk(KERN_ALERT "Potential stack overrun: sp %#lx\n",
+ pr_alert("Potential stack overrun: sp %#lx\n",
stack_pointer);
show_regs(regs);
- printk(KERN_ALERT "Killing current process %d/%s\n",
+ pr_alert("Killing current process %d/%s\n",
tsk->pid, tsk->comm);
do_group_exit(SIGKILL);
}
@@ -422,7 +401,7 @@ good_area:
} else if (write) {
#ifdef TEST_VERIFY_AREA
if (!is_page_fault && regs->cs == KERNEL_CS)
- printk("WP fault at "REGFMT"\n", regs->eip);
+ pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
@@ -450,6 +429,7 @@ good_area:
else
tsk->min_flt++;
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
* If this was an asynchronous fault,
* restart the appropriate engine.
@@ -472,6 +452,7 @@ good_area:
break;
#endif
}
+#endif
up_read(&mm->mmap_sem);
return 1;
@@ -514,17 +495,17 @@ no_context:
pte_t *pte = lookup_address(address);
if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
- printk(KERN_CRIT "kernel tried to execute"
+ pr_crit("kernel tried to execute"
" non-executable page - exploit attempt?"
" (uid: %d)\n", current->uid);
}
#endif
if (address < PAGE_SIZE)
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference\n");
+ pr_alert("Unable to handle kernel NULL pointer dereference\n");
else
- printk(KERN_ALERT "Unable to handle kernel paging request\n");
- printk(" at virtual address "REGFMT", pc "REGFMT"\n",
- address, regs->pc);
+ pr_alert("Unable to handle kernel paging request\n");
+ pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
+ address, regs->pc);
show_regs(regs);
@@ -555,7 +536,7 @@ out_of_memory:
down_read(&mm->mmap_sem);
goto survive;
}
- printk("VM: killing process %s\n", tsk->comm);
+ pr_alert("VM: killing process %s\n", tsk->comm);
if (!is_kernel_mode)
do_group_exit(SIGKILL);
goto no_context;
@@ -573,31 +554,12 @@ do_sigbus:
#ifndef __tilegx__
-extern char sys_cmpxchg[], __sys_cmpxchg_end[];
-extern char __sys_cmpxchg_grab_lock[];
-extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
-
-/*
- * We return this structure in registers to avoid having to write
- * additional save/restore code in the intvec.S caller.
- */
-struct intvec_state {
- void *handler;
- unsigned long vecnum;
- unsigned long fault_num;
- unsigned long info;
- unsigned long retval;
-};
-
/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
panic(fmt, __VA_ARGS__); \
} while (0)
-void do_page_fault(struct pt_regs *regs, int fault_num,
- unsigned long address, unsigned long write);
-
/*
* When we take an ITLB or DTLB fault or access violation in the
* supervisor while the critical section bit is set, the hypervisor is
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 1fcecc5b9e03..ff1cdff5114d 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -121,7 +121,7 @@ static struct list_head amp_list = LIST_HEAD_INIT(amp_list);
struct kmap_amps {
struct atomic_mapped_page per_type[KM_TYPE_NR];
};
-DEFINE_PER_CPU(struct kmap_amps, amps);
+static DEFINE_PER_CPU(struct kmap_amps, amps);
/*
* Add a page and va, on this cpu, to the list of kmap_atomic pages,
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 52feb77133ce..97c478e7be27 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -46,7 +46,7 @@
* locally from a remote home. There's no point in using it if we
* don't have coherent local caching, though.
*/
-int __write_once noallocl2;
+static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
noallocl2 = 1;
@@ -60,15 +60,11 @@ early_param("noallocl2", set_noallocl2);
#endif
-
-
/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
#define mark_caches_evicted_start() 0
#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)
-
-
/*
* Update the irq_stat for cpus that we are going to interrupt
* with TLB or cache flushes. Also handle removing dataplane cpus
@@ -171,20 +167,12 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
- printk("hv_flush_remote(%#llx, %#lx, %p [%s],"
+ pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
" %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
cache_pa, cache_control, cache_cpumask, cache_buf,
(unsigned long)tlb_va, tlb_length, tlb_pgsize,
tlb_cpumask, tlb_buf,
asids, asidcount, rc);
- if (asidcount > 0) {
- int i;
- printk(" asids:");
- for (i = 0; i < asidcount; ++i)
- printk(" %d,%d,%d",
- asids[i].x, asids[i].y, asids[i].asid);
- printk("\n");
- }
panic("Unsafe to continue.");
}
@@ -293,7 +281,7 @@ pte_t pte_set_home(pte_t pte, int home)
*/
if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
pte = hv_pte_clear_nc(pte);
- printk("non-immutable page incoherently referenced: %#llx\n",
+ pr_err("non-immutable page incoherently referenced: %#llx\n",
pte.val);
}
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index c38570f8f0d0..24688b697a8d 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -332,7 +332,7 @@ static __init int setup_hugepagesz(char *opt)
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
- printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
+ pr_err("hugepagesz: Unsupported page size %lu M\n",
ps >> 20);
return 0;
}
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 125ac53b60fc..d89c9eacd162 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -67,7 +67,9 @@
#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
+#ifndef __tilegx__
unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
+#endif
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -282,9 +284,9 @@ static pgprot_t __init init_pgprot(ulong address)
/*
* Everything else that isn't data or bss is heap, so mark it
* with the initial heap home (hash-for-home, or this cpu). This
- * includes any addresses after the loaded image; any address before
- * _einittext (since we already captured the case of text before
- * _sinittext); and any init-data pages.
+ * includes any addresses after the loaded image and any address before
+ * _einitdata, since we already captured the case of text before
+ * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
*
* All the LOWMEM pages that we mark this way will get their
* struct page homecache properly marked later, in set_page_homes().
@@ -292,9 +294,7 @@ static pgprot_t __init init_pgprot(ulong address)
* homes, but with a zero free_time we don't have to actually
* do a flush action the first time we use them, either.
*/
- if (address >= (ulong) _end || address < (ulong) _sdata ||
- (address >= (ulong) _sinitdata &&
- address < (ulong) _einitdata))
+ if (address >= (ulong) _end || address < (ulong) _einitdata)
return construct_pgprot(PAGE_KERNEL, initial_heap_home());
#if CHIP_HAS_CBOX_HOME_MAP()
@@ -304,35 +304,38 @@ static pgprot_t __init init_pgprot(ulong address)
#endif
/*
+ * Make the w1data homed like heap to start with, to avoid
+ * making it part of the page-striped data area when we're just
+ * going to convert it to read-only soon anyway.
+ */
+ if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
+ return construct_pgprot(PAGE_KERNEL, initial_heap_home());
+
+ /*
* Otherwise we just hand out consecutive cpus. To avoid
* requiring this function to hold state, we just walk forward from
* _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
* the requested address, while walking cpu home around kdata_mask.
* This is typically no more than a dozen or so iterations.
*/
- BUG_ON(_einitdata != __bss_start);
- for (page = (ulong)_sdata, cpu = NR_CPUS; ; ) {
- cpu = cpumask_next(cpu, &kdata_mask);
- if (cpu == NR_CPUS)
- cpu = cpumask_first(&kdata_mask);
- if (page >= address)
- break;
- page += PAGE_SIZE;
- if (page == (ulong)__start_rodata)
- page = (ulong)__end_rodata;
- if (page == (ulong)&init_thread_union)
- page += THREAD_SIZE;
- if (page == (ulong)_sinitdata)
- page = (ulong)_einitdata;
+ page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
+ BUG_ON(address < page || address >= (ulong)_end);
+ cpu = cpumask_first(&kdata_mask);
+ for (; page < address; page += PAGE_SIZE) {
+ if (page >= (ulong)&init_thread_union &&
+ page < (ulong)&init_thread_union + THREAD_SIZE)
+ continue;
if (page == (ulong)empty_zero_page)
- page += PAGE_SIZE;
+ continue;
#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
if (page == (ulong)atomic_locks)
- page += PAGE_SIZE;
+ continue;
#endif
#endif
-
+ cpu = cpumask_next(cpu, &kdata_mask);
+ if (cpu == NR_CPUS)
+ cpu = cpumask_first(&kdata_mask);
}
return construct_pgprot(PAGE_KERNEL, cpu);
}
@@ -362,7 +365,7 @@ static int __init setup_ktext(char *str)
/* If you have a leading "nocache", turn off ktext caching */
if (strncmp(str, "nocache", 7) == 0) {
ktext_nocache = 1;
- printk("ktext: disabling local caching of kernel text\n");
+ pr_info("ktext: disabling local caching of kernel text\n");
str += 7;
if (*str == ',')
++str;
@@ -374,20 +377,20 @@ static int __init setup_ktext(char *str)
/* Default setting on Tile64: use a huge page */
if (strcmp(str, "huge") == 0)
- printk("ktext: using one huge locally cached page\n");
+ pr_info("ktext: using one huge locally cached page\n");
/* Pay TLB cost but get no cache benefit: cache small pages locally */
else if (strcmp(str, "local") == 0) {
ktext_small = 1;
ktext_local = 1;
- printk("ktext: using small pages with local caching\n");
+ pr_info("ktext: using small pages with local caching\n");
}
/* Neighborhood cache ktext pages on all cpus. */
else if (strcmp(str, "all") == 0) {
ktext_small = 1;
ktext_all = 1;
- printk("ktext: using maximal caching neighborhood\n");
+ pr_info("ktext: using maximal caching neighborhood\n");
}
@@ -397,10 +400,10 @@ static int __init setup_ktext(char *str)
cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
if (cpumask_weight(&ktext_mask) > 1) {
ktext_small = 1;
- printk("ktext: using caching neighborhood %s "
+ pr_info("ktext: using caching neighborhood %s "
"with small pages\n", buf);
} else {
- printk("ktext: caching on cpu %s with one huge page\n",
+ pr_info("ktext: caching on cpu %s with one huge page\n",
buf);
}
}
@@ -470,19 +473,19 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
#if CHIP_HAS_CBOX_HOME_MAP()
if (ktext_arg_seen && ktext_hash) {
- printk("warning: \"ktext\" boot argument ignored"
- " if \"kcache_hash\" sets up text hash-for-home\n");
+ pr_warning("warning: \"ktext\" boot argument ignored"
+ " if \"kcache_hash\" sets up text hash-for-home\n");
ktext_small = 0;
}
if (kdata_arg_seen && kdata_hash) {
- printk("warning: \"kdata\" boot argument ignored"
- " if \"kcache_hash\" sets up data hash-for-home\n");
+ pr_warning("warning: \"kdata\" boot argument ignored"
+ " if \"kcache_hash\" sets up data hash-for-home\n");
}
if (kdata_huge && !hash_default) {
- printk("warning: disabling \"kdata=huge\"; requires"
- " kcache_hash=all or =allbutstack\n");
+ pr_warning("warning: disabling \"kdata=huge\"; requires"
+ " kcache_hash=all or =allbutstack\n");
kdata_huge = 0;
}
#endif
@@ -556,11 +559,11 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
if (!cpumask_empty(&bad)) {
char buf[NR_CPUS * 5];
cpulist_scnprintf(buf, sizeof(buf), &bad);
- printk("ktext: not using unavailable cpus %s\n", buf);
+ pr_info("ktext: not using unavailable cpus %s\n", buf);
}
if (cpumask_empty(&ktext_mask)) {
- printk("ktext: no valid cpus; caching on %d.\n",
- smp_processor_id());
+ pr_warning("ktext: no valid cpus; caching on %d.\n",
+ smp_processor_id());
cpumask_copy(&ktext_mask,
cpumask_of(smp_processor_id()));
}
@@ -737,17 +740,18 @@ static void __init set_non_bootmem_pages_init(void)
for_each_zone(z) {
unsigned long start, end;
int nid = z->zone_pgdat->node_id;
+ int idx = zone_idx(z);
start = z->zone_start_pfn;
if (start == 0)
continue; /* bootmem */
end = start + z->spanned_pages;
- if (zone_idx(z) == ZONE_NORMAL) {
+ if (idx == ZONE_NORMAL) {
BUG_ON(start != node_start_pfn[nid]);
start = node_free_pfn[nid];
}
#ifdef CONFIG_HIGHMEM
- if (zone_idx(z) == ZONE_HIGHMEM)
+ if (idx == ZONE_HIGHMEM)
totalhigh_pages += z->spanned_pages;
#endif
if (kdata_huge) {
@@ -841,9 +845,9 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
/* check that fixmap and pkmap do not overlap */
if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
- printk(KERN_ERR "fixmap and kmap areas overlap"
+ pr_err("fixmap and kmap areas overlap"
" - this will crash\n");
- printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+ pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
FIXADDR_START);
BUG();
@@ -863,7 +867,7 @@ void __init mem_init(void)
initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext;
initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;
- printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
+ pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
@@ -968,7 +972,6 @@ static void mark_w1data_ro(void)
BUG_ON((addr & (PAGE_SIZE-1)) != 0);
for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
unsigned long pfn = kaddr_to_pfn((void *)addr);
- struct page *page = pfn_to_page(pfn);
pte_t *ptep = virt_to_pte(NULL, addr);
BUG_ON(pte_huge(*ptep)); /* not relevant for kdata_huge */
set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
@@ -986,7 +989,7 @@ static long __write_once initfree = 1;
static int __init set_initfree(char *str)
{
strict_strtol(str, 0, &initfree);
- printk("initfree: %s free init pages\n", initfree ? "will" : "won't");
+ pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
return 1;
}
__setup("initfree=", set_initfree);
@@ -996,8 +999,8 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
unsigned long addr = (unsigned long) begin;
if (kdata_huge && !initfree) {
- printk("Warning: ignoring initfree=0:"
- " incompatible with kdata=huge\n");
+ pr_warning("Warning: ignoring initfree=0:"
+ " incompatible with kdata=huge\n");
initfree = 1;
}
end = (end + PAGE_SIZE - 1) & PAGE_MASK;
@@ -1033,7 +1036,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
free_page(addr);
totalram_pages++;
}
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+ pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
void free_initmem(void)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 289e729bbd76..28c23140c947 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -46,7 +46,7 @@ void show_mem(void)
{
struct zone *zone;
- printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
+ pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
" free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
" pagecache:%lu swap:%lu\n",
(global_page_state(NR_ACTIVE_ANON) +
@@ -71,7 +71,6 @@ void show_mem(void)
if (!populated_zone(zone))
continue;
- printk("Node %d %7s: ", zone_to_nid(zone), zone->name);
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
int nr = zone->free_area[order].nr_free;
@@ -80,7 +79,8 @@ void show_mem(void)
largest_order = order;
}
spin_unlock_irqrestore(&zone->lock, flags);
- printk("%lukB (largest %luKb)\n",
+ pr_err("Node %d %7s: %lukB (largest %luKb)\n",
+ zone_to_nid(zone), zone->name,
K(total), largest_order ? K(1UL) << largest_order : 0);
}
}
@@ -123,42 +123,6 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}
-/*
- * Associate a huge virtual page frame with a given physical page frame
- * and protection flags for that frame. pfn is for the base of the page,
- * vaddr is what the page gets mapped to - both must be properly aligned.
- * The pmd must already be instantiated.
- */
-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
- printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
- return; /* BUG(); */
- }
- if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
- printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
- return; /* BUG(); */
- }
- pgd = swapper_pg_dir + pgd_index(vaddr);
- if (pgd_none(*pgd)) {
- printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
- return; /* BUG(); */
- }
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(pfn), flags));
- /*
- * It's enough to flush this one mapping.
- * We flush both small and huge TSBs to be sure.
- */
- local_flush_tlb_page(NULL, vaddr, HPAGE_SIZE);
- local_flush_tlb_pages(NULL, vaddr, PAGE_SIZE, HPAGE_SIZE);
-}
-
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
unsigned long address = __fix_to_virt(idx);
@@ -257,7 +221,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- int flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
+ gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
struct page *p;
#ifdef CONFIG_HIGHPTE
@@ -550,7 +514,7 @@ void iounmap(volatile void __iomem *addr_in)
read_unlock(&vmlist_lock);
if (!p) {
- printk("iounmap: bad address %p\n", addr);
+ pr_err("iounmap: bad address %p\n", addr);
dump_stack();
return;
}