Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/barrier.h |  2 +-
-rw-r--r--  include/asm-alpha/param.h   |  4 ++++
-rw-r--r--  include/asm-alpha/pgtable.h | 21 +++++++++++++++++++--
3 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h
index 384dc08d6f53..ac78eba909bc 100644
--- a/include/asm-alpha/barrier.h
+++ b/include/asm-alpha/barrier.h
@@ -24,7 +24,7 @@ __asm__ __volatile__("mb": : :"memory")
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
+#define smp_read_barrier_depends() do { } while (0)
 #endif
 
 #define set_mb(var, value) \
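
Note on the barrier.h hunk: in the kernel, barrier() is a pure compiler barrier (an empty asm statement with a "memory" clobber), so the old !SMP definition still forced the compiler to discard cached values around every smp_read_barrier_depends(). The new definition generates no code at all, and the do { } while (0) form stays safe to use wherever a statement is expected. A minimal standalone sketch of the difference (the demo_* names are illustrative, not kernel code):

/* Hypothetical stand-ins: demo_barrier mirrors the kernel's barrier(),
 * demo_read_depends mirrors the new !SMP smp_read_barrier_depends(). */
#define demo_barrier()       __asm__ __volatile__("" : : : "memory")
#define demo_read_depends()  do { } while (0)

int demo_flag;

int demo(int *p)
{
	int a = *p;
	demo_read_depends();      /* emits nothing; *p may stay in a register */
	int b = *p;               /* compiler is free to reuse 'a' here       */

	int c = *p;
	demo_barrier();           /* memory clobber: compiler must reload     */
	int d = *p;               /* this load cannot be folded into 'c'      */

	/* the do { } while (0) form is still usable as an ordinary statement: */
	if (demo_flag)
		demo_read_depends();
	else
		demo_barrier();

	return a + b + c + d;
}
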
diff --git a/include/asm-alpha/param.h b/include/asm-alpha/param.h
index 0982f1d39499..e691ecfedb2c 100644
--- a/include/asm-alpha/param.h
+++ b/include/asm-alpha/param.h
@@ -5,8 +5,12 @@
    hardware ignores reprogramming. We also need userland buy-in to the
    change in HZ, since this is visible in the wait4 resources etc. */
 
+#ifdef __KERNEL__
 #define HZ CONFIG_HZ
 #define USER_HZ HZ
+#else
+#define HZ 1024
+#endif
 
 #define EXEC_PAGESIZE 8192
 
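Note on the param.h hunk: with the #ifdef __KERNEL__ guard, kernel code keeps following CONFIG_HZ, while userland that includes this header continues to see the historical Alpha value of 1024, which is the "userland buy-in" the comment refers to. A small illustrative user-space program (the tick count is made up) showing which branch applies outside the kernel:

#include <stdio.h>

/* Outside the kernel __KERNEL__ is not defined, so <asm/param.h> would
 * take the #else branch above; the value is repeated here only to keep
 * this sketch self-contained. */
#ifndef __KERNEL__
#define HZ 1024
#endif

int main(void)
{
	long ticks = 2560;   /* hypothetical count measured in HZ ticks */

	/* 2560 ticks at 1024 Hz is 2.50 seconds. */
	printf("%ld ticks = %.2f s\n", ticks, (double)ticks / HZ);
	return 0;
}
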
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 05ce5fba43e3..3f0c59f6d8aa 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -287,17 +287,34 @@ extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
 
+/*
+ * The smp_read_barrier_depends() in the following functions are required to
+ * order the load of *dir (the pointer in the top level page table) with any
+ * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ *
+ * If this ordering is not enforced, the CPU might load an older value of
+ * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
+ * more details.
+ *
+ * Note that we never change the mm->pgd pointer after the task is running, so
+ * pgd_offset does not require such a barrier.
+ */
+
 /* Find an entry in the second-level page table.. */
 extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 {
-	return (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+	pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+	smp_read_barrier_depends(); /* see above */
+	return ret;
 }
 
 /* Find an entry in the third-level page table.. */
 extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 {
-	return (pte_t *) pmd_page_vaddr(*dir)
+	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
 		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
+	smp_read_barrier_depends(); /* see above */
+	return ret;
 }
 
 #define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
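
Note on the pgtable.h hunk: the pattern being protected is the dependent-load case described in the kernel's memory-barrier documentation. One CPU initializes a lower-level table and then publishes a pointer to it; another CPU loads that pointer and dereferences it. On Alpha, the dependent load can observe stale data unless smp_read_barrier_depends() sits between the two loads. A simplified standalone sketch of that shape (the stub macros and names are illustrative; the real code path is mm/memory.c:__pte_alloc together with the accessors above):

/* Stub barriers so the sketch is self-contained; in the kernel these come
 * from <asm/barrier.h> (on SMP Alpha, smp_read_barrier_depends() is a
 * real memory barrier, not a no-op). */
#define sketch_smp_wmb()               __asm__ __volatile__("" : : : "memory")
#define sketch_read_barrier_depends()  __asm__ __volatile__("" : : : "memory")

static int *published_dir;   /* plays the role of the *dir slot */

/* CPU 0: fill in a new table, then publish a pointer to it. */
void publisher(int *new_table)
{
	new_table[0] = 42;             /* initialize the new level first    */
	sketch_smp_wmb();              /* order the stores above...         */
	published_dir = new_table;     /* ...before the pointer is visible  */
}

/* CPU 1: load the pointer, then data that depends on it; this is the
 * shape of pmd_offset()/pte_offset_kernel() after this patch. */
int consumer(void)
{
	int *entry = published_dir;          /* load of *dir                */
	if (!entry)
		return -1;                   /* nothing published yet       */
	sketch_read_barrier_depends();       /* order the dependent load    */
	return entry[0];                     /* must not see pre-init data  */
}
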