Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile                 8
-rw-r--r--  arch/blackfin/kernel/asm-offsets.c            3
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c          13
-rw-r--r--  arch/blackfin/kernel/bfin_gpio.c            169
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c             5
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinfo.c      8
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c      4
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c     128
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinfo.c   15
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c   31
-rw-r--r--  arch/blackfin/kernel/dma-mapping.c            4
-rw-r--r--  arch/blackfin/kernel/gptimers.c              24
-rw-r--r--  arch/blackfin/kernel/process.c               69
-rw-r--r--  arch/blackfin/kernel/ptrace.c                 7
-rw-r--r--  arch/blackfin/kernel/reboot.c                69
-rw-r--r--  arch/blackfin/kernel/setup.c                131
-rw-r--r--  arch/blackfin/kernel/signal.c                26
-rw-r--r--  arch/blackfin/kernel/sys_bfin.c               2
-rw-r--r--  arch/blackfin/kernel/time-ts.c              219
-rw-r--r--  arch/blackfin/kernel/time.c                  24
-rw-r--r--  arch/blackfin/kernel/traps.c                 94
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S           54
22 files changed, 735 insertions, 372 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 318b9b692a48..6140cd69c782 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -6,9 +6,15 @@ extra-y := init_task.o vmlinux.lds
obj-y := \
entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
- sys_bfin.o time.o traps.o irqchip.o dma-mapping.o flat.o \
+ sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \
fixed_code.o reboot.o bfin_gpio.o
+ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y)
+ obj-y += time-ts.o
+else
+ obj-y += time.o
+endif
+
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_BFIN_DMA_5XX) += bfin_dma_5xx.o
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index b56b2741cdea..721f15f3cebf 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -34,8 +34,7 @@
#include <linux/hardirq.h>
#include <linux/irq.h>
#include <linux/thread_info.h>
-
-#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+#include <linux/kbuild.h>
int main(void)
{
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 8fd5d22cec34..fd5448d6107c 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -67,7 +67,7 @@ static int __init blackfin_dma_init(void)
for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) {
dma_ch[i].chan_status = DMA_CHANNEL_FREE;
- dma_ch[i].regs = base_addr[i];
+ dma_ch[i].regs = dma_io_base_addr[i];
mutex_init(&(dma_ch[i].dmalock));
}
/* Mark MEMDMA Channel 0 as requested since we're using it internally */
@@ -106,12 +106,15 @@ int request_dma(unsigned int channel, char *device_id)
#ifdef CONFIG_BF54x
if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
- if (strncmp(device_id, "BFIN_UART", 9) == 0)
+ if (strncmp(device_id, "BFIN_UART", 9) == 0) {
+ dma_ch[channel].regs->peripheral_map &= 0x0FFF;
dma_ch[channel].regs->peripheral_map |=
- (channel - CH_UART2_RX + 0xC);
- else
+ ((channel - CH_UART2_RX + 0xC)<<12);
+ } else {
+ dma_ch[channel].regs->peripheral_map &= 0x0FFF;
dma_ch[channel].regs->peripheral_map |=
- (channel - CH_UART2_RX + 0x6);
+ ((channel - CH_UART2_RX + 0x6)<<12);
+ }
}
#endif
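The UART remapping above is a read-modify-write of the 4-bit peripheral-map field held in bits 12-15 of the DMA channel's PERIPHERAL_MAP register; a tiny sketch of that bitfield update on a plain variable (the register value here is made up, and 0xC is the UART2 RX mapping the patch computes for channel == CH_UART2_RX):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t peripheral_map = 0xA123;   /* hypothetical current register value */
	uint16_t new_periph = 0xC;          /* e.g. UART2 RX on a BF54x channel */

	peripheral_map &= 0x0FFF;           /* clear the old map in bits 12-15 */
	peripheral_map |= new_periph << 12; /* install the new mapping */

	printf("peripheral_map = 0x%04X\n", peripheral_map);   /* 0xC123 */
	return 0;
}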
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 08788f7bbfba..7e8eaf4a31bb 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -95,14 +95,14 @@ enum {
AWA_data_clear = SYSCR,
AWA_data_set = SYSCR,
AWA_toggle = SYSCR,
- AWA_maska = UART_SCR,
- AWA_maska_clear = UART_SCR,
- AWA_maska_set = UART_SCR,
- AWA_maska_toggle = UART_SCR,
- AWA_maskb = UART_GCTL,
- AWA_maskb_clear = UART_GCTL,
- AWA_maskb_set = UART_GCTL,
- AWA_maskb_toggle = UART_GCTL,
+ AWA_maska = BFIN_UART_SCR,
+ AWA_maska_clear = BFIN_UART_SCR,
+ AWA_maska_set = BFIN_UART_SCR,
+ AWA_maska_toggle = BFIN_UART_SCR,
+ AWA_maskb = BFIN_UART_GCTL,
+ AWA_maskb_clear = BFIN_UART_GCTL,
+ AWA_maskb_set = BFIN_UART_GCTL,
+ AWA_maskb_toggle = BFIN_UART_GCTL,
AWA_dir = SPORT1_STAT,
AWA_polar = SPORT1_STAT,
AWA_edge = SPORT1_STAT,
@@ -348,11 +348,10 @@ static void portmux_setup(unsigned short per, unsigned short function)
offset = port_mux_lut[y].offset;
muxreg = bfin_read_PORT_MUX();
- if (offset != 1) {
+ if (offset != 1)
muxreg &= ~(1 << offset);
- } else {
+ else
muxreg &= ~(3 << 1);
- }
muxreg |= (function << offset);
bfin_write_PORT_MUX(muxreg);
@@ -396,39 +395,11 @@ inline void portmux_setup(unsigned short portno, unsigned short function)
# define portmux_setup(...) do { } while (0)
#endif
-#ifndef BF548_FAMILY
-static void default_gpio(unsigned gpio)
-{
- unsigned short bank, bitmask;
- unsigned long flags;
-
- bank = gpio_bank(gpio);
- bitmask = gpio_bit(gpio);
-
- local_irq_save(flags);
-
- gpio_bankb[bank]->maska_clear = bitmask;
- gpio_bankb[bank]->maskb_clear = bitmask;
- SSYNC();
- gpio_bankb[bank]->inen &= ~bitmask;
- gpio_bankb[bank]->dir &= ~bitmask;
- gpio_bankb[bank]->polar &= ~bitmask;
- gpio_bankb[bank]->both &= ~bitmask;
- gpio_bankb[bank]->edge &= ~bitmask;
- AWA_DUMMY_READ(edge);
- local_irq_restore(flags);
-}
-#else
-# define default_gpio(...) do { } while (0)
-#endif
-
static int __init bfin_gpio_init(void)
{
-
printk(KERN_INFO "Blackfin GPIO Controller\n");
return 0;
-
}
arch_initcall(bfin_gpio_init);
@@ -821,10 +792,10 @@ int peripheral_request(unsigned short per, const char *label)
local_irq_save(flags);
if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
+ dump_stack();
printk(KERN_ERR
"%s: Peripheral %d is already reserved as GPIO by %s !\n",
- __FUNCTION__, ident, get_label(ident));
- dump_stack();
+ __func__, ident, get_label(ident));
local_irq_restore(flags);
return -EBUSY;
}
@@ -833,31 +804,31 @@ int peripheral_request(unsigned short per, const char *label)
u16 funct = get_portmux(ident);
- /*
- * Pin functions like AMC address strobes my
- * be requested and used by several drivers
- */
+ /*
+ * Pin functions like AMC address strobes my
+ * be requested and used by several drivers
+ */
if (!((per & P_MAYSHARE) && (funct == P_FUNCT2MUX(per)))) {
- /*
- * Allow that the identical pin function can
- * be requested from the same driver twice
- */
+ /*
+ * Allow that the identical pin function can
+ * be requested from the same driver twice
+ */
- if (cmp_label(ident, label) == 0)
- goto anyway;
+ if (cmp_label(ident, label) == 0)
+ goto anyway;
+ dump_stack();
printk(KERN_ERR
"%s: Peripheral %d function %d is already reserved by %s !\n",
- __FUNCTION__, ident, P_FUNCT2MUX(per), get_label(ident));
- dump_stack();
+ __func__, ident, P_FUNCT2MUX(per), get_label(ident));
local_irq_restore(flags);
return -EBUSY;
}
}
-anyway:
+ anyway:
reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident);
portmux_setup(ident, P_FUNCT2MUX(per));
@@ -890,47 +861,47 @@ int peripheral_request(unsigned short per, const char *label)
if (!check_gpio(ident)) {
- if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
- printk(KERN_ERR
- "%s: Peripheral %d is already reserved as GPIO by %s !\n",
- __FUNCTION__, ident, get_label(ident));
- dump_stack();
- local_irq_restore(flags);
- return -EBUSY;
- }
+ if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
+ dump_stack();
+ printk(KERN_ERR
+ "%s: Peripheral %d is already reserved as GPIO by %s !\n",
+ __func__, ident, get_label(ident));
+ local_irq_restore(flags);
+ return -EBUSY;
+ }
}
if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) {
- /*
- * Pin functions like AMC address strobes my
- * be requested and used by several drivers
- */
+ /*
+ * Pin functions like AMC address strobes my
+ * be requested and used by several drivers
+ */
- if (!(per & P_MAYSHARE)) {
+ if (!(per & P_MAYSHARE)) {
- /*
- * Allow that the identical pin function can
- * be requested from the same driver twice
- */
+ /*
+ * Allow that the identical pin function can
+ * be requested from the same driver twice
+ */
- if (cmp_label(ident, label) == 0)
- goto anyway;
+ if (cmp_label(ident, label) == 0)
+ goto anyway;
+ dump_stack();
printk(KERN_ERR
"%s: Peripheral %d function %d is already"
" reserved by %s !\n",
- __FUNCTION__, ident, P_FUNCT2MUX(per),
+ __func__, ident, P_FUNCT2MUX(per),
get_label(ident));
- dump_stack();
local_irq_restore(flags);
return -EBUSY;
}
}
-anyway:
+ anyway:
portmux_setup(per, P_FUNCT2MUX(per));
port_setup(ident, PERIPHERAL_USAGE);
@@ -944,7 +915,7 @@ anyway:
EXPORT_SYMBOL(peripheral_request);
#endif
-int peripheral_request_list(unsigned short per[], const char *label)
+int peripheral_request_list(const unsigned short per[], const char *label)
{
u16 cnt;
int ret;
@@ -954,10 +925,10 @@ int peripheral_request_list(unsigned short per[], const char *label)
ret = peripheral_request(per[cnt], label);
if (ret < 0) {
- for ( ; cnt > 0; cnt--) {
+ for ( ; cnt > 0; cnt--)
peripheral_free(per[cnt - 1]);
- }
- return ret;
+
+ return ret;
}
}
@@ -981,15 +952,13 @@ void peripheral_free(unsigned short per)
local_irq_save(flags);
- if (unlikely(!(reserved_peri_map[gpio_bank(ident)]
- & gpio_bit(ident)))) {
+ if (unlikely(!(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident)))) {
local_irq_restore(flags);
return;
}
- if (!(per & P_MAYSHARE)) {
+ if (!(per & P_MAYSHARE))
port_setup(ident, GPIO_USAGE);
- }
reserved_peri_map[gpio_bank(ident)] &= ~gpio_bit(ident);
@@ -999,14 +968,11 @@ void peripheral_free(unsigned short per)
}
EXPORT_SYMBOL(peripheral_free);
-void peripheral_free_list(unsigned short per[])
+void peripheral_free_list(const unsigned short per[])
{
u16 cnt;
-
- for (cnt = 0; per[cnt] != 0; cnt++) {
+ for (cnt = 0; per[cnt] != 0; cnt++)
peripheral_free(per[cnt]);
- }
-
}
EXPORT_SYMBOL(peripheral_free_list);
@@ -1046,17 +1012,17 @@ int gpio_request(unsigned gpio, const char *label)
}
if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
+ dump_stack();
printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
gpio, get_label(gpio));
- dump_stack();
local_irq_restore(flags);
return -EBUSY;
}
if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
+ dump_stack();
printk(KERN_ERR
"bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
gpio, get_label(gpio));
- dump_stack();
local_irq_restore(flags);
return -EBUSY;
}
@@ -1082,14 +1048,12 @@ void gpio_free(unsigned gpio)
local_irq_save(flags);
if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
- gpio_error(gpio);
dump_stack();
+ gpio_error(gpio);
local_irq_restore(flags);
return;
}
- default_gpio(gpio);
-
reserved_gpio_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
set_label(gpio, "free");
@@ -1152,6 +1116,18 @@ int gpio_get_value(unsigned gpio)
}
EXPORT_SYMBOL(gpio_get_value);
+void bfin_gpio_irq_prepare(unsigned gpio)
+{
+ unsigned long flags;
+
+ port_setup(gpio, GPIO_USAGE);
+
+ local_irq_save(flags);
+ gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
+ gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
+ local_irq_restore(flags);
+}
+
#else
int gpio_direction_input(unsigned gpio)
@@ -1218,6 +1194,11 @@ void bfin_gpio_reset_spi0_ssel1(void)
udelay(1);
}
+void bfin_gpio_irq_prepare(unsigned gpio)
+{
+ port_setup(gpio, GPIO_USAGE);
+}
+
#endif /*BF548_FAMILY */
#if defined(CONFIG_PROC_FS)
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 0bfbb269e350..053edff6c0d8 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -42,11 +42,6 @@ EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
-
EXPORT_SYMBOL(is_in_rom);
EXPORT_SYMBOL(bfin_return_from_exception);
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinfo.c b/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
index bd072299f7f2..822beefa3a4b 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
@@ -39,14 +39,6 @@
#include <asm/cplbinit.h>
#include <asm/blackfin.h>
-#define CPLB_I 1
-#define CPLB_D 2
-
-#define SYNC_SYS SSYNC()
-#define SYNC_CORE CSYNC()
-
-#define CPLB_BIT_PAGESIZE 0x30000
-
static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" };
static char *cplb_print_entry(char *buf, struct cplb_entry *tbl, int switched)
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index dc6e8a7a8bda..48060105346a 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -43,13 +43,15 @@ void __init generate_cpl_tables(void)
unsigned long d_data, i_data;
unsigned long d_cache = 0, i_cache = 0;
+ printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
+
#ifdef CONFIG_BFIN_ICACHE
i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
#ifdef CONFIG_BFIN_DCACHE
d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BLKFIN_WT
+#ifdef CONFIG_BFIN_WT
d_cache |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index c426a22f9907..99f2831e2964 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -24,8 +24,6 @@
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>
-#ifdef CONFIG_BFIN_ICACHE
-
#define FAULT_RW (1 << 16)
#define FAULT_USERSUPV (1 << 17)
@@ -143,30 +141,48 @@ static noinline int dcplb_miss(void)
unsigned long d_data;
nr_dcplb_miss++;
- if (addr >= _ramend)
- return CPLB_PROT_VIOL;
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
- d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#ifdef CONFIG_BLKFIN_WT
- d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+ if (addr < _ramend - DMA_UNCACHED_REGION ||
+ (reserved_mem_dcache_on && addr >= _ramend &&
+ addr < physical_mem_end)) {
+ d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
+#ifdef CONFIG_BFIN_WT
+ d_data |= CPLB_L1_AOW | CPLB_WT;
#endif
- mask = current_rwx_mask;
- if (mask) {
- int page = addr >> PAGE_SHIFT;
- int offs = page >> 5;
- int bit = 1 << (page & 31);
-
- if (mask[offs] & bit)
- d_data |= CPLB_USER_RD;
-
- mask += page_mask_nelts;
- if (mask[offs] & bit)
- d_data |= CPLB_USER_WR;
}
+#endif
+ if (addr >= physical_mem_end) {
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
+ && (status & FAULT_USERSUPV)) {
+ addr &= ~0x3fffff;
+ d_data &= ~PAGE_SIZE_4KB;
+ d_data |= PAGE_SIZE_4MB;
+ } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
+ && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
+ addr &= ~(1 * 1024 * 1024 - 1);
+ d_data &= ~PAGE_SIZE_4KB;
+ d_data |= PAGE_SIZE_1MB;
+ } else
+ return CPLB_PROT_VIOL;
+ } else if (addr >= _ramend) {
+ d_data |= CPLB_USER_RD | CPLB_USER_WR;
+ } else {
+ mask = current_rwx_mask;
+ if (mask) {
+ int page = addr >> PAGE_SHIFT;
+ int offs = page >> 5;
+ int bit = 1 << (page & 31);
+
+ if (mask[offs] & bit)
+ d_data |= CPLB_USER_RD;
+ mask += page_mask_nelts;
+ if (mask[offs] & bit)
+ d_data |= CPLB_USER_WR;
+ }
+ }
idx = evict_one_dcplb();
addr &= PAGE_MASK;
@@ -189,12 +205,14 @@ static noinline int icplb_miss(void)
unsigned long i_data;
nr_icplb_miss++;
- if (status & FAULT_USERSUPV)
- nr_icplb_supv_miss++;
- if (addr >= _ramend)
+ /* If inside the uncached DMA region, fault. */
+ if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
return CPLB_PROT_VIOL;
+ if (status & FAULT_USERSUPV)
+ nr_icplb_supv_miss++;
+
/*
* First, try to find a CPLB that matches this address. If we
* find one, then the fact that we're in the miss handler means
@@ -211,30 +229,48 @@ static noinline int icplb_miss(void)
}
i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_ICACHE
- i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#endif
+#ifdef CONFIG_BFIN_ICACHE
/*
- * Two cases to distinguish - a supervisor access must necessarily
- * be for a module page; we grant it unconditionally (could do better
- * here in the future). Otherwise, check the x bitmap of the current
- * process.
+ * Normal RAM, and possibly the reserved memory area, are
+ * cacheable.
*/
- if (!(status & FAULT_USERSUPV)) {
- unsigned long *mask = current_rwx_mask;
-
- if (mask) {
- int page = addr >> PAGE_SHIFT;
- int offs = page >> 5;
- int bit = 1 << (page & 31);
+ if (addr < _ramend ||
+ (addr < physical_mem_end && reserved_mem_icache_on))
+ i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
+#endif
- mask += 2 * page_mask_nelts;
- if (mask[offs] & bit)
- i_data |= CPLB_USER_RD;
+ if (addr >= physical_mem_end) {
+ if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
+ && (status & FAULT_USERSUPV)) {
+ addr &= ~(1 * 1024 * 1024 - 1);
+ i_data &= ~PAGE_SIZE_4KB;
+ i_data |= PAGE_SIZE_1MB;
+ } else
+ return CPLB_PROT_VIOL;
+ } else if (addr >= _ramend) {
+ i_data |= CPLB_USER_RD;
+ } else {
+ /*
+ * Two cases to distinguish - a supervisor access must
+ * necessarily be for a module page; we grant it
+ * unconditionally (could do better here in the future).
+ * Otherwise, check the x bitmap of the current process.
+ */
+ if (!(status & FAULT_USERSUPV)) {
+ unsigned long *mask = current_rwx_mask;
+
+ if (mask) {
+ int page = addr >> PAGE_SHIFT;
+ int offs = page >> 5;
+ int bit = 1 << (page & 31);
+
+ mask += 2 * page_mask_nelts;
+ if (mask[offs] & bit)
+ i_data |= CPLB_USER_RD;
+ }
}
}
-
idx = evict_one_icplb();
addr &= PAGE_MASK;
icplb_tbl[idx].addr = addr;
@@ -250,7 +286,6 @@ static noinline int icplb_miss(void)
static noinline int dcplb_protection_fault(void)
{
- unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
int status = bfin_read_DCPLB_STATUS();
nr_dcplb_prot++;
@@ -280,8 +315,7 @@ int cplb_hdr(int seqstat, struct pt_regs *regs)
case 0x26:
return dcplb_miss();
default:
- return 1;
- panic_cplb_error(seqstat, regs);
+ return 1;
}
}
@@ -299,7 +333,7 @@ void flush_switched_cplbs(void)
enable_icplb();
disable_dcplb();
- for (i = first_mask_dcplb; i < MAX_CPLBS; i++) {
+ for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
dcplb_tbl[i].data = 0;
bfin_write32(DCPLB_DATA0 + i * 4, 0);
}
@@ -319,7 +353,7 @@ void set_mask_dcplbs(unsigned long *masks)
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
d_data |= CPLB_L1_CHBL;
-#ifdef CONFIG_BLKFIN_WT
+#ifdef CONFIG_BFIN_WT
d_data |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif
@@ -334,5 +368,3 @@ void set_mask_dcplbs(unsigned long *masks)
}
enable_dcplb();
}
-
-#endif
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c b/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
index a4f0b428a34d..1e74f0b97996 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
@@ -33,9 +33,7 @@
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
-#include <asm/current.h>
-#include <asm/system.h>
-#include <asm/cplb.h>
+#include <asm/cplbinit.h>
#include <asm/blackfin.h>
#define CPLB_I 1
@@ -174,16 +172,6 @@ static int cplbinfo_read_proc(char *page, char **start, off_t off,
return len;
}
-static int cplbinfo_write_proc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- printk(KERN_INFO "Reset the CPLB swap in/out counts.\n");
- memset(ipdt_swapcount_table, 0, MAX_SWITCH_I_CPLBS * sizeof(unsigned long));
- memset(dpdt_swapcount_table, 0, MAX_SWITCH_D_CPLBS * sizeof(unsigned long));
-
- return count;
-}
-
static int __init cplbinfo_init(void)
{
struct proc_dir_entry *entry;
@@ -193,7 +181,6 @@ static int __init cplbinfo_init(void)
return -ENOMEM;
entry->read_proc = cplbinfo_read_proc;
- entry->write_proc = cplbinfo_write_proc;
entry->data = NULL;
return 0;
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 6320bc45fbba..917325bfbd84 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -26,6 +26,35 @@
#include <asm/cplb.h>
#include <asm/cplbinit.h>
+#ifdef CONFIG_MAX_MEM_SIZE
+# define CPLB_MEM CONFIG_MAX_MEM_SIZE
+#else
+# define CPLB_MEM CONFIG_MEM_SIZE
+#endif
+
+/*
+* Number of required data CPLB switchtable entries
+* MEMSIZE / 4 (we mostly install 4M page size CPLBs
+* approx 16 for smaller 1MB page size CPLBs for allignment purposes
+* 1 for L1 Data Memory
+* possibly 1 for L2 Data Memory
+* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
+* 1 for ASYNC Memory
+*/
+#define MAX_SWITCH_D_CPLBS (((CPLB_MEM / 4) + 16 + 1 + 1 + 1 \
+ + ASYNC_MEMORY_CPLB_COVERAGE) * 2)
+
+/*
+* Number of required instruction CPLB switchtable entries
+* MEMSIZE / 4 (we mostly install 4M page size CPLBs
+* approx 12 for smaller 1MB page size CPLBs for allignment purposes
+* 1 for L1 Instruction Memory
+* possibly 1 for L2 Instruction Memory
+* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
+*/
+#define MAX_SWITCH_I_CPLBS (((CPLB_MEM / 4) + 12 + 1 + 1 + 1) * 2)
+
+
u_long icplb_table[MAX_CPLBS + 1];
u_long dcplb_table[MAX_CPLBS + 1];
@@ -295,6 +324,8 @@ void __init generate_cpl_tables(void)
struct cplb_tab *t_d = NULL;
struct s_cplb cplb;
+ printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n");
+
cplb.init_i.size = MAX_CPLBS;
cplb.init_d.size = MAX_CPLBS;
cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
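The switchtable sizing comments added above reduce to simple arithmetic; a minimal standalone sketch of the formulas, assuming a hypothetical 128 MB part (CPLB_MEM = 128) and ASYNC_MEMORY_CPLB_COVERAGE = 4 (both values are illustrative, not taken from the patch):

#include <stdio.h>

int main(void)
{
	int cplb_mem = 128;        /* assumed CONFIG_MEM_SIZE in MB */
	int async_coverage = 4;    /* assumed ASYNC_MEMORY_CPLB_COVERAGE */

	/* data side: 4M pages + ~16 1M alignment pages + L1 + L2 + zero-page + async */
	int max_switch_d = ((cplb_mem / 4) + 16 + 1 + 1 + 1 + async_coverage) * 2;
	/* instruction side: 4M pages + ~12 1M alignment pages + L1 + L2 + zero-page */
	int max_switch_i = ((cplb_mem / 4) + 12 + 1 + 1 + 1) * 2;

	printf("MAX_SWITCH_D_CPLBS = %d, MAX_SWITCH_I_CPLBS = %d\n",
	       max_switch_d, max_switch_i);   /* prints 110 and 94 */
	return 0;
}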
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index d6b61d56b656..2f62a9f4058a 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -59,7 +59,7 @@ void dma_alloc_init(unsigned long start, unsigned long end)
memset((void *)dma_base, 0, DMA_UNCACHED_REGION);
dma_initialized = 1;
- printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __FUNCTION__,
+ printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__,
dma_page, dma_pages, dma_base);
}
@@ -100,7 +100,7 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
int i;
if ((page + pages) > dma_pages) {
- printk(KERN_ERR "%s: freeing outside range.\n", __FUNCTION__);
+ printk(KERN_ERR "%s: freeing outside range.\n", __func__);
BUG();
}
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 1904d8b53328..e698554895a7 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -52,12 +52,14 @@ static volatile GPTIMER_timer_regs *const timer_regs[MAX_BLACKFIN_GPTIMERS] =
(GPTIMER_timer_regs *)TIMER5_CONFIG,
(GPTIMER_timer_regs *)TIMER6_CONFIG,
(GPTIMER_timer_regs *)TIMER7_CONFIG,
-#endif
-#if (MAX_BLACKFIN_GPTIMERS > 8)
+# if (MAX_BLACKFIN_GPTIMERS > 8)
(GPTIMER_timer_regs *)TIMER8_CONFIG,
(GPTIMER_timer_regs *)TIMER9_CONFIG,
(GPTIMER_timer_regs *)TIMER10_CONFIG,
+# if (MAX_BLACKFIN_GPTIMERS > 11)
(GPTIMER_timer_regs *)TIMER11_CONFIG,
+# endif
+# endif
#endif
};
@@ -80,12 +82,14 @@ static uint32_t const trun_mask[MAX_BLACKFIN_GPTIMERS] =
TIMER_STATUS_TRUN5,
TIMER_STATUS_TRUN6,
TIMER_STATUS_TRUN7,
-#endif
-#if (MAX_BLACKFIN_GPTIMERS > 8)
+# if (MAX_BLACKFIN_GPTIMERS > 8)
TIMER_STATUS_TRUN8,
TIMER_STATUS_TRUN9,
TIMER_STATUS_TRUN10,
+# if (MAX_BLACKFIN_GPTIMERS > 11)
TIMER_STATUS_TRUN11,
+# endif
+# endif
#endif
};
@@ -100,12 +104,14 @@ static uint32_t const tovf_mask[MAX_BLACKFIN_GPTIMERS] =
TIMER_STATUS_TOVF5,
TIMER_STATUS_TOVF6,
TIMER_STATUS_TOVF7,
-#endif
-#if (MAX_BLACKFIN_GPTIMERS > 8)
+# if (MAX_BLACKFIN_GPTIMERS > 8)
TIMER_STATUS_TOVF8,
TIMER_STATUS_TOVF9,
TIMER_STATUS_TOVF10,
+# if (MAX_BLACKFIN_GPTIMERS > 11)
TIMER_STATUS_TOVF11,
+# endif
+# endif
#endif
};
@@ -120,12 +126,14 @@ static uint32_t const timil_mask[MAX_BLACKFIN_GPTIMERS] =
TIMER_STATUS_TIMIL5,
TIMER_STATUS_TIMIL6,
TIMER_STATUS_TIMIL7,
-#endif
-#if (MAX_BLACKFIN_GPTIMERS > 8)
+# if (MAX_BLACKFIN_GPTIMERS > 8)
TIMER_STATUS_TIMIL8,
TIMER_STATUS_TIMIL9,
TIMER_STATUS_TIMIL10,
+# if (MAX_BLACKFIN_GPTIMERS > 11)
TIMER_STATUS_TIMIL11,
+# endif
+# endif
#endif
};
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6b8459c66163..be9fdd00d7cb 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -32,6 +32,8 @@
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>
@@ -69,33 +71,44 @@ EXPORT_SYMBOL(pm_power_off);
* The idle loop on BFIN
*/
#ifdef CONFIG_IDLE_L1
-void default_idle(void)__attribute__((l1_text));
+static void default_idle(void)__attribute__((l1_text));
void cpu_idle(void)__attribute__((l1_text));
#endif
-void default_idle(void)
+/*
+ * This is our default idle handler. We need to disable
+ * interrupts here to ensure we don't miss a wakeup call.
+ */
+static void default_idle(void)
{
- while (!need_resched()) {
- local_irq_disable();
- if (likely(!need_resched()))
- idle_with_irq_disabled();
- local_irq_enable();
- }
-}
+ local_irq_disable();
+ if (!need_resched())
+ idle_with_irq_disabled();
-void (*idle)(void) = default_idle;
+ local_irq_enable();
+}
/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
+ * The idle thread. We try to conserve power, while trying to keep
+ * overall latency low. The architecture specific idle is passed
+ * a value to indicate the level of "idleness" of the system.
*/
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
- idle();
+ void (*idle)(void) = pm_idle;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpu_is_offline(smp_processor_id()))
+ cpu_die();
+#endif
+ if (!idle)
+ idle = default_idle;
+ tick_nohz_stop_sched_tick();
+ while (!need_resched())
+ idle();
+ tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
@@ -189,7 +202,7 @@ copy_thread(int nr, unsigned long clone_flags,
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(char *name, char **argv, char **envp)
+asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
int error;
char *filename;
@@ -232,23 +245,25 @@ unsigned long get_wchan(struct task_struct *p)
void finish_atomic_sections (struct pt_regs *regs)
{
+ int __user *up0 = (int __user *)&regs->p0;
+
if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
return;
switch (regs->pc) {
case ATOMIC_XCHG32 + 2:
- put_user(regs->r1, (int *)regs->p0);
+ put_user(regs->r1, up0);
regs->pc += 2;
break;
case ATOMIC_CAS32 + 2:
case ATOMIC_CAS32 + 4:
if (regs->r0 == regs->r1)
- put_user(regs->r2, (int *)regs->p0);
+ put_user(regs->r2, up0);
regs->pc = ATOMIC_CAS32 + 8;
break;
case ATOMIC_CAS32 + 6:
- put_user(regs->r2, (int *)regs->p0);
+ put_user(regs->r2, up0);
regs->pc += 2;
break;
@@ -256,7 +271,7 @@ void finish_atomic_sections (struct pt_regs *regs)
regs->r0 = regs->r1 + regs->r0;
/* fall through */
case ATOMIC_ADD32 + 4:
- put_user(regs->r0, (int *)regs->p0);
+ put_user(regs->r0, up0);
regs->pc = ATOMIC_ADD32 + 6;
break;
@@ -264,7 +279,7 @@ void finish_atomic_sections (struct pt_regs *regs)
regs->r0 = regs->r1 - regs->r0;
/* fall through */
case ATOMIC_SUB32 + 4:
- put_user(regs->r0, (int *)regs->p0);
+ put_user(regs->r0, up0);
regs->pc = ATOMIC_SUB32 + 6;
break;
@@ -272,7 +287,7 @@ void finish_atomic_sections (struct pt_regs *regs)
regs->r0 = regs->r1 | regs->r0;
/* fall through */
case ATOMIC_IOR32 + 4:
- put_user(regs->r0, (int *)regs->p0);
+ put_user(regs->r0, up0);
regs->pc = ATOMIC_IOR32 + 6;
break;
@@ -280,7 +295,7 @@ void finish_atomic_sections (struct pt_regs *regs)
regs->r0 = regs->r1 & regs->r0;
/* fall through */
case ATOMIC_AND32 + 4:
- put_user(regs->r0, (int *)regs->p0);
+ put_user(regs->r0, up0);
regs->pc = ATOMIC_AND32 + 6;
break;
@@ -288,7 +303,7 @@ void finish_atomic_sections (struct pt_regs *regs)
regs->r0 = regs->r1 ^ regs->r0;
/* fall through */
case ATOMIC_XOR32 + 4:
- put_user(regs->r0, (int *)regs->p0);
+ put_user(regs->r0, up0);
regs->pc = ATOMIC_XOR32 + 6;
break;
}
@@ -309,6 +324,12 @@ int _access_ok(unsigned long addr, unsigned long size)
return 1;
if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
return 1;
+
+#ifdef CONFIG_ROMFS_MTD_FS
+ /* For XIP, allow user space to use pointers within the ROMFS. */
+ if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+ return 1;
+#endif
#else
if (addr >= memory_start && (addr + size) <= physical_mem_end)
return 1;
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 85caf9b711a1..b4f062c172c6 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -193,6 +193,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int ret;
int add = 0;
+ unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -229,7 +230,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp);
if (copied != sizeof(tmp))
break;
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
}
@@ -263,7 +264,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
} else {
tmp = get_reg(child, addr);
}
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
}
@@ -389,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
/* Get all gp regs from the child. */
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
}
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index 483f93dfc1b5..367e2dc09881 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -11,45 +11,56 @@
#include <asm/reboot.h>
#include <asm/system.h>
-#if defined(BF537_FAMILY) || defined(BF533_FAMILY) || defined(BF527_FAMILY)
-#define SYSCR_VAL 0x0
-#elif defined(BF561_FAMILY)
-#define SYSCR_VAL 0x20
-#elif defined(BF548_FAMILY)
-#define SYSCR_VAL 0x10
-#endif
-
-/*
- * Delay min 5 SCLK cycles using worst case CCLK/SCLK ratio (15)
- */
-#define SWRST_DELAY (5 * 15)
-
-/* A system soft reset makes external memory unusable
- * so force this function into L1.
+/* A system soft reset makes external memory unusable so force
+ * this function into L1. We use the compiler ssync here rather
+ * than SSYNC() because it's safe (no interrupts and such) and
+ * we save some L1. We do not need to force sanity in the SYSCR
+ * register as the BMODE selection bit is cleared by the soft
+ * reset while the Core B bit (on dual core parts) is cleared by
+ * the core reset.
*/
__attribute__((l1_text))
void bfin_reset(void)
{
- /* force BMODE and disable Core B (as needed) */
- bfin_write_SYSCR(SYSCR_VAL);
-
- /* we use asm ssync here because it's save and we save some L1 */
- asm("ssync;");
+ /* Wait for completion of "system" events such as cache line
+ * line fills so that we avoid infinite stalls later on as
+ * much as possible. This code is in L1, so it won't trigger
+ * any such event after this point in time.
+ */
+ __builtin_bfin_ssync();
while (1) {
- /* initiate system soft reset with magic 0x7 */
+ /* Initiate System software reset. */
bfin_write_SWRST(0x7);
- /* Wait for System reset to actually reset, needs to be 5 SCLKs, */
- /* Assume CCLK / SCLK ratio is worst case (15), and use 5*15 */
-
- asm("LSETUP(.Lfoo,.Lfoo) LC0 = %0\n .Lfoo: NOP;\n"
- : : "a" (SWRST_DELAY) : "LC0", "LT0", "LB0");
+ /* Due to the way reset is handled in the hardware, we need
+ * to delay for 7 SCLKS. The only reliable way to do this is
+ * to calculate the CCLK/SCLK ratio and multiply 7. For now,
+ * we'll assume worse case which is a 1:15 ratio.
+ */
+ asm(
+ "LSETUP (1f, 1f) LC0 = %0\n"
+ "1: nop;"
+ :
+ : "a" (15 * 7)
+ : "LC0", "LB0", "LT0"
+ );
- /* clear system soft reset */
+ /* Clear System software reset */
bfin_write_SWRST(0);
- asm("ssync;");
- /* issue core reset */
+
+ /* Wait for the SWRST write to complete. Cannot rely on SSYNC
+ * though as the System state is all reset now.
+ */
+ asm(
+ "LSETUP (1f, 1f) LC1 = %0\n"
+ "1: nop;"
+ :
+ : "a" (15 * 1)
+ : "LC1", "LB1", "LT1"
+ );
+
+ /* Issue core reset */
asm("raise 1");
}
}
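The `15 * 7` loop bound in the rewritten bfin_reset() is simply a worst-case cycle count; a small sketch of that arithmetic (the 1:15 CCLK:SCLK ratio and the 7-SCLK hold time are the worst-case figures the patch comment assumes):

#include <stdio.h>

int main(void)
{
	int cclk_per_sclk = 15;   /* worst-case CCLK:SCLK ratio assumed by the patch */
	int sclk_cycles   = 7;    /* SWRST must be held for ~7 system-clock cycles */

	/* each iteration of the single-NOP hardware loop costs one core-clock cycle */
	int loop_count = cclk_per_sclk * sclk_cycles;
	printf("LSETUP iteration count: %d core cycles\n", loop_count);   /* 105 */
	return 0;
}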
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 2255c289a714..8efea004aecb 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -35,6 +35,7 @@ u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);
unsigned long memory_start, memory_end, physical_mem_end;
+unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
@@ -106,7 +107,7 @@ void __init bf53x_relocate_l1_mem(void)
l1_code_length = _etext_l1 - _stext_l1;
if (l1_code_length > L1_CODE_LENGTH)
- l1_code_length = L1_CODE_LENGTH;
+ panic("L1 Instruction SRAM Overflow\n");
/* cannot complain as printk is not available as yet.
* But we can continue booting and complain later!
*/
@@ -116,19 +117,18 @@ void __init bf53x_relocate_l1_mem(void)
l1_data_a_length = _ebss_l1 - _sdata_l1;
if (l1_data_a_length > L1_DATA_A_LENGTH)
- l1_data_a_length = L1_DATA_A_LENGTH;
+ panic("L1 Data SRAM Bank A Overflow\n");
/* Copy _sdata_l1 to _ebss_l1 to L1 data bank A SRAM */
dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
l1_data_b_length = _ebss_b_l1 - _sdata_b_l1;
if (l1_data_b_length > L1_DATA_B_LENGTH)
- l1_data_b_length = L1_DATA_B_LENGTH;
+ panic("L1 Data SRAM Bank B Overflow\n");
/* Copy _sdata_b_l1 to _ebss_b_l1 to L1 data bank B SRAM */
dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
l1_data_a_length, l1_data_b_length);
-
}
/* add_memory_region to memmap */
@@ -547,11 +547,38 @@ static __init void memory_setup(void)
);
}
+/*
+ * Find the lowest, highest page frame number we have available
+ */
+void __init find_min_max_pfn(void)
+{
+ int i;
+
+ max_pfn = 0;
+ min_low_pfn = memory_end;
+
+ for (i = 0; i < bfin_memmap.nr_map; i++) {
+ unsigned long start, end;
+ /* RAM? */
+ if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
+ continue;
+ start = PFN_UP(bfin_memmap.map[i].addr);
+ end = PFN_DOWN(bfin_memmap.map[i].addr +
+ bfin_memmap.map[i].size);
+ if (start >= end)
+ continue;
+ if (end > max_pfn)
+ max_pfn = end;
+ if (start < min_low_pfn)
+ min_low_pfn = start;
+ }
+}
+
static __init void setup_bootmem_allocator(void)
{
int bootmap_size;
int i;
- unsigned long min_pfn, max_pfn;
+ unsigned long start_pfn, end_pfn;
unsigned long curr_pfn, last_pfn, size;
/* mark memory between memory_start and memory_end usable */
@@ -561,8 +588,19 @@ static __init void setup_bootmem_allocator(void)
sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
print_memory_map("boot memmap");
- min_pfn = PAGE_OFFSET >> PAGE_SHIFT;
- max_pfn = memory_end >> PAGE_SHIFT;
+ /* intialize globals in linux/bootmem.h */
+ find_min_max_pfn();
+ /* pfn of the last usable page frame */
+ if (max_pfn > memory_end >> PAGE_SHIFT)
+ max_pfn = memory_end >> PAGE_SHIFT;
+ /* pfn of last page frame directly mapped by kernel */
+ max_low_pfn = max_pfn;
+ /* pfn of the first usable page frame after kernel image*/
+ if (min_low_pfn < memory_start >> PAGE_SHIFT)
+ min_low_pfn = memory_start >> PAGE_SHIFT;
+
+ start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
+ end_pfn = memory_end >> PAGE_SHIFT;
/*
* give all the memory to the bootmap allocator, tell it to put the
@@ -570,7 +608,7 @@ static __init void setup_bootmem_allocator(void)
*/
bootmap_size = init_bootmem_node(NODE_DATA(0),
memory_start >> PAGE_SHIFT, /* map goes here */
- min_pfn, max_pfn);
+ start_pfn, end_pfn);
/* register the memmap regions with the bootmem allocator */
for (i = 0; i < bfin_memmap.nr_map; i++) {
@@ -583,7 +621,7 @@ static __init void setup_bootmem_allocator(void)
* We are rounding up the start address of usable memory:
*/
curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
- if (curr_pfn >= max_pfn)
+ if (curr_pfn >= end_pfn)
continue;
/*
* ... and at the end of the usable range downwards:
@@ -591,8 +629,8 @@ static __init void setup_bootmem_allocator(void)
last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
bfin_memmap.map[i].size);
- if (last_pfn > max_pfn)
- last_pfn = max_pfn;
+ if (last_pfn > end_pfn)
+ last_pfn = end_pfn;
/*
* .. finally, did all the rounding and playing
@@ -611,9 +649,59 @@ static __init void setup_bootmem_allocator(void)
BOOTMEM_DEFAULT);
}
+#define EBSZ_TO_MEG(ebsz) \
+({ \
+ int meg = 0; \
+ switch (ebsz & 0xf) { \
+ case 0x1: meg = 16; break; \
+ case 0x3: meg = 32; break; \
+ case 0x5: meg = 64; break; \
+ case 0x7: meg = 128; break; \
+ case 0x9: meg = 256; break; \
+ case 0xb: meg = 512; break; \
+ } \
+ meg; \
+})
+static inline int __init get_mem_size(void)
+{
+#ifdef CONFIG_MEM_SIZE
+ return CONFIG_MEM_SIZE;
+#else
+# if defined(EBIU_SDBCTL)
+# if defined(BF561_FAMILY)
+ int ret = 0;
+ u32 sdbctl = bfin_read_EBIU_SDBCTL();
+ ret += EBSZ_TO_MEG(sdbctl >> 0);
+ ret += EBSZ_TO_MEG(sdbctl >> 8);
+ ret += EBSZ_TO_MEG(sdbctl >> 16);
+ ret += EBSZ_TO_MEG(sdbctl >> 24);
+ return ret;
+# else
+ return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
+# endif
+# elif defined(EBIU_DDRCTL1)
+ u32 ddrctl = bfin_read_EBIU_DDRCTL1();
+ int ret = 0;
+ switch (ddrctl & 0xc0000) {
+ case DEVSZ_64: ret = 64 / 8;
+ case DEVSZ_128: ret = 128 / 8;
+ case DEVSZ_256: ret = 256 / 8;
+ case DEVSZ_512: ret = 512 / 8;
+ }
+ switch (ddrctl & 0x30000) {
+ case DEVWD_4: ret *= 2;
+ case DEVWD_8: ret *= 2;
+ case DEVWD_16: break;
+ }
+ return ret;
+# endif
+#endif
+ BUG();
+}
+
void __init setup_arch(char **cmdline_p)
{
- unsigned long l1_length, sclk, cclk;
+ unsigned long sclk, cclk;
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
@@ -631,7 +719,7 @@ void __init setup_arch(char **cmdline_p)
/* setup memory defaults from the user config */
physical_mem_end = 0;
- _ramend = CONFIG_MEM_SIZE * 1024 * 1024;
+ _ramend = get_mem_size() * 1024 * 1024;
memset(&bfin_memmap, 0, sizeof(bfin_memmap));
@@ -712,15 +800,6 @@ void __init setup_arch(char **cmdline_p)
paging_init();
- /* check the size of the l1 area */
- l1_length = _etext_l1 - _stext_l1;
- if (l1_length > L1_CODE_LENGTH)
- panic("L1 code memory overflow\n");
-
- l1_length = _ebss_l1 - _sdata_l1;
- if (l1_length > L1_DATA_A_LENGTH)
- panic("L1 data memory overflow\n");
-
/* Copy atomic sequences to their fixed location, and sanity check that
these locations are the ones that we advertise to userspace. */
memcpy((void *)FIXED_CODE_START, &fixed_code_start,
@@ -859,12 +938,17 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %d\n"
"vendor_id\t: %s\n"
"cpu family\t: 0x%x\n"
- "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK)\n"
+ "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
"stepping\t: %d\n",
0,
vendor,
(bfin_read_CHIPID() & CHIPID_FAMILY),
cpu, cclk/1000000, sclk/1000000,
+#ifdef CONFIG_MPU
+ "mpu on",
+#else
+ "mpu off",
+#endif
revid);
seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
@@ -973,7 +1057,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "No Ways are locked\n");
}
#endif
-
seq_printf(m, "board name\t: %s\n", bfin_board_name);
seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
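The BF561 branch of the new get_mem_size() sums the four SDRAM bank-size fields of EBIU_SDBCTL one byte at a time; a standalone sketch of the same decode, using a made-up register value purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* same lookup as the patch's EBSZ_TO_MEG(): low nibble encodes the bank size in MB */
static int ebsz_to_meg(uint32_t ebsz)
{
	switch (ebsz & 0xf) {
	case 0x1: return 16;
	case 0x3: return 32;
	case 0x5: return 64;
	case 0x7: return 128;
	case 0x9: return 256;
	case 0xb: return 512;
	}
	return 0;
}

int main(void)
{
	uint32_t sdbctl = 0x05050505;   /* hypothetical EBIU_SDBCTL: four 64 MB banks */
	int mb = ebsz_to_meg(sdbctl >> 0)  + ebsz_to_meg(sdbctl >> 8) +
	         ebsz_to_meg(sdbctl >> 16) + ebsz_to_meg(sdbctl >> 24);
	printf("total SDRAM: %d MB\n", mb);   /* 256 */
	return 0;
}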
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index 5564c9588aa8..cb9d883d493c 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -38,6 +38,7 @@
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
+#include <asm/fixed_code.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -50,18 +51,20 @@ struct rt_sigframe {
int sig;
struct siginfo *pinfo;
void *puc;
+ /* This is no longer needed by the kernel, but unfortunately userspace
+ * code expects it to be there. */
char retcode[8];
struct siginfo info;
struct ucontext uc;
};
-asmlinkage int sys_sigaltstack(const stack_t * uss, stack_t * uoss)
+asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
return do_sigaltstack(uss, uoss, rdusp());
}
static inline int
-rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *pr0)
+rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *pr0)
{
unsigned long usp = 0;
int err = 0;
@@ -159,11 +162,6 @@ static inline int rt_setup_sigcontext(struct sigcontext *sc, struct pt_regs *reg
return err;
}
-static inline void push_cache(unsigned long vaddr, unsigned int len)
-{
- flush_icache_range(vaddr, vaddr + len);
-}
-
static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size)
{
@@ -209,29 +207,19 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
err |= rt_setup_sigcontext(&frame->uc.uc_mcontext, regs);
err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- /* Set up to return from userspace. */
- err |= __put_user(0x28, &(frame->retcode[0]));
- err |= __put_user(0xe1, &(frame->retcode[1]));
- err |= __put_user(0xad, &(frame->retcode[2]));
- err |= __put_user(0x00, &(frame->retcode[3]));
- err |= __put_user(0xa0, &(frame->retcode[4]));
- err |= __put_user(0x00, &(frame->retcode[5]));
-
if (err)
goto give_sigsegv;
- push_cache((unsigned long)&frame->retcode, sizeof(frame->retcode));
-
/* Set up registers for signal handler */
wrusp((unsigned long)frame);
- if (get_personality & FDPIC_FUNCPTRS) {
+ if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor *) ka->sa.sa_handler;
__get_user(regs->pc, &funcptr->text);
__get_user(regs->p3, &funcptr->GOT);
} else
regs->pc = (unsigned long)ka->sa.sa_handler;
- regs->rets = (unsigned long)(frame->retcode);
+ regs->rets = SIGRETURN_STUB;
regs->r0 = frame->sig;
regs->r1 = (unsigned long)(&frame->info);
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index abcd14817d0e..efb7b25a2633 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -49,7 +49,7 @@
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
*/
-asmlinkage int sys_pipe(unsigned long *fildes)
+asmlinkage int sys_pipe(unsigned long __user *fildes)
{
int fd[2];
int error;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
new file mode 100644
index 000000000000..4482c47c09e5
--- /dev/null
+++ b/arch/blackfin/kernel/time-ts.c
@@ -0,0 +1,219 @@
+/*
+ * linux/arch/kernel/time-ts.c
+ *
+ * Based on arm clockevents implementation and old bfin time tick.
+ *
+ * Copyright(C) 2008, GeoTechnologies, Vitja Makarov
+ *
+ * This code is licenced under the GPL version 2. For details see
+ * kernel-base/COPYING.
+ */
+#include <linux/module.h>
+#include <linux/profile.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/irq.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpufreq.h>
+
+#include <asm/blackfin.h>
+#include <asm/time.h>
+
+#ifdef CONFIG_CYCLES_CLOCKSOURCE
+
+/* Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ * ns = cycles / (freq / ns_per_sec)
+ * ns = cycles * (ns_per_sec / freq)
+ * ns = cycles * (10^9 / (cpu_khz * 10^3))
+ * ns = cycles * (10^6 / cpu_khz)
+ *
+ * Then we use scaling math (suggested by george@mvista.com) to get:
+ * ns = cycles * (10^6 * SC / cpu_khz) / SC
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * And since SC is a constant power of two, we can convert the div
+ * into a shift.
+ *
+ * We can use khz divisor instead of mhz to keep a better precision, since
+ * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+
+static unsigned long cyc2ns_scale;
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_khz)
+{
+ cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
+}
+
+static inline unsigned long long cycles_2_ns(cycle_t cyc)
+{
+ return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+static cycle_t read_cycles(void)
+{
+ return get_cycles();
+}
+
+unsigned long long sched_clock(void)
+{
+ return cycles_2_ns(read_cycles());
+}
+
+static struct clocksource clocksource_bfin = {
+ .name = "bfin_cycles",
+ .rating = 350,
+ .read = read_cycles,
+ .mask = CLOCKSOURCE_MASK(64),
+ .shift = 22,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init bfin_clocksource_init(void)
+{
+ set_cyc2ns_scale(get_cclk() / 1000);
+
+ clocksource_bfin.mult = clocksource_hz2mult(get_cclk(), clocksource_bfin.shift);
+
+ if (clocksource_register(&clocksource_bfin))
+ panic("failed to register clocksource");
+
+ return 0;
+}
+
+#else
+# define bfin_clocksource_init()
+#endif
+
+static int bfin_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ bfin_write_TCOUNT(cycles);
+ CSYNC();
+ return 0;
+}
+
+static void bfin_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC: {
+ unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
+ bfin_write_TCNTL(TMPWR);
+ bfin_write_TSCALE(TIME_SCALE - 1);
+ CSYNC();
+ bfin_write_TPERIOD(tcount);
+ bfin_write_TCOUNT(tcount);
+ bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
+ CSYNC();
+ break;
+ }
+ case CLOCK_EVT_MODE_ONESHOT:
+ bfin_write_TSCALE(0);
+ bfin_write_TCOUNT(0);
+ bfin_write_TCNTL(TMPWR | TMREN);
+ CSYNC();
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ bfin_write_TCNTL(0);
+ CSYNC();
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static void __init bfin_timer_init(void)
+{
+ /* power up the timer, but don't enable it just yet */
+ bfin_write_TCNTL(TMPWR);
+ CSYNC();
+
+ /*
+ * the TSCALE prescaler counter.
+ */
+ bfin_write_TSCALE(TIME_SCALE - 1);
+ bfin_write_TPERIOD(0);
+ bfin_write_TCOUNT(0);
+
+ /* now enable the timer */
+ CSYNC();
+}
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+#ifdef CONFIG_CORE_TIMER_IRQ_L1
+__attribute__((l1_text))
+#endif
+irqreturn_t timer_interrupt(int irq, void *dev_id);
+
+static struct clock_event_device clockevent_bfin = {
+ .name = "bfin_core_timer",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
+ .cpumask = CPU_MASK_CPU0,
+ .set_next_event = bfin_timer_set_next_event,
+ .set_mode = bfin_timer_set_mode,
+};
+
+static struct irqaction bfin_timer_irq = {
+ .name = "Blackfin Core Timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = timer_interrupt,
+ .dev_id = &clockevent_bfin,
+};
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static int __init bfin_clockevent_init(void)
+{
+ setup_irq(IRQ_CORETMR, &bfin_timer_irq);
+ bfin_timer_init();
+
+ clockevent_bfin.mult = div_sc(get_cclk(), NSEC_PER_SEC, clockevent_bfin.shift);
+ clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
+ clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
+ clockevents_register_device(&clockevent_bfin);
+
+ return 0;
+}
+
+void __init time_init(void)
+{
+ time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
+
+#ifdef CONFIG_RTC_DRV_BFIN
+ /* [#2663] hack to filter junk RTC values that would cause
+ * userspace to have to deal with time values greater than
+ * 2^31 seconds (which uClibc cannot cope with yet)
+ */
+ if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
+ printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
+ bfin_write_RTC_STAT(0);
+ }
+#endif
+
+ /* Initialize xtime. From now on, xtime is updated with timer interrupts */
+ xtime.tv_sec = secs_since_1970;
+ xtime.tv_nsec = 0;
+ set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
+
+ bfin_clocksource_init();
+ bfin_clockevent_init();
+}
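The cycles-to-nanoseconds comment block in the new time-ts.c reduces to one fixed-point multiply and shift; a minimal sketch of that conversion, assuming a hypothetical 500 MHz core clock (cpu_khz = 500000) chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10   /* 2^10, as chosen in the patch */

int main(void)
{
	uint64_t cpu_khz = 500000;   /* assumed 500 MHz CCLK */
	uint64_t scale   = (1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz;   /* 2048 */
	uint64_t cycles  = 500000000ULL;   /* one second worth of core cycles */

	/* ns = cycles * (10^6 * 2^10 / cpu_khz) / 2^10 */
	uint64_t ns = (cycles * scale) >> CYC2NS_SCALE_FACTOR;
	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycles, (unsigned long long)ns);   /* 1000000000 ns */
	return 0;
}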
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 9bdc8f99183a..eb2352320454 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -6,9 +6,10 @@
* Created:
* Description: This file contains the bfin-specific time handling details.
* Most of the stuff is located in the machine specific files.
+ * FIXME: (This file is subject for removal)
*
* Modified:
- * Copyright 2004-2006 Analog Devices Inc.
+ * Copyright 2004-2008 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
@@ -35,12 +36,12 @@
#include <linux/irq.h>
#include <asm/blackfin.h>
+#include <asm/time.h>
/* This is an NTP setting */
#define TICK_SIZE (tick_nsec / 1000)
-static void time_sched_init(irqreturn_t(*timer_routine)
- (int, void *));
+static void time_sched_init(irq_handler_t timer_routine);
static unsigned long gettimeoffset(void);
static struct irqaction bfin_timer_irq = {
@@ -48,23 +49,8 @@ static struct irqaction bfin_timer_irq = {
.flags = IRQF_DISABLED
};
-/*
- * The way that the Blackfin core timer works is:
- * - CCLK is divided by a programmable 8-bit pre-scaler (TSCALE)
- * - Every time TSCALE ticks, a 32bit is counted down (TCOUNT)
- *
- * If you take the fastest clock (1ns, or 1GHz to make the math work easier)
- * 10ms is 10,000,000 clock ticks, which fits easy into a 32-bit counter
- * (32 bit counter is 4,294,967,296ns or 4.2 seconds) so, we don't need
- * to use TSCALE, and program it to zero (which is pass CCLK through).
- * If you feel like using it, try to keep HZ * TIMESCALE to some
- * value that divides easy (like power of 2).
- */
-
-#define TIME_SCALE 1
-
static void
-time_sched_init(irqreturn_t(*timer_routine) (int, void *))
+time_sched_init(irq_handler_t timer_routine)
{
u32 tcount;
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 56a67ab698c7..5b847070dae5 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -67,6 +67,8 @@ void __init trap_init(void)
CSYNC();
}
+void *saved_icplb_fault_addr, *saved_dcplb_fault_addr;
+
int kstack_depth_to_print = 48;
static void decode_address(char *buf, unsigned long address)
@@ -75,7 +77,7 @@ static void decode_address(char *buf, unsigned long address)
struct task_struct *p;
struct mm_struct *mm;
unsigned long flags, offset;
- unsigned int in_exception = bfin_read_IPEND() & 0x10;
+ unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
#ifdef CONFIG_KALLSYMS
unsigned long symsize;
@@ -117,7 +119,7 @@ static void decode_address(char *buf, unsigned long address)
*/
write_lock_irqsave(&tasklist_lock, flags);
for_each_process(p) {
- mm = (in_exception ? p->mm : get_task_mm(p));
+ mm = (in_atomic ? p->mm : get_task_mm(p));
if (!mm)
continue;
@@ -137,23 +139,36 @@ static void decode_address(char *buf, unsigned long address)
/* FLAT does not have its text aligned to the start of
* the map while FDPIC ELF does ...
*/
- if (current->mm &&
- (address > current->mm->start_code) &&
- (address < current->mm->end_code))
- offset = address - current->mm->start_code;
- else
- offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
-
- sprintf(buf, "<0x%p> [ %s + 0x%lx ]",
- (void *)address, name, offset);
- if (!in_exception)
+
+ /* before we can check flat/fdpic, we need to
+ * make sure current is valid
+ */
+ if ((unsigned long)current >= FIXED_CODE_START &&
+ !((unsigned long)current & 0x3)) {
+ if (current->mm &&
+ (address > current->mm->start_code) &&
+ (address < current->mm->end_code))
+ offset = address - current->mm->start_code;
+ else
+ offset = (address - vma->vm_start) +
+ (vma->vm_pgoff << PAGE_SHIFT);
+
+ sprintf(buf, "<0x%p> [ %s + 0x%lx ]",
+ (void *)address, name, offset);
+ } else
+ sprintf(buf, "<0x%p> [ %s vma:0x%lx-0x%lx]",
+ (void *)address, name,
+ vma->vm_start, vma->vm_end);
+
+ if (!in_atomic)
mmput(mm);
+
goto done;
}
vml = vml->next;
}
- if (!in_exception)
+ if (!in_atomic)
mmput(mm);
}
@@ -506,7 +521,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
info.si_signo = sig;
info.si_errno = 0;
- info.si_addr = (void *)fp->pc;
+ info.si_addr = (void __user *)fp->pc;
force_sig_info(sig, &info, current);
trace_buffer_restore(j);
@@ -655,21 +670,31 @@ void dump_bfin_process(struct pt_regs *fp)
else if (context & 0x8000)
printk(KERN_NOTICE "Kernel process context\n");
- if (current->pid && current->mm) {
+ /* Because we are crashing, and pointers could be bad, we check things
+ * pretty closely before we use them
+ */
+ if ((unsigned long)current >= FIXED_CODE_START &&
+ !((unsigned long)current & 0x3) && current->pid) {
printk(KERN_NOTICE "CURRENT PROCESS:\n");
- printk(KERN_NOTICE "COMM=%s PID=%d\n",
- current->comm, current->pid);
-
- printk(KERN_NOTICE "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
- KERN_NOTICE "BSS = 0x%p-0x%p USER-STACK = 0x%p\n"
- KERN_NOTICE "\n",
- (void *)current->mm->start_code,
- (void *)current->mm->end_code,
- (void *)current->mm->start_data,
- (void *)current->mm->end_data,
- (void *)current->mm->end_data,
- (void *)current->mm->brk,
- (void *)current->mm->start_stack);
+ if (current->comm >= (char *)FIXED_CODE_START)
+ printk(KERN_NOTICE "COMM=%s PID=%d\n",
+ current->comm, current->pid);
+ else
+ printk(KERN_NOTICE "COMM= invalid\n");
+
+ if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
+ printk(KERN_NOTICE "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
+ KERN_NOTICE " BSS = 0x%p-0x%p USER-STACK = 0x%p\n"
+ KERN_NOTICE "\n",
+ (void *)current->mm->start_code,
+ (void *)current->mm->end_code,
+ (void *)current->mm->start_data,
+ (void *)current->mm->end_data,
+ (void *)current->mm->end_data,
+ (void *)current->mm->brk,
+ (void *)current->mm->start_stack);
+ else
+ printk(KERN_NOTICE "invalid mm\n");
} else
printk(KERN_NOTICE "\n" KERN_NOTICE
"No Valid process in current context\n");
@@ -680,10 +705,7 @@ void dump_bfin_mem(struct pt_regs *fp)
unsigned short *addr, *erraddr, val = 0, err = 0;
char sti = 0, buf[6];
- if (unlikely((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR))
- erraddr = (void *)fp->pc;
- else
- erraddr = (void *)fp->retx;
+ erraddr = (void *)fp->pc;
printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);
@@ -807,9 +829,9 @@ unlock:
if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
(((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
- decode_address(buf, bfin_read_DCPLB_FAULT_ADDR());
+ decode_address(buf, saved_dcplb_fault_addr);
printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
- decode_address(buf, bfin_read_ICPLB_FAULT_ADDR());
+ decode_address(buf, saved_icplb_fault_addr);
printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
}
@@ -917,8 +939,8 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
oops_in_progress = 1;
- printk(KERN_EMERG "DCPLB_FAULT_ADDR=%p\n", (void *)bfin_read_DCPLB_FAULT_ADDR());
- printk(KERN_EMERG "ICPLB_FAULT_ADDR=%p\n", (void *)bfin_read_ICPLB_FAULT_ADDR());
+ printk(KERN_EMERG "DCPLB_FAULT_ADDR=%p\n", saved_dcplb_fault_addr);
+ printk(KERN_EMERG "ICPLB_FAULT_ADDR=%p\n", saved_icplb_fault_addr);
dump_bfin_process(fp);
dump_bfin_mem(fp);
show_regs(fp);
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index cb01a9de2680..3ecc64cab3be 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -56,6 +56,10 @@ SECTIONS
*(.text.*)
*(.fixup)
+#if !L1_CODE_LENGTH
+ *(.l1.text)
+#endif
+
. = ALIGN(16);
___start___ex_table = .;
*(__ex_table)
@@ -73,6 +77,12 @@ SECTIONS
___bss_start = .;
*(.bss .bss.*)
*(COMMON)
+#if !L1_DATA_A_LENGTH
+ *(.l1.bss)
+#endif
+#if !L1_DATA_B_LENGTH
+ *(.l1.bss.B)
+#endif
___bss_stop = .;
}
@@ -83,6 +93,15 @@ SECTIONS
. = ALIGN(32);
*(.data.cacheline_aligned)
+#if !L1_DATA_A_LENGTH
+ . = ALIGN(32);
+ *(.data_l1.cacheline_aligned)
+ *(.l1.data)
+#endif
+#if !L1_DATA_B_LENGTH
+ *(.l1.data.B)
+#endif
+
DATA_DATA
*(.data.*)
CONSTRUCTORS
@@ -147,64 +166,43 @@ SECTIONS
__l1_lma_start = .;
-#if L1_CODE_LENGTH
-# define LDS_L1_CODE *(.l1.text)
-#else
-# define LDS_L1_CODE
-#endif
.text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
{
. = ALIGN(4);
__stext_l1 = .;
- LDS_L1_CODE
+ *(.l1.text)
. = ALIGN(4);
__etext_l1 = .;
}
-#if L1_DATA_A_LENGTH
-# define LDS_L1_A_DATA *(.l1.data)
-# define LDS_L1_A_BSS *(.l1.bss)
-# define LDS_L1_A_CACHE *(.data_l1.cacheline_aligned)
-#else
-# define LDS_L1_A_DATA
-# define LDS_L1_A_BSS
-# define LDS_L1_A_CACHE
-#endif
.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
{
. = ALIGN(4);
__sdata_l1 = .;
- LDS_L1_A_DATA
+ *(.l1.data)
__edata_l1 = .;
. = ALIGN(4);
__sbss_l1 = .;
- LDS_L1_A_BSS
+ *(.l1.bss)
. = ALIGN(32);
- LDS_L1_A_CACHE
+ *(.data_l1.cacheline_aligned)
. = ALIGN(4);
__ebss_l1 = .;
}
-#if L1_DATA_B_LENGTH
-# define LDS_L1_B_DATA *(.l1.data.B)
-# define LDS_L1_B_BSS *(.l1.bss.B)
-#else
-# define LDS_L1_B_DATA
-# define LDS_L1_B_BSS
-#endif
.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
{
. = ALIGN(4);
__sdata_b_l1 = .;
- LDS_L1_B_DATA
+ *(.l1.data.B)
__edata_b_l1 = .;
. = ALIGN(4);
__sbss_b_l1 = .;
- LDS_L1_B_BSS
+ *(.l1.bss.B)
. = ALIGN(4);
__ebss_b_l1 = .;
@@ -223,8 +221,6 @@ SECTIONS
DWARF_DEBUG
- NOTES
-
/DISCARD/ :
{
EXIT_TEXT