Diffstat (limited to 'arch/ia64/sn')
-rw-r--r--  arch/ia64/sn/kernel/Makefile          |   2
-rw-r--r--  arch/ia64/sn/kernel/irq.c             | 101
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c          |  32
-rw-r--r--  arch/ia64/sn/kernel/setup.c           |   2
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile      |   2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c  |  42
-rw-r--r--  arch/ia64/sn/pci/Makefile             |   2
-rw-r--r--  arch/ia64/sn/pci/pcibr/Makefile       |   2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c    |   2
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c     |   2
10 files changed, 53 insertions(+), 136 deletions(-)
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 0591038735af..d27df1d45da7 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -7,7 +7,7 @@
# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved.
#
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_acpi_init.o io_common.o \
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 13c15d968098..81a1f4e6bcd8 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -23,11 +23,9 @@
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_feature_sets.h>
-static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
-int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
@@ -78,62 +76,40 @@ u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
return ret_stuff.status;
}
-static unsigned int sn_startup_irq(unsigned int irq)
+static unsigned int sn_startup_irq(struct irq_data *data)
{
return 0;
}
-static void sn_shutdown_irq(unsigned int irq)
+static void sn_shutdown_irq(struct irq_data *data)
{
}
extern void ia64_mca_register_cpev(int);
-static void sn_disable_irq(unsigned int irq)
+static void sn_disable_irq(struct irq_data *data)
{
- if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
+ if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
ia64_mca_register_cpev(0);
}
-static void sn_enable_irq(unsigned int irq)
+static void sn_enable_irq(struct irq_data *data)
{
- if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
- ia64_mca_register_cpev(irq);
+ if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
+ ia64_mca_register_cpev(data->irq);
}
-static void sn_ack_irq(unsigned int irq)
+static void sn_ack_irq(struct irq_data *data)
{
u64 event_occurred, mask;
+ unsigned int irq = data->irq & 0xff;
- irq = irq & 0xff;
event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
mask = event_occurred & SH_ALL_INT_MASK;
HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
- move_native_irq(irq);
-}
-
-static void sn_end_irq(unsigned int irq)
-{
- int ivec;
- u64 event_occurred;
-
- ivec = irq & 0xff;
- if (ivec == SGI_UART_VECTOR) {
- event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
- /* If the UART bit is set here, we may have received an
- * interrupt from the UART that the driver missed. To
- * make sure, we IPI ourselves to force us to look again.
- */
- if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
- platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
- IA64_IPI_DM_INT, 0);
- }
- }
- __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
- if (sn_force_interrupt_flag)
- force_interrupt(irq);
+ irq_move_irq(data);
}
static void sn_irq_info_free(struct rcu_head *head);
@@ -228,9 +204,11 @@ finish_up:
return new_irq_info;
}
-static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+static int sn_set_affinity_irq(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+ unsigned int irq = data->irq;
nasid_t nasid;
int slice;
@@ -249,7 +227,7 @@ void sn_set_err_irq_affinity(unsigned int irq)
{
/*
* On systems which support CPU disabling (SHub2), all error interrupts
- * are targetted at the boot CPU.
+ * are targeted at the boot CPU.
*/
if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
set_irq_affinity_info(irq, cpu_physical_id(0), 0);
@@ -259,26 +237,25 @@ void sn_set_err_irq_affinity(unsigned int irq) { }
#endif
static void
-sn_mask_irq(unsigned int irq)
+sn_mask_irq(struct irq_data *data)
{
}
static void
-sn_unmask_irq(unsigned int irq)
+sn_unmask_irq(struct irq_data *data)
{
}
struct irq_chip irq_type_sn = {
- .name = "SN hub",
- .startup = sn_startup_irq,
- .shutdown = sn_shutdown_irq,
- .enable = sn_enable_irq,
- .disable = sn_disable_irq,
- .ack = sn_ack_irq,
- .end = sn_end_irq,
- .mask = sn_mask_irq,
- .unmask = sn_unmask_irq,
- .set_affinity = sn_set_affinity_irq
+ .name = "SN hub",
+ .irq_startup = sn_startup_irq,
+ .irq_shutdown = sn_shutdown_irq,
+ .irq_enable = sn_enable_irq,
+ .irq_disable = sn_disable_irq,
+ .irq_ack = sn_ack_irq,
+ .irq_mask = sn_mask_irq,
+ .irq_unmask = sn_unmask_irq,
+ .irq_set_affinity = sn_set_affinity_irq
};
ia64_vector sn_irq_to_vector(int irq)
@@ -296,15 +273,13 @@ unsigned int sn_local_vector_to_irq(u8 vector)
void sn_irq_init(void)
{
int i;
- struct irq_desc *base_desc = irq_desc;
ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
for (i = 0; i < NR_IRQS; i++) {
- if (base_desc[i].chip == &no_irq_chip) {
- base_desc[i].chip = &irq_type_sn;
- }
+ if (irq_get_chip(i) == &no_irq_chip)
+ irq_set_chip(i, &irq_type_sn);
}
}
@@ -378,7 +353,6 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
int cpuphys;
- struct irq_desc *desc;
#endif
pci_dev_get(pci_dev);
@@ -395,12 +369,11 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
#ifdef CONFIG_SMP
cpuphys = cpu_physical_id(cpu);
set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
- desc = irq_to_desc(sn_irq_info->irq_irq);
/*
* Affinity was set by the PROM, prevent it from
* being reset by the request_irq() path.
*/
- desc->status |= IRQ_AFFINITY_SET;
+ irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
#endif
}
@@ -439,25 +412,11 @@ sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
/* Don't force an interrupt if the irq has been disabled */
- if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
+ if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
pci_provider && pci_provider->force_interrupt)
(*pci_provider->force_interrupt)(sn_irq_info);
}
-static void force_interrupt(int irq)
-{
- struct sn_irq_info *sn_irq_info;
-
- if (!sn_ioif_inited)
- return;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
- sn_call_force_intr_provider(sn_irq_info);
-
- rcu_read_unlock();
-}
-
/*
* Check for lost interrupts. If the PIC int_status reg. says that
* an interrupt has been sent, but not handled, and the interrupt
@@ -476,7 +435,7 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
/*
* Bridge types attached to TIO (anything but PIC) do not need this WAR
* since they do not target Shub II interrupt registers. If that
- * ever changes, this check needs to accomodate.
+ * ever changes, this check needs to accommodate.
*/
if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
return;
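
The irq.c hunks above follow the generic genirq conversion: irq_chip callbacks that used to take a bare IRQ number now take a struct irq_data *, and direct pokes at irq_desc[] are replaced by accessors such as irq_get_irq_data() and the irqd_*() helpers. A minimal sketch of that pattern, using made-up names (example_ack, example_chip) rather than the SN-specific callbacks shown in the diff:

/* Illustrative only: example_ack/example_chip are hypothetical names. */
#include <linux/kernel.h>
#include <linux/irq.h>

static void example_ack(struct irq_data *data)
{
	unsigned int irq = data->irq;		/* IRQ number now comes from irq_data */

	pr_debug("acking irq %u\n", irq);	/* chip-specific acknowledge goes here */
	irq_move_irq(data);			/* replaces move_native_irq(irq) */
}

static struct irq_chip example_chip = {
	.name		= "example",
	.irq_ack	= example_ack,		/* was .ack = ... in the old model */
};
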
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index a5e500f02853..2b98b9e088de 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -144,16 +144,16 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
*/
msg.data = 0x100 + irq;
- set_irq_msi(irq, entry);
+ irq_set_msi_desc(irq, entry);
write_msi_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
return 0;
}
#ifdef CONFIG_SMP
-static int sn_set_msi_irq_affinity(unsigned int irq,
- const struct cpumask *cpu_mask)
+static int sn_set_msi_irq_affinity(struct irq_data *data,
+ const struct cpumask *cpu_mask, bool force)
{
struct msi_msg msg;
int slice;
@@ -164,7 +164,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
struct sn_irq_info *sn_irq_info;
struct sn_irq_info *new_irq_info;
struct sn_pcibus_provider *provider;
- unsigned int cpu;
+ unsigned int cpu, irq = data->irq;
cpu = cpumask_first(cpu_mask);
sn_irq_info = sn_msi_info[irq].sn_irq_info;
@@ -206,33 +206,33 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
write_msi_msg(irq, &msg);
- cpumask_copy(irq_desc[irq].affinity, cpu_mask);
+ cpumask_copy(data->affinity, cpu_mask);
return 0;
}
#endif /* CONFIG_SMP */
-static void sn_ack_msi_irq(unsigned int irq)
+static void sn_ack_msi_irq(struct irq_data *data)
{
- move_native_irq(irq);
+ irq_move_irq(data);
ia64_eoi();
}
-static int sn_msi_retrigger_irq(unsigned int irq)
+static int sn_msi_retrigger_irq(struct irq_data *data)
{
- unsigned int vector = irq;
+ unsigned int vector = data->irq;
ia64_resend_irq(vector);
return 1;
}
static struct irq_chip sn_msi_chip = {
- .name = "PCI-MSI",
- .irq_mask = mask_msi_irq,
- .irq_unmask = unmask_msi_irq,
- .ack = sn_ack_msi_irq,
+ .name = "PCI-MSI",
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_ack = sn_ack_msi_irq,
#ifdef CONFIG_SMP
- .set_affinity = sn_set_msi_irq_affinity,
+ .irq_set_affinity = sn_set_msi_irq_affinity,
#endif
- .retrigger = sn_msi_retrigger_irq,
+ .irq_retrigger = sn_msi_retrigger_irq,
};
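
On the MSI side the same conversion swaps the set_irq_*() helpers for their irq_set_*() counterparts. A hedged sketch of the two setup calls; the wrapper function and its arguments are hypothetical, only the two genirq calls mirror the diff above:

#include <linux/irq.h>
#include <linux/msi.h>

/* Hypothetical helper; only the two irq_set_*() calls are the point. */
static void example_msi_setup(unsigned int irq, struct msi_desc *entry,
			      struct irq_chip *chip)
{
	irq_set_msi_desc(irq, entry);		/* was set_irq_msi(irq, entry) */
	irq_set_chip_and_handler(irq, chip, handle_edge_irq);
						/* was set_irq_chip_and_handler() */
}
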
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index dbc4cbecb5ed..77db0b514fa4 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -592,7 +592,7 @@ void __cpuinit sn_cpu_init(void)
/*
* Don't check status. The SAL call is not supported on all PROMs
* but a failure is harmless.
- * Architechtuallly, cpu_init is always called twice on cpu 0. We
+ * Architecturally, cpu_init is always called twice on cpu 0. We
* should set cpu_number on cpu 0 once.
*/
if (cpuid == 0) {
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
index 08e6565dc908..3d09108d4277 100644
--- a/arch/ia64/sn/kernel/sn2/Makefile
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -9,7 +9,7 @@
# sn2 specific kernel files
#
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index c76d8dc3aea3..7aab87f48060 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -45,38 +45,6 @@ static int licenseID_open(struct inode *inode, struct file *file)
return single_open(file, licenseID_show, NULL);
}
-/*
- * Enable forced interrupt by default.
- * When set, the sn interrupt handler writes the force interrupt register on
- * the bridge chip. The hardware will then send an interrupt message if the
- * interrupt line is active. This mimics a level sensitive interrupt.
- */
-extern int sn_force_interrupt_flag;
-
-static int sn_force_interrupt_show(struct seq_file *s, void *p)
-{
- seq_printf(s, "Force interrupt is %s\n",
- sn_force_interrupt_flag ? "enabled" : "disabled");
- return 0;
-}
-
-static ssize_t sn_force_interrupt_write_proc(struct file *file,
- const char __user *buffer, size_t count, loff_t *data)
-{
- char val;
-
- if (copy_from_user(&val, buffer, 1))
- return -EFAULT;
-
- sn_force_interrupt_flag = (val == '0') ? 0 : 1;
- return count;
-}
-
-static int sn_force_interrupt_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sn_force_interrupt_show, NULL);
-}
-
static int coherence_id_show(struct seq_file *s, void *p)
{
seq_printf(s, "%d\n", partition_coherence_id());
@@ -114,14 +82,6 @@ static const struct file_operations proc_license_id_fops = {
.release = single_release,
};
-static const struct file_operations proc_sn_force_intr_fops = {
- .open = sn_force_interrupt_open,
- .read = seq_read,
- .write = sn_force_interrupt_write_proc,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static const struct file_operations proc_coherence_id_fops = {
.open = coherence_id_open,
.read = seq_read,
@@ -149,8 +109,6 @@ void register_sn_procfs(void)
proc_create("system_serial_number", 0444, sgi_proc_dir,
&proc_system_sn_fops);
proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops);
- proc_create("sn_force_interrupt", 0644, sgi_proc_dir,
- &proc_sn_force_intr_fops);
proc_create("coherence_id", 0444, sgi_proc_dir,
&proc_coherence_id_fops);
proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index ad4ef34dfe26..df2a90145426 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,6 +7,6 @@
#
# Makefile for the sn pci general routines.
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
index 01192d3247dd..396bcae36309 100644
--- a/arch/ia64/sn/pci/pcibr/Makefile
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -7,7 +7,7 @@
#
# Makefile for the sn2 io routines.
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
obj-y += pcibr_dma.o pcibr_reg.o \
pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index c659ad5613a0..33def666a664 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -227,7 +227,7 @@ pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
* after doing the read. For PIC this routine then forces a fake interrupt
* on another line, which is logically associated with the slot that the PIO
* is addressed to. It then spins while watching the memory location that
- * the interrupt is targetted to. When the interrupt response arrives, we
+ * the interrupt is targeted to. When the interrupt response arrives, we
* are sure that the DMA has landed in memory and it is safe for the driver
* to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
* Bridge register since it ensures the data has entered the coherence domain,
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 4d4536e3b6f3..9c271be9919a 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* use the GART mapped mode.
*/
static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
+tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
u64 mapaddr;