author    Linus Torvalds <torvalds@linux-foundation.org>    2011-10-26 16:17:32 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-10-26 16:17:32 +0200
commit    3cfef9524677a4ecb392d6fbffe6ebce6302f1d4 (patch)
tree      88647d9dc50d634dee9cfeb7f354d620977a2f33 /drivers/iommu
parent    982653009b883ef1529089e3e6f1ae2fee41cbe2 (diff)
parent    68cc3990a545dc0da221b4844dd8b9c06623a6c5 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
  lockdep: Comment all warnings
  lib: atomic64: Change the type of local lock to raw_spinlock_t
  locking, lib/atomic64: Annotate atomic64_lock::lock as raw
  locking, x86, iommu: Annotate qi->q_lock as raw
  locking, x86, iommu: Annotate irq_2_ir_lock as raw
  locking, x86, iommu: Annotate iommu->register_lock as raw
  locking, dma, ipu: Annotate bank_lock as raw
  locking, ARM: Annotate low level hw locks as raw
  locking, drivers/dca: Annotate dca_lock as raw
  locking, powerpc: Annotate uic->lock as raw
  locking, x86: mce: Annotate cmci_discover_lock as raw
  locking, ACPI: Annotate c3_lock as raw
  locking, oprofile: Annotate oprofilefs lock as raw
  locking, video: Annotate vga console lock as raw
  locking, latencytop: Annotate latency_lock as raw
  locking, timer_stats: Annotate table_lock as raw
  locking, rwsem: Annotate inner lock as raw
  locking, semaphores: Annotate inner lock as raw
  locking, sched: Annotate thread_group_cputimer as raw
  ...

Fix up conflicts in kernel/posix-cpu-timers.c manually: making
cputimer->cputime a raw lock conflicted with the ABBA fix in commit
bcd5cff7216f ("cputimer: Cure lock inversion").
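The iommu portion of this merge applies one mechanical pattern throughout: locks that must keep spinning even on preempt-rt kernels (they are taken in interrupt context or protect short hardware register sequences) are switched from spinlock_t to raw_spinlock_t, together with the matching raw_* init/lock/unlock calls. The following is a minimal, self-contained sketch of that pattern, not code from the patch; the demo_dev structure and demo_* names are hypothetical.

```c
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device structure, only to illustrate the conversion. */
struct demo_dev {
	raw_spinlock_t	register_lock;	/* was: spinlock_t register_lock; */
	void __iomem	*reg;
};

/* Statically defined lock: DEFINE_SPINLOCK() becomes DEFINE_RAW_SPINLOCK(). */
static DEFINE_RAW_SPINLOCK(demo_global_lock);

static void demo_init(struct demo_dev *dev)
{
	/* spin_lock_init() becomes raw_spin_lock_init(). */
	raw_spin_lock_init(&dev->register_lock);
}

static irqreturn_t demo_irq(int irq, void *data)
{
	struct demo_dev *dev = data;
	unsigned long flags;

	/*
	 * spin_lock_irqsave()/spin_unlock_irqrestore() become the raw_*
	 * variants; on preempt-rt the raw lock keeps spinning with
	 * interrupts disabled instead of turning into a sleeping lock.
	 */
	raw_spin_lock_irqsave(&dev->register_lock, flags);
	readl(dev->reg);	/* register access stays inside the lock */
	raw_spin_unlock_irqrestore(&dev->register_lock, flags);

	/* Same substitution for the plain lock/unlock forms. */
	raw_spin_lock(&demo_global_lock);
	raw_spin_unlock(&demo_global_lock);

	return IRQ_HANDLED;
}
```

On a non-rt kernel the raw variants compile to the same code as the ordinary spinlock API, so the conversion is behaviorally neutral there; the annotation only changes how the lock behaves under preempt-rt.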
Diffstat (limited to 'drivers/iommu')
-rw-r--r--    drivers/iommu/dmar.c            48
-rw-r--r--    drivers/iommu/intel-iommu.c     36
-rw-r--r--    drivers/iommu/intr_remapping.c  40
3 files changed, 62 insertions(+), 62 deletions(-)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 587e8f2d38d8..35c1e17fce1d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -652,7 +652,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
(unsigned long long)iommu->cap,
(unsigned long long)iommu->ecap);
- spin_lock_init(&iommu->register_lock);
+ raw_spin_lock_init(&iommu->register_lock);
drhd->iommu = iommu;
return 0;
@@ -771,11 +771,11 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
restart:
rc = 0;
- spin_lock_irqsave(&qi->q_lock, flags);
+ raw_spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
- spin_unlock_irqrestore(&qi->q_lock, flags);
+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
cpu_relax();
- spin_lock_irqsave(&qi->q_lock, flags);
+ raw_spin_lock_irqsave(&qi->q_lock, flags);
}
index = qi->free_head;
@@ -815,15 +815,15 @@ restart:
if (rc)
break;
- spin_unlock(&qi->q_lock);
+ raw_spin_unlock(&qi->q_lock);
cpu_relax();
- spin_lock(&qi->q_lock);
+ raw_spin_lock(&qi->q_lock);
}
qi->desc_status[index] = QI_DONE;
reclaim_free_desc(qi);
- spin_unlock_irqrestore(&qi->q_lock, flags);
+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
if (rc == -EAGAIN)
goto restart;
@@ -912,7 +912,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
if (!ecap_qis(iommu->ecap))
return;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_QIES))
@@ -932,7 +932,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
!(sts & DMA_GSTS_QIES), sts);
end:
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
@@ -947,7 +947,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
qi->free_head = qi->free_tail = 0;
qi->free_cnt = QI_LENGTH;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
/* write zero to the tail reg */
writel(0, iommu->reg + DMAR_IQT_REG);
@@ -960,7 +960,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
@@ -1009,7 +1009,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->free_head = qi->free_tail = 0;
qi->free_cnt = QI_LENGTH;
- spin_lock_init(&qi->q_lock);
+ raw_spin_lock_init(&qi->q_lock);
__dmar_enable_qi(iommu);
@@ -1075,11 +1075,11 @@ void dmar_msi_unmask(struct irq_data *data)
unsigned long flag;
/* unmask it */
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(0, iommu->reg + DMAR_FECTL_REG);
/* Read a reg to force flush the post write */
readl(iommu->reg + DMAR_FECTL_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(struct irq_data *data)
@@ -1088,11 +1088,11 @@ void dmar_msi_mask(struct irq_data *data)
struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
/* mask it */
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
/* Read a reg to force flush the post write */
readl(iommu->reg + DMAR_FECTL_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
@@ -1100,11 +1100,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
struct intel_iommu *iommu = irq_get_handler_data(irq);
unsigned long flag;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
@@ -1112,11 +1112,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
struct intel_iommu *iommu = irq_get_handler_data(irq);
unsigned long flag;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
@@ -1153,7 +1153,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
u32 fault_status;
unsigned long flag;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
if (fault_status)
printk(KERN_ERR "DRHD: handling fault status reg %x\n",
@@ -1192,7 +1192,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
writel(DMA_FRCD_F, iommu->reg + reg +
fault_index * PRIMARY_FAULT_REG_LEN + 12);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
dmar_fault_do_one(iommu, type, fault_reason,
source_id, guest_addr);
@@ -1200,14 +1200,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
fault_index++;
if (fault_index >= cap_num_fault_regs(iommu->cap))
fault_index = 0;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
}
clear_rest:
/* clear all the other faults */
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
writel(fault_status, iommu->reg + DMAR_FSTS_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f28d933c7927..be1953c239b0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -939,7 +939,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
addr = iommu->root_entry;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -948,7 +948,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_RTPS), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -959,14 +959,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(val & DMA_GSTS_WBFS)), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
@@ -993,14 +993,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
}
val |= DMA_CCMD_ICC;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
@@ -1039,7 +1039,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
if (cap_write_drain(iommu->cap))
val |= DMA_TLB_WRITE_DRAIN;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
/* Note: Only uses first TLB reg currently */
if (val_iva)
dmar_writeq(iommu->reg + tlb_offset, val_iva);
@@ -1049,7 +1049,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
IOMMU_WAIT_OP(iommu, tlb_offset + 8,
dmar_readq, (!(val & DMA_TLB_IVT)), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* check IOTLB invalidation granularity */
if (DMA_TLB_IAIG(val) == 0)
@@ -1165,7 +1165,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
u32 pmen;
unsigned long flags;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
pmen = readl(iommu->reg + DMAR_PMEN_REG);
pmen &= ~DMA_PMEN_EPM;
writel(pmen, iommu->reg + DMAR_PMEN_REG);
@@ -1174,7 +1174,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
readl, !(pmen & DMA_PMEN_PRS), pmen);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
@@ -1182,7 +1182,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
u32 sts;
unsigned long flags;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
iommu->gcmd |= DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@@ -1190,7 +1190,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_TES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}
@@ -1199,7 +1199,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
u32 sts;
unsigned long flag;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->gcmd &= ~DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@@ -1207,7 +1207,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(sts & DMA_GSTS_TES)), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
return 0;
}
@@ -3329,7 +3329,7 @@ static int iommu_suspend(void)
for_each_active_iommu(iommu, drhd) {
iommu_disable_translation(iommu);
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->iommu_state[SR_DMAR_FECTL_REG] =
readl(iommu->reg + DMAR_FECTL_REG);
@@ -3340,7 +3340,7 @@ static int iommu_suspend(void)
iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
readl(iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
return 0;
@@ -3367,7 +3367,7 @@ static void iommu_resume(void)
for_each_active_iommu(iommu, drhd) {
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
iommu->reg + DMAR_FECTL_REG);
@@ -3378,7 +3378,7 @@ static void iommu_resume(void)
writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
for_each_active_iommu(iommu, drhd)
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index cfb0dd4bf0b6..07c9f189f314 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -54,7 +54,7 @@ static __init int setup_intremap(char *str)
}
early_param("intremap", setup_intremap);
-static DEFINE_SPINLOCK(irq_2_ir_lock);
+static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
@@ -71,12 +71,12 @@ int get_irte(int irq, struct irte *entry)
if (!entry || !irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
index = irq_iommu->irte_index + irq_iommu->sub_handle;
*entry = *(irq_iommu->iommu->ir_table->base + index);
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
@@ -110,7 +110,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
return -1;
}
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
do {
for (i = index; i < index + count; i++)
if (table->base[i].present)
@@ -122,7 +122,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
if (index == start_index) {
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate an IRTE\n");
return -1;
}
@@ -136,7 +136,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
}
@@ -161,10 +161,10 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
if (!irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
*sub_handle = irq_iommu->sub_handle;
index = irq_iommu->irte_index;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
}
@@ -176,14 +176,14 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
if (!irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = subhandle;
irq_iommu->irte_mask = 0;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
@@ -199,7 +199,7 @@ int modify_irte(int irq, struct irte *irte_modified)
if (!irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
iommu = irq_iommu->iommu;
@@ -211,7 +211,7 @@ int modify_irte(int irq, struct irte *irte_modified)
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
@@ -279,7 +279,7 @@ int free_irte(int irq)
if (!irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
rc = clear_entries(irq_iommu);
@@ -288,7 +288,7 @@ int free_irte(int irq)
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = 0;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
@@ -418,7 +418,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
addr = virt_to_phys((void *)iommu->ir_table->base);
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg + DMAR_IRTA_REG,
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
@@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
/*
* global invalidation of interrupt entry cache before enabling
@@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
*/
qi_global_iec(iommu);
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
@@ -446,7 +446,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
@@ -494,7 +494,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
*/
qi_global_iec(iommu);
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_IRES))
@@ -507,7 +507,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
readl, !(sts & DMA_GSTS_IRES), sts);
end:
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int __init dmar_x2apic_optout(void)