/*
* drivers/video/tegra/host/host1x/host1x_intr.c
*
* Tegra Graphics Host Interrupt Management
*
* Copyright (C) 2010 Google, Inc.
* Copyright (c) 2010-2012, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/mach/irq.h>
#include "nvhost_intr.h"
#include "dev.h"
#include "host1x_hardware.h"
/*** HW host sync management ***/
/*
 * irq_chip .irq_mask stub for the virtual syncpt IRQs. Masking of
 * syncpoint threshold interrupts is done through the host1x sync
 * registers (see t20_intr_enable_syncpt_intr / the disable paths),
 * so there is nothing to do per virtual IRQ here.
 */
static void syncpt_thresh_mask(struct irq_data *data)
{
(void)data;
}
/*
 * irq_chip .irq_unmask stub for the virtual syncpt IRQs; the real
 * unmasking is performed via the host1x sync registers, not per-IRQ.
 */
static void syncpt_thresh_unmask(struct irq_data *data)
{
(void)data;
}
static void syncpt_thresh_cascade(unsigned int irq, struct irq_desc *desc)
{
void __iomem *sync_regs = irq_desc_get_handler_data(desc);
unsigned long reg;
int id;
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
reg = readl(sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
for_each_set_bit(id, ®, 32)
generic_handle_irq(id + INT_SYNCPT_THRESH_BASE);
chained_irq_exit(chip, desc);
}
/*
 * irq_chip backing the per-syncpoint virtual interrupts. Both mask
 * callbacks are stubs because masking is handled through the host1x
 * sync registers rather than per virtual IRQ.
 */
static struct irq_chip syncpt_thresh_irq = {
	.name		= "syncpt",
	.irq_mask	= syncpt_thresh_mask,
	.irq_unmask	= syncpt_thresh_unmask,
};
/*
 * Bring the host1x sync unit into a known interrupt state: mask and
 * acknowledge all syncpoint threshold interrupts, set up one virtual
 * IRQ per syncpoint, and install the cascade handler on the main
 * syncpoint interrupt line.
 */
static void t20_intr_init_host_sync(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	int i;

	/* start from a clean slate: mask and ack every threshold intr */
	writel(0xffffffffUL,
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
	writel(0xffffffffUL,
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);

	/* one virtual IRQ per syncpoint, demuxed by the cascade handler */
	for (i = 0; i < INT_SYNCPT_THRESH_NR; i++) {
		int virq = INT_SYNCPT_THRESH_BASE + i;

		irq_set_chip_and_handler(virq, &syncpt_thresh_irq,
					 handle_simple_irq);
		irq_set_chip_data(virq, sync_regs);
		set_irq_flags(virq, IRQF_VALID);
	}

	irq_set_chained_handler(INT_HOST1X_MPCORE_SYNCPT,
				syncpt_thresh_cascade);
	irq_set_handler_data(INT_HOST1X_MPCORE_SYNCPT, sync_regs);

	/*
	 * Disable the ip_busy_timeout: this prevents write drops, etc.
	 * There's no real way to recover from a hung client anyway.
	 */
	writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);

	/*
	 * Raise the auto-ack timeout to the maximum value; 2d will hang
	 * otherwise on ap20.
	 */
	writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
}
/*
 * Tell the host1x timing logic how many host clocks make up one
 * microsecond, by programming the microsecond clock register.
 */
static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

	writel(cpm, sync_regs + HOST1X_SYNC_USEC_CLK);
}
/*
 * Program the interrupt threshold for one syncpoint. The hardware
 * threshold registers are an array of 32-bit words starting at
 * HOST1X_SYNC_SYNCPT_INT_THRESH_0; only the low 16 bits are used.
 */
static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr,
	u32 id, u32 thresh)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

	writel(thresh & 0xffff,
		sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
}
/*
 * Unmask threshold interrupt delivery to CPU0 for a single syncpoint
 * (the enable register is write-one-to-enable, one bit per syncpoint).
 */
static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

	writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
}
/*
 * Mask and acknowledge all syncpoint threshold interrupts on both CPUs.
 *
 * Fix: THRESH_INT_DISABLE is write-one-to-disable — the init path
 * (t20_intr_init_host_sync) writes 0xffffffff to it to mask everything
 * — so the previous writel(0, ...) here was a no-op and left syncpt
 * interrupts enabled. Write all ones to actually disable them.
 */
static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;

	/* disable interrupts for both cpu's */
	writel(0xffffffffu,
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);

	/* clear status for both cpu's */
	writel(0xffffffffu, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
	writel(0xffffffffu, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS);
}
/**
* Sync point threshold interrupt service function
* Handles sync point threshold triggers, in interrupt context
*/
irqreturn_t t20_intr_syncpt_thresh_isr(int irq, void *dev_id)
{
struct nvhost_intr_syncpt *syncpt = dev_id;
unsigned int id = syncpt->id;
struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
writel(BIT(id),
sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
writel(BIT(id),
sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
return IRQ_WAKE_THREAD;
}
/**
* Host general interrupt service function
* Handles read / write failures
*/
/**
 * Host general interrupt service function.
 * Reports IP read/write timeouts flagged in the extended status
 * register, then acknowledges both status registers.
 */
static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id)
{
	struct nvhost_intr *intr = dev_id;
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	u32 status = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
	u32 ext_status = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);

	if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_READ_INT, ext_status)) {
		u32 addr = readl(sync_regs +
				HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
		pr_err("Host read timeout at address %x\n", addr);
	}

	if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_WRITE_INT, ext_status)) {
		u32 addr = readl(sync_regs +
				HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
		pr_err("Host write timeout at address %x\n", addr);
	}

	/* ack: extended status first, then the main status register */
	writel(ext_status, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(status, sync_regs + HOST1X_SYNC_HINTSTATUS);

	return IRQ_HANDLED;
}
/*
 * Hook up the general (non-syncpt) host1x interrupt: master-disable,
 * ack stale status, request the IRQ, then unmask the sources of
 * interest. Idempotent — returns 0 if the IRQ is already requested.
 */
static int t20_intr_request_host_general_irq(struct nvhost_intr *intr)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	int ret;

	if (intr->host_general_irq_requested)
		return 0;

	/* master disable for general (not syncpt) host interrupts */
	writel(0, sync_regs + HOST1X_SYNC_INTMASK);

	/* clear any stale status & extstatus bits */
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS);

	ret = request_irq(intr->host_general_irq, t20_intr_host1x_isr, 0,
			"host_status", intr);
	if (ret)
		return ret;

	/* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
	writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);

	/* enable extra interrupt sources */
	writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);

	/* route host module interrupt to CPU0 */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);

	/* master enable for general (not syncpt) host interrupts */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);

	intr->host_general_irq_requested = true;

	return ret;
}
/*
 * Release the general host1x interrupt, masking it at the master level
 * first. Safe to call when the IRQ was never requested.
 */
static void t20_intr_free_host_general_irq(struct nvhost_intr *intr)
{
	void __iomem *sync_regs;

	if (!intr->host_general_irq_requested)
		return;

	sync_regs = intr_to_dev(intr)->sync_aperture;

	/* master disable for general (not syncpt) host interrupts */
	writel(0, sync_regs + HOST1X_SYNC_INTMASK);

	free_irq(intr->host_general_irq, intr);
	intr->host_general_irq_requested = false;
}
/*
 * Request the threaded IRQ for one syncpoint: the hard-irq half masks
 * and acks, nvhost_syncpt_thresh_fn does the work in thread context.
 * Returns 0 if the IRQ is already requested or on success.
 */
static int t20_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
	int ret = 0;

	if (!syncpt->irq_requested) {
		ret = request_threaded_irq(syncpt->irq,
					t20_intr_syncpt_thresh_isr,
					nvhost_syncpt_thresh_fn,
					0, syncpt->thresh_irq_name, syncpt);
		if (!ret)
			syncpt->irq_requested = 1;
	}

	return ret;
}
/*
 * Fill in the chip-support operations table with the Tegra2 (t20)
 * host1x interrupt implementations. Always succeeds.
 */
int nvhost_init_t20_intr_support(struct nvhost_chip_support *op)
{
	/* sync-unit setup and timing */
	op->intr.init_host_sync = t20_intr_init_host_sync;
	op->intr.set_host_clocks_per_usec = t20_intr_set_host_clocks_per_usec;

	/* per-syncpoint threshold interrupt control */
	op->intr.set_syncpt_threshold = t20_intr_set_syncpt_threshold;
	op->intr.enable_syncpt_intr = t20_intr_enable_syncpt_intr;
	op->intr.disable_all_syncpt_intrs = t20_intr_disable_all_syncpt_intrs;
	op->intr.request_syncpt_irq = t20_request_syncpt_irq;

	/* general (non-syncpt) host interrupt */
	op->intr.request_host_general_irq = t20_intr_request_host_general_irq;
	op->intr.free_host_general_irq = t20_intr_free_host_general_irq;

	return 0;
}