author     Mark Brown <broonie@linaro.org>    2013-09-01 13:49:12 +0100
committer  Mark Brown <broonie@linaro.org>    2013-09-01 13:49:12 +0100
commit     a3e412dcf568cc389a584cd37ccb0057a89dcb0a (patch)
tree       279b97d2e178721c2d0b0d6663631e9f9ccc69b8
parent     368ce0bca434347b7cd5550ee71b046d7727830f (diff)
parent     692fb0fe5aacea861e4006342029cf505a7dbe18 (diff)
Merge remote-tracking branch 'spi/topic/sirf' into spi-next
-rw-r--r--  drivers/spi/Kconfig     |   2
-rw-r--r--  drivers/spi/spi-sirf.c  | 227
2 files changed, 182 insertions, 47 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f552c89abdd4..0170d4c4a8a3 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -433,7 +433,7 @@ config SPI_SH_HSPI
config SPI_SIRF
tristate "CSR SiRFprimaII SPI controller"
- depends on ARCH_SIRF
+ depends on SIRF_DMA
select SPI_BITBANG
help
SPI driver for CSR SiRFprimaII SoCs
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index fc5081ab3677..a1f21b747733 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -19,6 +19,10 @@
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/sirfsoc_dma.h>
#define DRIVER_NAME "sirfsoc_spi"
@@ -119,9 +123,19 @@
#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
+/*
+ * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
+ * due to the limitation of dma controller
+ */
+
+#define ALIGNED(x) (!((u32)x & 0x3))
+#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
+ ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
+
struct sirfsoc_spi {
struct spi_bitbang bitbang;
- struct completion done;
+ struct completion rx_done;
+ struct completion tx_done;
void __iomem *base;
u32 ctrl_freq; /* SPI controller clock speed */
@@ -137,8 +151,16 @@ struct sirfsoc_spi {
void (*tx_word) (struct sirfsoc_spi *);
/* number of words left to be tranmitted/received */
- unsigned int left_tx_cnt;
- unsigned int left_rx_cnt;
+ unsigned int left_tx_word;
+ unsigned int left_rx_word;
+
+ /* rx & tx DMA channels */
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ void *dummypage;
+ int word_width; /* in bytes */
int chipselect[0];
};
@@ -155,7 +177,7 @@ static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
@@ -169,7 +191,7 @@ static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
+ sspi->left_tx_word--;
}
static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
@@ -184,7 +206,7 @@ static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
@@ -198,7 +220,7 @@ static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
+ sspi->left_tx_word--;
}
static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
@@ -213,7 +235,7 @@ static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
@@ -228,7 +250,7 @@ static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
+ sspi->left_tx_word--;
}
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
@@ -241,7 +263,7 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
/* Error Conditions */
if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
spi_stat & SIRFSOC_SPI_TX_UFLOW) {
- complete(&sspi->done);
+ complete(&sspi->rx_done);
writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
}
@@ -249,50 +271,61 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
| SIRFSOC_SPI_RXFIFO_THD_REACH))
while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
& SIRFSOC_SPI_FIFO_EMPTY)) &&
- sspi->left_rx_cnt)
+ sspi->left_rx_word)
sspi->rx_word(sspi);
if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY
| SIRFSOC_SPI_TXFIFO_THD_REACH))
while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
& SIRFSOC_SPI_FIFO_FULL)) &&
- sspi->left_tx_cnt)
+ sspi->left_tx_word)
sspi->tx_word(sspi);
/* Received all words */
- if ((sspi->left_rx_cnt == 0) && (sspi->left_tx_cnt == 0)) {
- complete(&sspi->done);
+ if ((sspi->left_rx_word == 0) && (sspi->left_tx_word == 0)) {
+ complete(&sspi->rx_done);
writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
}
return IRQ_HANDLED;
}
+static void spi_sirfsoc_dma_fini_callback(void *data)
+{
+ struct completion *dma_complete = data;
+
+ complete(dma_complete);
+}
+
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct sirfsoc_spi *sspi;
int timeout = t->len * 10;
sspi = spi_master_get_devdata(spi->master);
- sspi->tx = t->tx_buf;
- sspi->rx = t->rx_buf;
- sspi->left_tx_cnt = sspi->left_rx_cnt = t->len;
- INIT_COMPLETION(sspi->done);
+ sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
+ sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
+ sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
+ INIT_COMPLETION(sspi->rx_done);
+ INIT_COMPLETION(sspi->tx_done);
writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
- if (t->len == 1) {
+ if (sspi->left_tx_word == 1) {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
SIRFSOC_SPI_ENA_AUTO_CLR,
sspi->base + SIRFSOC_SPI_CTRL);
writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
- } else if ((t->len > 1) && (t->len < SIRFSOC_SPI_DAT_FRM_LEN_MAX)) {
+ } else if ((sspi->left_tx_word > 1) && (sspi->left_tx_word <
+ SIRFSOC_SPI_DAT_FRM_LEN_MAX)) {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
SIRFSOC_SPI_MUL_DAT_MODE |
SIRFSOC_SPI_ENA_AUTO_CLR,
sspi->base + SIRFSOC_SPI_CTRL);
- writel(t->len - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
- writel(t->len - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
} else {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
sspi->base + SIRFSOC_SPI_CTRL);
@@ -305,17 +338,64 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- /* Send the first word to trigger the whole tx/rx process */
- sspi->tx_word(sspi);
+ if (IS_DMA_VALID(t)) {
+ struct dma_async_tx_descriptor *rx_desc, *tx_desc;
+
+ sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE);
+ rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
+ sspi->dst_start, t->len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ rx_desc->callback = spi_sirfsoc_dma_fini_callback;
+ rx_desc->callback_param = &sspi->rx_done;
+
+ sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE);
+ tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
+ sspi->src_start, t->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx_desc->callback = spi_sirfsoc_dma_fini_callback;
+ tx_desc->callback_param = &sspi->tx_done;
+
+ dmaengine_submit(tx_desc);
+ dmaengine_submit(rx_desc);
+ dma_async_issue_pending(sspi->tx_chan);
+ dma_async_issue_pending(sspi->rx_chan);
+ } else {
+ /* Send the first word to trigger the whole tx/rx process */
+ sspi->tx_word(sspi);
+
+ writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
+ SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
+ SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
+ SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
+ }
- writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
- SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
- SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
- SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
- if (wait_for_completion_timeout(&sspi->done, timeout) == 0)
+ if (!IS_DMA_VALID(t)) { /* for PIO */
+ if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0)
+ dev_err(&spi->dev, "transfer timeout\n");
+ } else if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
dev_err(&spi->dev, "transfer timeout\n");
+ dmaengine_terminate_all(sspi->rx_chan);
+ } else
+ sspi->left_rx_word = 0;
+
+ /*
+ * we only wait tx-done event if transferring by DMA. for PIO,
+ * we get rx data by writing tx data, so if rx is done, tx has
+ * done earlier
+ */
+ if (IS_DMA_VALID(t)) {
+ if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ dmaengine_terminate_all(sspi->tx_chan);
+ }
+ }
+
+ if (IS_DMA_VALID(t)) {
+ dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
+ dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
+ }
/* TX, RX FIFO stop */
writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
@@ -323,7 +403,7 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
- return t->len - sspi->left_rx_cnt;
+ return t->len - sspi->left_rx_word * sspi->word_width;
}
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
@@ -332,7 +412,6 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
if (sspi->chipselect[spi->chip_select] == 0) {
u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
- regval |= SIRFSOC_SPI_CS_IO_OUT;
switch (value) {
case BITBANG_CS_ACTIVE:
if (spi->mode & SPI_CS_HIGH)
@@ -369,11 +448,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
- /* Enable IO mode for RX, TX */
- writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
- writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
regval = (sspi->ctrl_freq / (2 * hz)) - 1;
-
if (regval > 0xFFFF || regval < 0) {
dev_err(&spi->dev, "Speed %d not supported\n", hz);
return -EINVAL;
@@ -388,6 +463,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
SIRFSOC_SPI_FIFO_WIDTH_BYTE;
rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
SIRFSOC_SPI_FIFO_WIDTH_BYTE;
+ sspi->word_width = 1;
break;
case 12:
case 16:
@@ -399,6 +475,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
SIRFSOC_SPI_FIFO_WIDTH_WORD;
rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
SIRFSOC_SPI_FIFO_WIDTH_WORD;
+ sspi->word_width = 2;
break;
case 32:
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
@@ -408,6 +485,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
SIRFSOC_SPI_FIFO_WIDTH_DWORD;
rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
SIRFSOC_SPI_FIFO_WIDTH_DWORD;
+ sspi->word_width = 4;
break;
default:
BUG();
@@ -442,6 +520,17 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
+
+ if (IS_DMA_VALID(t)) {
+ /* Enable DMA mode for RX, TX */
+ writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ } else {
+ /* Enable IO mode for RX, TX */
+ writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ }
+
return 0;
}
@@ -466,6 +555,8 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct spi_master *master;
struct resource *mem_res;
int num_cs, cs_gpio, irq;
+ u32 rx_dma_ch, tx_dma_ch;
+ dma_cap_mask_t dma_cap_mask;
int i;
int ret;
@@ -476,6 +567,20 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
goto err_cs;
}
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "sirf,spi-dma-rx-channel", &rx_dma_ch);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get rx dma channel\n");
+ goto err_cs;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "sirf,spi-dma-tx-channel", &tx_dma_ch);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get tx dma channel\n");
+ goto err_cs;
+ }
+
master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI master\n");
@@ -484,12 +589,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(&pdev->dev, "Unable to get IO resource\n");
- ret = -ENODEV;
- goto free_master;
- }
master->num_chipselect = num_cs;
for (i = 0; i < master->num_chipselect; i++) {
@@ -516,6 +615,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
}
}
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sspi->base)) {
ret = PTR_ERR(sspi->base);
@@ -538,19 +638,40 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
sspi->bitbang.master->setup = spi_sirfsoc_setup;
master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+ /* request DMA channels */
+ dma_cap_zero(dma_cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma_cap_mask);
+
+ sspi->rx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
+ (void *)rx_dma_ch);
+ if (!sspi->rx_chan) {
+ dev_err(&pdev->dev, "can not allocate rx dma channel\n");
+ ret = -ENODEV;
+ goto free_master;
+ }
+ sspi->tx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
+ (void *)tx_dma_ch);
+ if (!sspi->tx_chan) {
+ dev_err(&pdev->dev, "can not allocate tx dma channel\n");
+ ret = -ENODEV;
+ goto free_rx_dma;
+ }
+
sspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(sspi->clk)) {
- ret = -EINVAL;
- goto free_master;
+ ret = PTR_ERR(sspi->clk);
+ goto free_tx_dma;
}
clk_prepare_enable(sspi->clk);
sspi->ctrl_freq = clk_get_rate(sspi->clk);
- init_completion(&sspi->done);
+ init_completion(&sspi->rx_done);
+ init_completion(&sspi->tx_done);
writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
@@ -559,17 +680,28 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
/* We are not using dummy delay between command and data */
writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
+ sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
+ if (!sspi->dummypage) {
+ ret = -ENOMEM;
+ goto free_clk;
+ }
+
ret = spi_bitbang_start(&sspi->bitbang);
if (ret)
- goto free_clk;
+ goto free_dummypage;
dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
return 0;
-
+free_dummypage:
+ kfree(sspi->dummypage);
free_clk:
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
+free_tx_dma:
+ dma_release_channel(sspi->tx_chan);
+free_rx_dma:
+ dma_release_channel(sspi->rx_chan);
free_master:
spi_master_put(master);
err_cs:
@@ -590,8 +722,11 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
if (sspi->chipselect[i] > 0)
gpio_free(sspi->chipselect[i]);
}
+ kfree(sspi->dummypage);
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
+ dma_release_channel(sspi->rx_chan);
+ dma_release_channel(sspi->tx_chan);
spi_master_put(master);
return 0;
}