author     Laxman Dewangan <ldewangan@nvidia.com>    2011-02-23 20:02:45 +0530
committer  Dan Willemsen <dwillemsen@nvidia.com>     2011-11-30 21:43:22 -0800
commit     74687bd6c007e8addd5af4c5757ba53ca82556c2 (patch)
tree       156444bc59ae4b29ce6bfd24e151dd61758efa15 /drivers/spi
parent     18f1cc38976f71df2fcab41b724b9f3bfc3ab9b6 (diff)
spi: tegra: Fixing misc issues.
The following fixes are included:
- Support half duplex.
- Use only SW-based chip select (CS).
- Fix write-then-readback through the command register, which did not work.
- Use CPU-based transfers for smaller sizes and DMA-based transfers for larger ones (see the sketch after the diffstat).
- Read the transfer status properly after every transaction.

bug 791149
bug 791780

Original-Change-Id: I293b3f1b571276f5d8fe4ad4da67f827926e4b73
Reviewed-on: http://git-master/r/20581
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
Tested-by: Laxman Dewangan <ldewangan@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: Amit Kamath <akamath@nvidia.com>
Rebase-Id: R29f88f7509bdb182f05916ecf31e1090b1b9d017
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/spi-tegra.c  1110
1 file changed, 742 insertions(+), 368 deletions(-)
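
One of the main behavioral changes in this patch is how each transfer chunk picks its path: if the remaining data fits within the 32-word SLINK FIFO it is driven by the CPU (PIO), otherwise it goes through the APB DMA bounce buffers (see spi_tegra_calculate_curr_xfer_param() and spi_tegra_start_transfer() in the diff below). The following is a minimal standalone sketch of that selection only; the enum and function names are hypothetical and not part of the driver.

#include <stdbool.h>

#define SPI_FIFO_DEPTH	32	/* same FIFO depth the patch defines */

enum xfer_path { XFER_CPU_PIO, XFER_APB_DMA };

/* Sketch of the patch's path selection; names here are illustrative only. */
static enum xfer_path pick_transfer_path(unsigned remain_bytes,
					 unsigned bytes_per_word,
					 bool is_packed)
{
	unsigned total_fifo_words;

	/* Packed mode stores several 8/16-bit words per 32-bit FIFO slot,
	 * so the cost is counted in 32-bit units; unpacked mode uses one
	 * FIFO slot per SPI word. */
	if (is_packed)
		total_fifo_words = remain_bytes / 4;
	else
		total_fifo_words = remain_bytes / bytes_per_word;

	/* Transfers that fit in the FIFO are done with CPU-driven FIFO
	 * accesses; larger ones are handed to the APB DMA bounce buffers. */
	return (total_fifo_words > SPI_FIFO_DEPTH) ? XFER_APB_DMA
						   : XFER_CPU_PIO;
}

For example, a 4 KiB packed transfer (1024 FIFO words) takes the DMA path, while a 16-byte packed transfer stays on the CPU path, matching the if (total_fifo_words > SPI_FIFO_DEPTH) checks in the patch.
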
diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c
index 8ff0e7233dbe..c4e2b615f34f 100644
--- a/drivers/spi/spi-tegra.c
+++ b/drivers/spi/spi-tegra.c
@@ -17,6 +17,9 @@
*
*/
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -27,10 +30,13 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/completion.h>
#include <linux/spi/spi.h>
+#include <linux/spi-tegra.h>
#include <mach/dma.h>
+#include <mach/clk.h>
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -125,20 +131,31 @@
#define SLINK_STATUS2 0x01c
#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
-#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
#define SLINK_TX_FIFO 0x100
#define SLINK_RX_FIFO 0x180
-#define SLINK_FIFO_DEPTH 0x20
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SPI_FIFO_DEPTH 32
+#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
static const unsigned long spi_tegra_req_sels[] = {
TEGRA_DMA_REQ_SEL_SL2B1,
TEGRA_DMA_REQ_SEL_SL2B2,
TEGRA_DMA_REQ_SEL_SL2B3,
TEGRA_DMA_REQ_SEL_SL2B4,
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
+ TEGRA_DMA_REQ_SEL_SL2B5,
+ TEGRA_DMA_REQ_SEL_SL2B6,
+#endif
+
};
-#define BB_LEN 2048
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
@@ -146,63 +163,88 @@ static const unsigned long spi_tegra_req_sels[] = {
(TX_FIFO_EMPTY_COUNT_MAX | \
RX_FIFO_FULL_COUNT_ZERO << 16)
+#define MAX_CHIP_SELECT 4
+#define SLINK_FIFO_DEPTH 4
struct spi_tegra_data {
struct spi_master *master;
struct platform_device *pdev;
spinlock_t lock;
+ char port_name[32];
struct clk *clk;
void __iomem *base;
unsigned long phys;
+ unsigned irq;
u32 cur_speed;
struct list_head queue;
struct spi_transfer *cur;
+ struct spi_device *cur_spi;
unsigned cur_pos;
unsigned cur_len;
- unsigned cur_bytes_per_word;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+
+ unsigned cur_direction;
+
+ bool is_dma_allowed;
- /* The tegra spi controller has a bug which causes the first word
- * in PIO transactions to be garbage. Since packed DMA transactions
- * require transfers to be 4 byte aligned we need a bounce buffer
- * for the generic case.
- */
struct tegra_dma_req rx_dma_req;
struct tegra_dma_channel *rx_dma;
- u32 *rx_bb;
- dma_addr_t rx_bb_phys;
+ u32 *rx_buf;
+ dma_addr_t rx_buf_phys;
+ unsigned cur_rx_pos;
+
struct tegra_dma_req tx_dma_req;
struct tegra_dma_channel *tx_dma;
- u32 *tx_bb;
- dma_addr_t tx_bb_phys;
+ u32 *tx_buf;
+ dma_addr_t tx_buf_phys;
+ unsigned cur_tx_pos;
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ bool is_clkon_always;
+ bool clk_state;
bool is_suspended;
- unsigned long save_slink_cmd;
- u32 rx_complete;
- u32 tx_complete;
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 rx_complete;
+ u32 tx_complete;
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
bool is_packed;
- unsigned long packed_size;
- unsigned (*spi_tegra_rx)(struct spi_tegra_data *tspi,
- struct spi_transfer *t);
- unsigned (*spi_tegra_tx)(struct spi_tegra_data *tspi,
- struct spi_transfer *t);
- u8 g_bits_per_word;
+ unsigned long packed_size;
+
+ u32 command_reg;
+ u32 command2_reg;
+ u32 dma_control_reg;
+ u32 def_command_reg;
+ u32 def_command2_reg;
+ u32 max_speed[MAX_CHIP_SELECT];
+ u32 modes[MAX_CHIP_SELECT];
};
-
static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
- unsigned long reg)
+ unsigned long reg)
{
+ if (!tspi->clk_state)
+ BUG();
return readl(tspi->base + reg);
}
static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
- unsigned long val,
- unsigned long reg)
+ unsigned long val, unsigned long reg)
{
+ if (!tspi->clk_state)
+ BUG();
writel(val, tspi->base + reg);
}
@@ -212,156 +254,18 @@ static void spi_tegra_clear_status(struct spi_tegra_data *tspi)
unsigned long val_write = 0;
val = spi_tegra_readl(tspi, SLINK_STATUS);
- if (val & SLINK_BSY)
- val_write |= SLINK_BSY;
-
- if (val & SLINK_ERR) {
- val_write |= SLINK_ERR;
- pr_err("%s ERROR bit set 0x%lx \n", __func__, val);
- if (val & SLINK_TX_OVF)
- val_write |= SLINK_TX_OVF;
- if (val & SLINK_RX_OVF)
- val_write |= SLINK_RX_OVF;
- if (val & SLINK_RX_UNF)
- val_write |= SLINK_RX_UNF;
- if (val & SLINK_TX_UNF)
- val_write |= SLINK_TX_UNF;
- if (!(val & SLINK_TX_EMPTY))
- val_write |= SLINK_TX_FLUSH;
- if (!(val & SLINK_RX_EMPTY))
- val_write |= SLINK_RX_FLUSH;
- }
- spi_tegra_writel(tspi, val_write, SLINK_STATUS);
-}
-static void spi_tegra_go(struct spi_tegra_data *tspi)
-{
- unsigned long val;
- unsigned long test_val;
- unsigned unused_fifo_size;
-
- wmb();
-
- val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
- val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN;
- if (tspi->is_packed) {
- val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size - 1);
- val |= tspi->packed_size;
- } else {
- val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1);
- }
- spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
- tegra_dma_enqueue_req(tspi->tx_dma, &tspi->tx_dma_req);
- tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
-
- val &= ~SLINK_TX_TRIG_MASK & ~SLINK_RX_TRIG_MASK;
-
- if (tspi->rx_dma_req.size & 0xF)
- val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
- else if (((tspi->rx_dma_req.size) >> 4) & 0x1)
- val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
- else
- val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
-
- spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
- /*
- * TRM 24.1.1.7 wait for the FIFO to be full
- */
- test_val = spi_tegra_readl(tspi, SLINK_STATUS2);
- unused_fifo_size = (tspi->tx_dma_req.size/4) >= 0x20 ?
- 0:
- SLINK_FIFO_DEPTH - (tspi->tx_dma_req.size/4);
- while (SLINK_TX_FIFO_EMPTY_COUNT(test_val) != (unused_fifo_size))
- test_val = spi_tegra_readl(tspi, SLINK_STATUS2);
-
- if (tspi->is_packed) {
- val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
- val |= SLINK_PACKED;
- spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
- udelay(1);
- }
-
- val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
- val |= SLINK_DMA_EN;
- spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
-}
-static unsigned spi_tegra_fill_tx_fifo_packed(struct spi_tegra_data *tspi,
- struct spi_transfer *t)
-{
- unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
- tspi->cur_bytes_per_word);
- unsigned long val;
-
- val = spi_tegra_readl(tspi, SLINK_COMMAND);
- val &= ~SLINK_WORD_SIZE(~0);
- val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1);
- spi_tegra_writel(tspi, val, SLINK_COMMAND);
- memcpy(tspi->tx_bb, t->tx_buf, len);
- tspi->tx_dma_req.size = len;
- return len;
-}
-
-static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi,
- struct spi_transfer *t)
-{
- unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
- tspi->cur_bytes_per_word);
- u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos;
- int i, j;
- unsigned long val;
-
- val = spi_tegra_readl(tspi, SLINK_COMMAND);
- val &= ~SLINK_WORD_SIZE(~0);
- val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1);
- spi_tegra_writel(tspi, val, SLINK_COMMAND);
-
- if (tspi->g_bits_per_word == 32) {
- memcpy(tspi->tx_bb, (void *)tx_buf, len);
- } else {
- for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
- val = 0;
- for (j = 0; j < tspi->cur_bytes_per_word; j++)
- val |= tx_buf[i + j] << (tspi->cur_bytes_per_word-j-1) * 8;
-
- tspi->tx_bb[i / tspi->cur_bytes_per_word] = val;
- }
- }
-
- tspi->tx_dma_req.size = len / tspi->cur_bytes_per_word * 4;
-
- return len;
-}
-
-static unsigned spi_tegra_drain_rx_fifo_packed(struct spi_tegra_data *tspi,
- struct spi_transfer *t)
-{
- unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
- tspi->cur_bytes_per_word);
-
- memcpy(t->rx_buf, tspi->rx_bb, len);
- tspi->rx_dma_req.size = len;
- return len;
-}
-static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi,
- struct spi_transfer *t)
-{
- unsigned len = tspi->cur_len;
- int i, j;
- u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos;
- unsigned long val;
+ val_write = SLINK_RDY;
+ if (val & SLINK_TX_OVF)
+ val_write |= SLINK_TX_OVF;
+ if (val & SLINK_RX_OVF)
+ val_write |= SLINK_RX_OVF;
+ if (val & SLINK_RX_UNF)
+ val_write |= SLINK_RX_UNF;
+ if (val & SLINK_TX_UNF)
+ val_write |= SLINK_TX_UNF;
- if (tspi->g_bits_per_word == 32) {
- memcpy(rx_buf, (void *)tspi->rx_bb, len);
- } else {
- for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
- val = tspi->rx_bb[i / tspi->cur_bytes_per_word];
- for (j = 0; j < tspi->cur_bytes_per_word; j++)
- rx_buf[i + j] =
- (val >> (tspi->cur_bytes_per_word - j - 1) * 8) & 0xff;
- }
- }
-
- return len;
+ spi_tegra_writel(tspi, val_write, SLINK_STATUS);
}
static unsigned long spi_tegra_get_packed_size(struct spi_tegra_data *tspi,
@@ -369,7 +273,7 @@ static unsigned long spi_tegra_get_packed_size(struct spi_tegra_data *tspi,
{
unsigned long val;
- switch (tspi->cur_bytes_per_word) {
+ switch (tspi->bytes_per_word) {
case 0:
val = SLINK_PACK_SIZE_4;
break;
@@ -387,189 +291,379 @@ static unsigned long spi_tegra_get_packed_size(struct spi_tegra_data *tspi,
}
return val;
}
-static void spi_tegra_start_transfer(struct spi_device *spi,
- struct spi_transfer *t)
+
+static unsigned spi_tegra_calculate_curr_xfer_param(
+ struct spi_device *spi, struct spi_tegra_data *tspi,
+ struct spi_transfer *t)
{
- struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
- u32 speed;
- u8 bits_per_word;
- unsigned long val;
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
+ tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+ tspi->packed_size = spi_tegra_get_packed_size(tspi, t);
- speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
- bits_per_word = t->bits_per_word ? t->bits_per_word :
- spi->bits_per_word;
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = remain_len/4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = remain_len/tspi->bytes_per_word;
+ }
+ return total_fifo_words;
+}
- tspi->g_bits_per_word = bits_per_word;
+static unsigned spi_tegra_fill_tx_fifo_from_client_txbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ unsigned long fifo_status;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int written_words;
+
+ fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
+ tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
- tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1;
+ if (tspi->is_packed) {
+ nbytes = tspi->curr_dma_words * tspi->bytes_per_word;
+ max_n_32bit = (min(nbytes, tx_empty_count*4) - 1)/4 + 1;
+ for (count = 0; count < max_n_32bit; ++count) {
+ x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (*tx_buf++) << (i*8);
+ spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ written_words = min(max_n_32bit * tspi->words_per_32bit,
+ tspi->curr_dma_words);
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ nbytes = max_n_32bit * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; ++count) {
+ x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ ++i, nbytes--)
+ x |= ((*tx_buf++) << i*8);
+ spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ written_words = max_n_32bit;
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
- /* !!! fix me: Packed mode disabled */
- tspi->is_packed = 0;
+static unsigned int spi_tegra_read_rx_fifo_to_client_rxbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ unsigned long fifo_status;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int read_words = 0;
+ unsigned len;
+
+ fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
+ rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
+ dev_dbg(&tspi->pdev->dev, "Rx fifo count %d\n", rx_full_count);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; ++count) {
+ x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; len && (i < 4); ++i, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ for (count = 0; count < rx_full_count; ++count) {
+ x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; (i < tspi->bytes_per_word); ++i)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
- tspi->packed_size = spi_tegra_get_packed_size(tspi, t);
+static void spi_tegra_copy_client_txbuf_to_spi_txbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
if (tspi->is_packed) {
- tspi->spi_tegra_tx = spi_tegra_fill_tx_fifo_packed;
- tspi->spi_tegra_rx = spi_tegra_drain_rx_fifo_packed;
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_buf, t->tx_buf + tspi->cur_pos, len);
} else {
- tspi->spi_tegra_tx = spi_tegra_fill_tx_fifo;
- tspi->spi_tegra_rx = spi_tegra_drain_rx_fifo;
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int x;
+
+ for (count = 0; count < tspi->curr_dma_words; ++count) {
+ x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ ++i, consume--)
+ x |= ((*tx_buf++) << i*8);
+ tspi->tx_buf[count] = x;
+ }
}
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+}
- if (speed != tspi->cur_speed)
- clk_set_rate(tspi->clk, speed);
+static void spi_tegra_copy_spi_rxbuf_to_client_rxbuf(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ unsigned int x;
+ for (count = 0; count < tspi->curr_dma_words; ++count) {
+ x = tspi->rx_buf[count];
+ for (i = 0; (i < tspi->bytes_per_word); ++i)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+}
- if (tspi->cur_speed == 0)
- clk_enable(tspi->clk);
+static int spi_tegra_start_dma_based_transfer(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned long test_val;
+ unsigned int len;
+ int ret = 0;
- tspi->cur_speed = speed;
+ INIT_COMPLETION(tspi->rx_dma_complete);
+ INIT_COMPLETION(tspi->tx_dma_complete);
- spi_tegra_clear_status(tspi);
- val = spi_tegra_readl(tspi, SLINK_COMMAND2);
- val &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN);
- if (t->rx_buf)
- val |= SLINK_RXEN;
- if (t->tx_buf)
- val |= SLINK_TXEN;
- val |= SLINK_SS_EN_CS(spi->chip_select);
- val |= SLINK_SPIE;
+ val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
+ val |= tspi->packed_size;
if (tspi->is_packed)
- val |= SLINK_CS_ACTIVE_BETWEEN;
- spi_tegra_writel(tspi, val, SLINK_COMMAND2);
-
- val = spi_tegra_readl(tspi, SLINK_COMMAND);
- val &= ~SLINK_BIT_LENGTH(~0);
- val |= SLINK_BIT_LENGTH(bits_per_word - 1);
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
- /* FIXME: should probably control CS manually so that we can be sure
- * it does not go low between transfer and to support delay_usecs
- * correctly.
- */
- val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW;
+ if (len & 0xF)
+ val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
+ else
+ val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
- if (spi->mode & SPI_CPHA)
- val |= SLINK_CK_SDA;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
- if (spi->mode & SPI_CPOL)
- val |= SLINK_IDLE_SCLK_DRIVE_HIGH;
- else
- val |= SLINK_IDLE_SCLK_DRIVE_LOW;
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
- val |= SLINK_M_S;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ spi_tegra_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ wmb();
+ tspi->tx_dma_req.size = len;
+ ret = tegra_dma_enqueue_req(tspi->tx_dma, &tspi->tx_dma_req);
+ if (ret < 0) {
+ dev_err(&tspi->pdev->dev, "Error in starting tx dma "
+ " error = %d\n", ret);
+ return ret;
+ }
- spi_tegra_writel(tspi, val, SLINK_COMMAND);
+ /* Wait for tx fifo to be full before starting slink */
+ test_val = spi_tegra_readl(tspi, SLINK_STATUS);
+ while (!(test_val & SLINK_TX_FULL))
+ test_val = spi_tegra_readl(tspi, SLINK_STATUS);
+ }
- spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ tspi->rx_dma_req.size = len;
+ ret = tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
+ if (ret < 0) {
+ dev_err(&tspi->pdev->dev, "Error in starting rx dma "
+ " error = %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tegra_dma_dequeue_req(tspi->tx_dma,
+ &tspi->tx_dma_req);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
- tspi->cur = t;
- tspi->cur_pos = 0;
- tspi->cur_len = tspi->spi_tegra_tx(tspi, t);
- tspi->rx_dma_req.size = tspi->tx_dma_req.size;
- tspi->rx_complete = 0;
- tspi->tx_complete = 0;
- spi_tegra_go(tspi);
+ val |= SLINK_DMA_EN;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ return ret;
}
-static void spi_tegra_start_message(struct spi_device *spi,
- struct spi_message *m)
+static int spi_tegra_start_cpu_based_transfer(
+ struct spi_tegra_data *tspi, struct spi_transfer *t)
{
- struct spi_transfer *t;
+ unsigned long val;
+ unsigned curr_words;
- m->actual_length = 0;
- m->status = 0;
+ val = tspi->packed_size;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
- t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
- spi_tegra_start_transfer(spi, t);
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ curr_words = spi_tegra_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ curr_words = tspi->curr_dma_words;
+ val |= SLINK_DMA_BLOCK_SIZE(curr_words - 1);
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+ val |= SLINK_DMA_EN;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+ return 0;
}
-static void complete_operation(struct tegra_dma_req *req)
+
+static void spi_tegra_start_transfer(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg)
{
- struct spi_tegra_data *tspi = req->dev;
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
unsigned long val;
- struct spi_message *m;
- struct spi_device *spi;
- u32 timeout = 0;
- u32 temp = 0;
+ unsigned total_fifo_words;
+ int ret;
+ unsigned int cs_pol_bit[] = {
+ SLINK_CS_POLARITY,
+ SLINK_CS_POLARITY1,
+ SLINK_CS_POLARITY2,
+ SLINK_CS_POLARITY3,
+ };
- /* the SPI controller may come back with both the BSY and RDY bits
- * set. In this case we need to wait for the BSY bit to clear so
- * that we are sure the DMA is finished. 1000 reads was empirically
- * determined to be long enough.
- */
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
- while ((spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY)) {
- if (timeout++ > 1000)
- break;
- }
- while ((spi_tegra_readl(tspi, SLINK_STATUS2)) != SLINK_STATUS2_RESET) {
- if (temp++ > 50000)
- break;
+ speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed * 4);
+ tspi->cur_speed = speed;
}
- spi_tegra_clear_status(tspi);
+ if (is_first_of_msg) {
- val = spi_tegra_readl(tspi, SLINK_STATUS);
- val |= SLINK_RDY;
- spi_tegra_writel(tspi, val, SLINK_STATUS);
+ if (!tspi->is_clkon_always) {
+ if (!tspi->clk_state) {
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
+ }
+ }
- m = list_first_entry(&tspi->queue, struct spi_message, queue);
+ spi_tegra_clear_status(tspi);
- if ((timeout >= 1000) || (temp >= 50000))
- m->status = -EIO;
+ val = tspi->def_command_reg;
+ val |= SLINK_BIT_LENGTH(bits_per_word - 1);
- spi = m->state;
+ val ^= cs_pol_bit[spi->chip_select];
- tspi->cur_pos += tspi->spi_tegra_rx(tspi, tspi->cur);
- m->actual_length += tspi->cur_pos;
+ val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA;
+ if (spi->mode & SPI_CPHA)
+ val |= SLINK_CK_SDA;
- if (!list_is_last(&tspi->cur->transfer_list, &m->transfers)) {
- tspi->cur = list_first_entry(&tspi->cur->transfer_list,
- struct spi_transfer, transfer_list);
- spi_tegra_start_transfer(spi, tspi->cur);
+ if (spi->mode & SPI_CPOL)
+ val |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ val |= SLINK_IDLE_SCLK_DRIVE_LOW;
} else {
- list_del(&m->queue);
-
- m->complete(m->context);
-
- if (!list_empty(&tspi->queue)) {
- m = list_first_entry(&tspi->queue, struct spi_message,
- queue);
- spi = m->state;
- spi_tegra_start_message(spi, m);
- } else {
- clk_disable(tspi->clk);
- tspi->cur_speed = 0;
- }
+ val = tspi->command_reg;
+ val &= ~SLINK_BIT_LENGTH(~0);
+ val |= SLINK_BIT_LENGTH(bits_per_word - 1);
}
-}
-static void tegra_spi_tx_dma_complete(struct tegra_dma_req *req)
-{
- struct spi_tegra_data *tspi = req->dev;
- unsigned long flags;
-
- spin_lock_irqsave(&tspi->lock, flags);
+ spi_tegra_writel(tspi, val, SLINK_COMMAND);
+ tspi->command_reg = val;
- (tspi->tx_complete)++;
+ dev_dbg(&tspi->pdev->dev, "The def 0x%x and written 0x%lx\n",
+ tspi->def_command_reg, val);
- if (((tspi->rx_complete) == 1) && ((tspi->tx_complete) == 1))
- complete_operation(req);
+ val = tspi->def_command2_reg;
+ val &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN);
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ val |= SLINK_RXEN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ val |= SLINK_TXEN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ val |= SLINK_SS_EN_CS(spi->chip_select);
+ spi_tegra_writel(tspi, val, SLINK_COMMAND2);
- spin_unlock_irqrestore(&tspi->lock, flags);
+ tspi->cur = t;
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->rx_complete = 0;
+ tspi->tx_complete = 0;
+ total_fifo_words = spi_tegra_calculate_curr_xfer_param(spi, tspi, t);
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ ret = spi_tegra_start_dma_based_transfer(tspi, t);
+ else
+ ret = spi_tegra_start_cpu_based_transfer(tspi, t);
+ WARN_ON(ret < 0);
}
-static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
+static void spi_tegra_start_message(struct spi_device *spi,
+ struct spi_message *m)
{
- struct spi_tegra_data *tspi = req->dev;
- unsigned long flags;
-
-
- spin_lock_irqsave(&tspi->lock, flags);
-
- (tspi->rx_complete)++;
+ struct spi_transfer *t;
- if (((tspi->rx_complete) == 1) && ((tspi->tx_complete) == 1))
- complete_operation(req);
+ m->actual_length = 0;
+ m->status = 0;
- spin_unlock_irqrestore(&tspi->lock, flags);
+ t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
+ spi_tegra_start_transfer(spi, t, true);
}
static int spi_tegra_setup(struct spi_device *spi)
@@ -585,6 +679,7 @@ static int spi_tegra_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
+ BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
switch (spi->chip_select) {
case 0:
cs_bit = SLINK_CS_POLARITY;
@@ -598,7 +693,7 @@ static int spi_tegra_setup(struct spi_device *spi)
cs_bit = SLINK_CS_POLARITY2;
break;
- case 4:
+ case 3:
cs_bit = SLINK_CS_POLARITY3;
break;
@@ -607,23 +702,15 @@ static int spi_tegra_setup(struct spi_device *spi)
}
spin_lock_irqsave(&tspi->lock, flags);
-
- if (spi->max_speed_hz != tspi->cur_speed)
- clk_set_rate(tspi->clk, spi->max_speed_hz);
-
- if (tspi->cur_speed == 0)
- clk_enable(tspi->clk);
- tspi->cur_speed = spi->max_speed_hz;
-
- val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ val = tspi->def_command_reg;
if (spi->mode & SPI_CS_HIGH)
val |= cs_bit;
else
val &= ~cs_bit;
- spi_tegra_writel(tspi, val, SLINK_COMMAND);
-
+ tspi->def_command_reg |= val;
+ tspi->modes[spi->chip_select] = spi->mode;
+ tspi->max_speed[spi->chip_select] = spi->max_speed_hz;
spin_unlock_irqrestore(&tspi->lock, flags);
-
return 0;
}
@@ -668,11 +755,221 @@ static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
+static void spi_tegra_curr_transfer_complete(struct spi_tegra_data *tspi,
+ unsigned err, unsigned cur_xfer_size)
+{
+ struct spi_message *m;
+ struct spi_device *spi;
+
+ m = list_first_entry(&tspi->queue, struct spi_message, queue);
+ if (err)
+ m->status = -EIO;
+ spi = m->state;
+
+ m->actual_length += cur_xfer_size;
+ if (!list_is_last(&tspi->cur->transfer_list, &m->transfers)) {
+ tspi->cur = list_first_entry(&tspi->cur->transfer_list,
+ struct spi_transfer, transfer_list);
+ spi_tegra_start_transfer(spi, tspi->cur, false);
+ } else {
+ list_del(&m->queue);
+ m->complete(m->context);
+ if (!list_empty(&tspi->queue)) {
+ m = list_first_entry(&tspi->queue, struct spi_message,
+ queue);
+ spi = m->state;
+ spi_tegra_start_message(spi, m);
+ } else {
+ spi_tegra_writel(tspi, tspi->def_command_reg,
+ SLINK_COMMAND);
+ spi_tegra_writel(tspi, tspi->def_command2_reg,
+ SLINK_COMMAND2);
+ if (!tspi->is_clkon_always) {
+ if (tspi->clk_state) {
+ /* Provide delay to stabilize the signal
+ state */
+ udelay(10);
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+ }
+ }
+ }
+}
+
+static void tegra_spi_tx_dma_complete(struct tegra_dma_req *req)
+{
+ struct spi_tegra_data *tspi = req->dev;
+ complete(&tspi->tx_dma_complete);
+}
+
+static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
+{
+ struct spi_tegra_data *tspi = req->dev;
+ complete(&tspi->rx_dma_complete);
+}
+
+static void handle_cpu_based_xfer(void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+ struct spi_transfer *t = tspi->cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status ||
+ (tspi->status_reg & SLINK_BSY)) {
+ dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
+ __func__, tspi->status_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ WARN_ON(1);
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len);
+ goto exit;
+ }
+
+ dev_vdbg(&tspi->pdev->dev, " Current direction %x\n",
+ tspi->cur_direction);
+ if (tspi->cur_direction & DATA_DIR_RX)
+ spi_tegra_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->cur_pos = tspi->cur_rx_pos;
+ else
+ WARN_ON(1);
+
+ dev_vdbg(&tspi->pdev->dev, "current position %d and length of the "
+ "transfer %d\n", tspi->cur_pos, t->len);
+ if (tspi->cur_pos == t->len) {
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len);
+ goto exit;
+ }
+
+ spi_tegra_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ spi_tegra_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return;
+}
+
+static irqreturn_t spi_tegra_isr_thread(int irq, void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+ struct spi_transfer *t = tspi->cur;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ if (!tspi->is_curr_dma_xfer) {
+ handle_cpu_based_xfer(context_data);
+ return IRQ_HANDLED;
+ }
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ tegra_dma_dequeue(tspi->tx_dma);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ tegra_dma_dequeue(tspi->tx_dma);
+ dev_err(&tspi->pdev->dev, "Error in Dma Tx "
+ "transfer\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ tegra_dma_dequeue(tspi->rx_dma);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ tegra_dma_dequeue(tspi->rx_dma);
+ dev_err(&tspi->pdev->dev, "Error in Dma Rx "
+ "transfer\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
+ __func__, tspi->status_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ WARN_ON(1);
+ spi_tegra_curr_transfer_complete(tspi, err, t->len);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ spi_tegra_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->cur_pos = tspi->cur_rx_pos;
+ else
+ WARN_ON(1);
+
+ if (tspi->cur_pos == t->len) {
+ spi_tegra_curr_transfer_complete(tspi,
+ tspi->tx_status || tspi->rx_status, t->len);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = spi_tegra_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ err = spi_tegra_start_dma_based_transfer(tspi, t);
+ else
+ err = spi_tegra_start_cpu_based_transfer(tspi, t);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ WARN_ON(err < 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t spi_tegra_isr(int irq, void *context_data)
+{
+ struct spi_tegra_data *tspi = context_data;
+
+ tspi->status_reg = spi_tegra_readl(tspi, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SLINK_TX_OVF | SLINK_TX_UNF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SLINK_RX_OVF | SLINK_RX_UNF);
+ spi_tegra_clear_status(tspi);
+
+
+ return IRQ_WAKE_THREAD;
+}
+
static int __init spi_tegra_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct spi_tegra_data *tspi;
struct resource *r;
+ struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof *tspi);
@@ -689,7 +986,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
master->setup = spi_tegra_setup;
master->transfer = spi_tegra_transfer;
- master->num_chipselect = 4;
+ master->num_chipselect = MAX_CHIP_SELECT;
dev_set_drvdata(&pdev->dev, master);
tspi = spi_master_get_devdata(master);
@@ -700,13 +997,13 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
ret = -ENODEV;
- goto err0;
+ goto fail_no_mem;
}
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
ret = -EBUSY;
- goto err0;
+ goto fail_no_mem;
}
tspi->phys = r->start;
@@ -714,38 +1011,74 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
if (!tspi->base) {
dev_err(&pdev->dev, "can't ioremap iomem\n");
ret = -ENOMEM;
- goto err1;
+ goto fail_io_map;
+ }
+
+ tspi->irq = platform_get_irq(pdev, 0);
+ if (unlikely(tspi->irq < 0)) {
+ dev_err(&pdev->dev, "can't find irq resource\n");
+ ret = -ENXIO;
+ goto fail_irq_req;
+ }
+
+ sprintf(tspi->port_name, "tegra_spi_%d", pdev->id);
+ ret = request_threaded_irq(tspi->irq, spi_tegra_isr,
+ spi_tegra_isr_thread, IRQF_DISABLED,
+ tspi->port_name, tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto fail_irq_req;
}
tspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
- goto err2;
+ goto fail_clk_get;
}
INIT_LIST_HEAD(&tspi->queue);
+ if (pdata) {
+ tspi->is_clkon_always = pdata->is_clkon_always;
+ tspi->is_dma_allowed = pdata->is_dma_based;
+ tspi->dma_buf_size = (pdata->max_dma_buffer) ?
+ pdata->max_dma_buffer : DEFAULT_SPI_DMA_BUF_LEN;
+ } else {
+ tspi->is_clkon_always = false;
+ tspi->is_dma_allowed = true;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+ }
+ tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
+
+ if (!tspi->is_dma_allowed)
+ goto skip_dma_alloc;
+
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+
tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
if (!tspi->rx_dma) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
ret = -ENODEV;
- goto err3;
+ goto fail_rx_dma_alloc;
}
- tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
- &tspi->rx_bb_phys, GFP_KERNEL);
- if (!tspi->rx_bb) {
+ tspi->rx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
+ &tspi->rx_buf_phys, GFP_KERNEL);
+ if (!tspi->rx_buf) {
dev_err(&pdev->dev, "can not allocate rx bounce buffer\n");
ret = -ENOMEM;
- goto err4;
+ goto fail_rx_buf_alloc;
}
memset(&tspi->rx_dma_req, 0, sizeof(struct tegra_dma_req));
tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
tspi->rx_dma_req.to_memory = 1;
- tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys;
- tspi->rx_dma_req.virt_addr = tspi->rx_bb;
+ tspi->rx_dma_req.dest_addr = tspi->rx_buf_phys;
+ tspi->rx_dma_req.virt_addr = tspi->rx_buf;
tspi->rx_dma_req.dest_bus_width = 32;
tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
tspi->rx_dma_req.source_bus_width = 32;
@@ -755,57 +1088,77 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
tspi->rx_dma_req.dev = tspi;
tspi->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
- if (IS_ERR(tspi->tx_dma)) {
+ if (!tspi->tx_dma) {
dev_err(&pdev->dev, "can not allocate tx dma channel\n");
- ret = PTR_ERR(tspi->tx_dma);
- goto err5;
+ ret = -ENODEV;
+ goto fail_tx_dma_alloc;
}
- tspi->tx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
- &tspi->tx_bb_phys, GFP_KERNEL);
- if (!tspi->tx_bb) {
+ tspi->tx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
+ &tspi->tx_buf_phys, GFP_KERNEL);
+ if (!tspi->tx_buf) {
dev_err(&pdev->dev, "can not allocate tx bounce buffer\n");
ret = -ENOMEM;
- goto err6;
+ goto fail_tx_buf_alloc;
}
memset(&tspi->tx_dma_req, 0, sizeof(struct tegra_dma_req));
tspi->tx_dma_req.complete = tegra_spi_tx_dma_complete;
tspi->tx_dma_req.to_memory = 0;
tspi->tx_dma_req.dest_addr = tspi->phys + SLINK_TX_FIFO;
- tspi->tx_dma_req.virt_addr = tspi->tx_bb;
+ tspi->tx_dma_req.virt_addr = tspi->tx_buf;
tspi->tx_dma_req.dest_bus_width = 32;
tspi->tx_dma_req.dest_wrap = 4;
tspi->tx_dma_req.source_wrap = 0;
- tspi->tx_dma_req.source_addr = tspi->tx_bb_phys;
+ tspi->tx_dma_req.source_addr = tspi->tx_buf_phys;
tspi->tx_dma_req.source_bus_width = 32;
tspi->tx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
tspi->tx_dma_req.dev = tspi;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ tspi->def_command_reg = SLINK_CS_SW | SLINK_M_S;
+ tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
+
+skip_dma_alloc:
+ clk_enable(tspi->clk);
+ tspi->clk_state = 1;
master->dev.of_node = pdev->dev.of_node;
ret = spi_register_master(master);
+ if (!tspi->is_clkon_always) {
+ if (tspi->clk_state) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+ }
- if (ret < 0)
- goto err7;
-
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register master, err %d\n", ret);
+ goto fail_master_register;
+ }
return ret;
-err7:
- dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
- tspi->tx_bb, tspi->tx_bb_phys);
-err6:
- tegra_dma_free_channel(tspi->tx_dma);
-err5:
- dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
- tspi->rx_bb, tspi->rx_bb_phys);
-err4:
- tegra_dma_free_channel(tspi->rx_dma);
-err3:
+fail_master_register:
+ if (tspi->tx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->tx_buf, tspi->tx_buf_phys);
+fail_tx_buf_alloc:
+ if (tspi->tx_dma)
+ tegra_dma_free_channel(tspi->tx_dma);
+fail_tx_dma_alloc:
+ if (tspi->rx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->rx_buf, tspi->rx_buf_phys);
+fail_rx_buf_alloc:
+ if (tspi->rx_dma)
+ tegra_dma_free_channel(tspi->rx_dma);
+fail_rx_dma_alloc:
clk_put(tspi->clk);
-err2:
+fail_clk_get:
+ free_irq(tspi->irq, tspi);
+fail_irq_req:
iounmap(tspi->base);
-err1:
+fail_io_map:
release_mem_region(r->start, resource_size(r));
-err0:
+fail_no_mem:
spi_master_put(master);
return ret;
}
@@ -820,10 +1173,21 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
tspi = spi_master_get_devdata(master);
spi_unregister_master(master);
- tegra_dma_free_channel(tspi->rx_dma);
-
- dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
- tspi->rx_bb, tspi->rx_bb_phys);
+ if (tspi->tx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->tx_buf, tspi->tx_buf_phys);
+ if (tspi->tx_dma)
+ tegra_dma_free_channel(tspi->tx_dma);
+ if (tspi->rx_buf)
+ dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
+ tspi->rx_buf, tspi->rx_buf_phys);
+ if (tspi->rx_dma)
+ tegra_dma_free_channel(tspi->rx_dma);
+
+ if (tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
clk_put(tspi->clk);
iounmap(tspi->base);
@@ -846,16 +1210,20 @@ static int spi_tegra_suspend(struct platform_device *pdev, pm_message_t state)
tspi = spi_master_get_devdata(master);
spin_lock_irqsave(&tspi->lock, flags);
tspi->is_suspended = true;
+
WARN_ON(!list_empty(&tspi->queue));
while (!list_empty(&tspi->queue) && limit--) {
spin_unlock_irqrestore(&tspi->lock, flags);
- msleep(10);
+ msleep(20);
spin_lock_irqsave(&tspi->lock, flags);
}
- tspi->save_slink_cmd = spi_tegra_readl(tspi, SLINK_COMMAND);
spin_unlock_irqrestore(&tspi->lock, flags);
+ if (tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
return 0;
}
@@ -867,10 +1235,16 @@ static int spi_tegra_resume(struct platform_device *pdev)
master = dev_get_drvdata(&pdev->dev);
tspi = spi_master_get_devdata(master);
+
spin_lock_irqsave(&tspi->lock, flags);
clk_enable(tspi->clk);
- spi_tegra_writel(tspi, tspi->save_slink_cmd, SLINK_COMMAND);
- clk_disable(tspi->clk);
+ tspi->clk_state = 1;
+ spi_tegra_writel(tspi, tspi->command_reg, SLINK_COMMAND);
+ if (!tspi->is_clkon_always) {
+ clk_disable(tspi->clk);
+ tspi->clk_state = 0;
+ }
+
tspi->cur_speed = 0;
tspi->is_suspended = false;
spin_unlock_irqrestore(&tspi->lock, flags);
@@ -907,7 +1281,7 @@ static int __init spi_tegra_init(void)
{
return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
}
-module_init(spi_tegra_init);
+subsys_initcall(spi_tegra_init);
static void __exit spi_tegra_exit(void)
{