Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/devices/Kconfig       |    6
-rw-r--r--  drivers/mtd/devices/Makefile      |    2
-rw-r--r--  drivers/mtd/devices/tegra_nand.c  | 1778
-rw-r--r--  drivers/mtd/devices/tegra_nand.h  |  148
-rw-r--r--  drivers/mtd/maps/Kconfig          |    7
-rw-r--r--  drivers/mtd/maps/Makefile         |    1
-rw-r--r--  drivers/mtd/maps/tegra_nor.c      |  476
-rw-r--r--  drivers/mtd/nand/Kconfig          |   24
-rw-r--r--  drivers/mtd/nand/nand_base.c      |   39
9 files changed, 2481 insertions, 0 deletions
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35081ce77fbd..943d90f08c08 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -78,6 +78,12 @@ config MTD_DATAFLASH_OTP
other key product data. The second half is programmed with a
unique-to-each-chip bit pattern at the factory.
+config MTD_NAND_TEGRA
+ tristate "Support for NAND Controller on NVIDIA Tegra"
+ depends on ARCH_TEGRA
+ help
+ Enables NAND flash support for NVIDIA's Tegra family of chips.
+
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
depends on SPI_MASTER && EXPERIMENTAL
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f3226b1d38fc..67345a00a5ab 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,6 +1,7 @@
#
# linux/drivers/mtd/devices/Makefile
#
+GCOV_PROFILE := y
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
obj-$(CONFIG_MTD_DOC2001) += doc2001.o
@@ -17,3 +18,4 @@ obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
+obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
diff --git a/drivers/mtd/devices/tegra_nand.c b/drivers/mtd/devices/tegra_nand.c
new file mode 100644
index 000000000000..ba90925285e4
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.c
@@ -0,0 +1,1778 @@
+/*
+ * drivers/mtd/devices/tegra_nand.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * Copyright (C) 2010-2011 Nvidia Graphics Pvt. Ltd.
+ * http://www.nvidia.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Derived from: drivers/mtd/nand/nand_base.c
+ * drivers/mtd/nand/pxa3xx.c
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+#include <mach/nand.h>
+
+#include "tegra_nand.h"
+
+#define DRIVER_NAME "tegra_nand"
+#define DRIVER_DESC "Nvidia Tegra NAND Flash Controller driver"
+
+#define MAX_DMA_SZ SZ_64K
+#define ECC_BUF_SZ SZ_1K
+
+/* FIXME: is this right?!
+ * NvRM code says it should be 128 bytes, but that seems awfully small
+ */
+
+/* #define TEGRA_NAND_DEBUG */
+/* #define TEGRA_NAND_DEBUG_PEDANTIC */
+
+#ifdef TEGRA_NAND_DEBUG
+#define TEGRA_DBG(fmt, args...) \
+ do { pr_info(fmt, ##args); } while (0)
+#else
+#define TEGRA_DBG(fmt, args...)
+#endif
+
+/* TODO: will vary with devices, move into an appropriate device-specific header */
+#define SCAN_TIMING_VAL 0x3f0bd214
+#define SCAN_TIMING2_VAL 0xb
+
+#define TIMEOUT (2 * HZ)
+/* TODO: pull in the register defs (fields, masks, etc) from Nvidia files
+ * so we don't have to redefine them */
+
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+
+struct tegra_nand_chip {
+ spinlock_t lock;
+ uint32_t chipsize;
+ int num_chips;
+ int curr_chip;
+
+ /* addr >> chip_shift == chip number */
+ uint32_t chip_shift;
+ /* (addr >> page_shift) & page_mask == page number within chip */
+ uint32_t page_shift;
+ uint32_t page_mask;
+ /* column within page */
+ uint32_t column_mask;
+ /* addr >> block_shift == block number (across the whole mtd dev, not
+ * just a single chip). */
+ uint32_t block_shift;
+
+ void *priv;
+};
+
+struct tegra_nand_info {
+ struct tegra_nand_chip chip;
+ struct mtd_info mtd;
+ struct tegra_nand_platform *plat;
+ struct device *dev;
+ struct mtd_partition *parts;
+
+ /* serializes access to the actual NAND controller */
+ struct mutex lock;
+ /* partial_unaligned_rw_buffer is a temporary buffer used when
+ reading unaligned data from NAND pages, or when the data to be
+ read is smaller than the NAND page size.
+ */
+ uint8_t *partial_unaligned_rw_buffer;
+
+ void *oob_dma_buf;
+ dma_addr_t oob_dma_addr;
+ /* ecc error vector info (offset into page and data mask to apply) */
+ void *ecc_buf;
+ dma_addr_t ecc_addr;
+ /* ecc error status (page number, err_cnt) */
+ uint32_t *ecc_errs;
+ uint32_t num_ecc_errs;
+ uint32_t max_ecc_errs;
+ spinlock_t ecc_lock;
+
+ uint32_t command_reg;
+ uint32_t config_reg;
+ uint32_t dmactrl_reg;
+
+ struct completion cmd_complete;
+ struct completion dma_complete;
+
+ /* bad block bitmap: 1 == good, 0 == bad/unknown */
+ unsigned long *bb_bitmap;
+
+ struct clk *clk;
+ uint32_t is_data_bus_width_16;
+ uint32_t device_id;
+ uint32_t vendor_id;
+ uint32_t num_bad_blocks;
+};
+#define MTD_TO_INFO(mtd) container_of((mtd), struct tegra_nand_info, mtd)
+
+/* 64 byte oob block info for large page (== 2KB) device
+ *
+ * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC:
+ * Skipped bytes(4)
+ * Main area Ecc(36)
+ * Tag data(20)
+ * Tag data Ecc(4)
+ *
+ * Yaffs2 will use 16 tag bytes.
+ */
+
+static struct nand_ecclayout tegra_nand_oob_64 = {
+ .eccbytes = 36,
+ .eccpos = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobavail = 20,
+ .oobfree = {
+ {.offset = 40,
+ .length = 20,
+ },
+ },
+};
+
+static struct nand_ecclayout tegra_nand_oob_128 = {
+ .eccbytes = 72,
+ .eccpos = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ /* eccpos[] can only hold 64 entries, so the remaining ECC byte
+ * positions are omitted here. Since the driver uses hardware
+ * ECC, this is not a problem.
+ */
+ /*67, 68, 69, 70, 71, 72, 73, 74, 75, */
+ },
+ .oobavail = 48,
+ .oobfree = {
+ {.offset = 76,
+ .length = 48,
+ },
+ },
+};
+
+static struct nand_flash_dev *find_nand_flash_device(int dev_id)
+{
+ struct nand_flash_dev *dev = &nand_flash_ids[0];
+
+ while (dev->name && dev->id != dev_id)
+ dev++;
+ return dev->name ? dev : NULL;
+}
+
+static struct nand_manufacturers *find_nand_flash_vendor(int vendor_id)
+{
+ struct nand_manufacturers *vendor = &nand_manuf_ids[0];
+
+ while (vendor->id && vendor->id != vendor_id)
+ vendor++;
+ return vendor->id ? vendor : NULL;
+}
+
+#define REG_NAME(name) { name, #name }
+static struct {
+ uint32_t addr;
+ char *name;
+} reg_names[] = {
+ REG_NAME(COMMAND_REG),
+ REG_NAME(STATUS_REG),
+ REG_NAME(ISR_REG),
+ REG_NAME(IER_REG),
+ REG_NAME(CONFIG_REG),
+ REG_NAME(TIMING_REG),
+ REG_NAME(RESP_REG),
+ REG_NAME(TIMING2_REG),
+ REG_NAME(CMD_REG1),
+ REG_NAME(CMD_REG2),
+ REG_NAME(ADDR_REG1),
+ REG_NAME(ADDR_REG2),
+ REG_NAME(DMA_MST_CTRL_REG),
+ REG_NAME(DMA_CFG_A_REG),
+ REG_NAME(DMA_CFG_B_REG),
+ REG_NAME(FIFO_CTRL_REG),
+ REG_NAME(DATA_BLOCK_PTR_REG),
+ REG_NAME(TAG_PTR_REG),
+ REG_NAME(ECC_PTR_REG),
+ REG_NAME(DEC_STATUS_REG),
+ REG_NAME(HWSTATUS_CMD_REG),
+ REG_NAME(HWSTATUS_MASK_REG),
+ {0, NULL},
+};
+
+#undef REG_NAME
+
+static int dump_nand_regs(void)
+{
+ int i = 0;
+
+ TEGRA_DBG("%s: dumping registers\n", __func__);
+ while (reg_names[i].name != NULL) {
+ TEGRA_DBG("%s = 0x%08x\n", reg_names[i].name,
+ readl(reg_names[i].addr));
+ i++;
+ }
+ TEGRA_DBG("%s: end of reg dump\n", __func__);
+ return 1;
+}
+
+static inline void enable_ints(struct tegra_nand_info *info, uint32_t mask)
+{
+ (void)info;
+ writel(readl(IER_REG) | mask, IER_REG);
+}
+
+static inline void disable_ints(struct tegra_nand_info *info, uint32_t mask)
+{
+ (void)info;
+ writel(readl(IER_REG) & ~mask, IER_REG);
+}
+
+static inline void
+split_addr(struct tegra_nand_info *info, loff_t offset, int *chipnr,
+ uint32_t *page, uint32_t *column)
+{
+ *chipnr = (int)(offset >> info->chip.chip_shift);
+ *page = (offset >> info->chip.page_shift) & info->chip.page_mask;
+ *column = offset & info->chip.column_mask;
+}
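+
+/* Illustrative example (not in the original source): with 2 KiB pages,
+ * page_shift == 11 and column_mask == 0x7ff, so offset 0x21804 splits
+ * into page 0x43 within the chip and column 0x4.
+ */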
+
+static irqreturn_t tegra_nand_irq(int irq, void *dev_id)
+{
+ struct tegra_nand_info *info = dev_id;
+ uint32_t isr;
+ uint32_t ier;
+ uint32_t dma_ctrl;
+ uint32_t tmp;
+
+ isr = readl(ISR_REG);
+ ier = readl(IER_REG);
+ dma_ctrl = readl(DMA_MST_CTRL_REG);
+#ifdef DEBUG_DUMP_IRQ
+ pr_info("IRQ: ISR=0x%08x IER=0x%08x DMA_IS=%d DMA_IE=%d\n",
+ isr, ier, !!(dma_ctrl & (1 << 20)), !!(dma_ctrl & (1 << 28)));
+#endif
+ if (isr & ISR_CMD_DONE) {
+ if (likely(!(readl(COMMAND_REG) & COMMAND_GO)))
+ complete(&info->cmd_complete);
+ else
+ pr_err("tegra_nand_irq: Spurious cmd done irq!\n");
+ }
+
+ if (isr & ISR_ECC_ERR) {
+ /* always want to read the decode status so xfers don't stall. */
+ tmp = readl(DEC_STATUS_REG);
+
+ /* was ECC check actually enabled */
+ if ((ier & IER_ECC_ERR)) {
+ unsigned long flags;
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ info->ecc_errs[info->num_ecc_errs++] = tmp;
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+ }
+ }
+
+ if ((dma_ctrl & DMA_CTRL_IS_DMA_DONE) &&
+ (dma_ctrl & DMA_CTRL_IE_DMA_DONE)) {
+ complete(&info->dma_complete);
+ writel(dma_ctrl, DMA_MST_CTRL_REG);
+ }
+
+ if ((isr & ISR_UND) && (ier & IER_UND))
+ pr_err("%s: fifo underrun.\n", __func__);
+
+ if ((isr & ISR_OVR) && (ier & IER_OVR))
+ pr_err("%s: fifo overrun.\n", __func__);
+
+ /* clear ALL interrupts?! */
+ writel(isr & 0xfffc, ISR_REG);
+
+ return IRQ_HANDLED;
+}
+
+static inline int tegra_nand_is_cmd_done(struct tegra_nand_info *info)
+{
+ return (readl(COMMAND_REG) & COMMAND_GO) ? 0 : 1;
+}
+
+static int tegra_nand_wait_cmd_done(struct tegra_nand_info *info)
+{
+ uint32_t timeout = TIMEOUT; /* TODO: make this realistic */
+ int ret;
+
+ ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
+
+#ifdef TEGRA_NAND_DEBUG_PEDANTIC
+ BUG_ON(!ret && dump_nand_regs());
+#endif
+
+ return ret ? 0 : -ETIMEDOUT;
+}
+
+static inline void select_chip(struct tegra_nand_info *info, int chipnr)
+{
+ BUG_ON(chipnr != -1 && chipnr >= info->plat->max_chips);
+ info->chip.curr_chip = chipnr;
+}
+
+static void cfg_hwstatus_mon(struct tegra_nand_info *info)
+{
+ uint32_t val;
+
+ val = (HWSTATUS_RDSTATUS_MASK(1) |
+ HWSTATUS_RDSTATUS_EXP_VAL(0) |
+ HWSTATUS_RBSY_MASK(NAND_STATUS_READY) |
+ HWSTATUS_RBSY_EXP_VAL(NAND_STATUS_READY));
+ writel(NAND_CMD_STATUS, HWSTATUS_CMD_REG);
+ writel(val, HWSTATUS_MASK_REG);
+}
+
+/* Tells the NAND controller to initiate the command. */
+static int tegra_nand_go(struct tegra_nand_info *info)
+{
+ BUG_ON(!tegra_nand_is_cmd_done(info));
+
+ INIT_COMPLETION(info->cmd_complete);
+ writel(info->command_reg | COMMAND_GO, COMMAND_REG);
+
+ if (unlikely(tegra_nand_wait_cmd_done(info))) {
+ /* TODO: abort command if needed? */
+ pr_err("%s: Timeout while waiting for command\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* TODO: maybe wait for dma here? */
+ return 0;
+}
+
+static void tegra_nand_prep_readid(struct tegra_nand_info *info)
+{
+ info->command_reg =
+ (COMMAND_CLE | COMMAND_ALE | COMMAND_PIO | COMMAND_RX |
+ COMMAND_ALE_BYTE_SIZE(0) | COMMAND_TRANS_SIZE(3) |
+ (COMMAND_CE(info->chip.curr_chip)));
+ writel(NAND_CMD_READID, CMD_REG1);
+ writel(0, CMD_REG2);
+ writel(0, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(0, CONFIG_REG);
+}
+
+static int
+tegra_nand_cmd_readid(struct tegra_nand_info *info, uint32_t *chip_id)
+{
+ int err;
+
+#ifdef TEGRA_NAND_DEBUG_PEDANTIC
+ BUG_ON(info->chip.curr_chip == -1);
+#endif
+
+ tegra_nand_prep_readid(info);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ return err;
+
+ *chip_id = readl(RESP_REG);
+ return 0;
+}
+
+/* assumes right locks are held */
+static int nand_cmd_get_status(struct tegra_nand_info *info, uint32_t *status)
+{
+ int err;
+
+ info->command_reg = (COMMAND_CLE | COMMAND_PIO | COMMAND_RX |
+ COMMAND_RBSY_CHK |
+ (COMMAND_CE(info->chip.curr_chip)));
+ writel(NAND_CMD_STATUS, CMD_REG1);
+ writel(0, CMD_REG2);
+ writel(0, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ err = tegra_nand_go(info);
+ if (err != 0)
+ return err;
+
+ *status = readl(RESP_REG) & 0xff;
+ return 0;
+}
+
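+/* A block is treated as bad when the bad-block marker (the first OOB
+ * bytes) of either of its first two pages reads back as anything other
+ * than all 0xff; known-good blocks are cached in bb_bitmap so the flash
+ * read can be skipped next time.
+ */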
+/* must be called with lock held */
+static int check_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t block = offs >> info->chip.block_shift;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ int ret = 0;
+ int i;
+
+ if (info->bb_bitmap[BIT_WORD(block)] & BIT_MASK(block))
+ return 0;
+
+ offs &= ~(mtd->erasesize - 1);
+
+ if (info->is_data_bus_width_16)
+ writel(CONFIG_COM_BSY | CONFIG_BUS_WIDTH, CONFIG_REG);
+ else
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ split_addr(info, offs, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
+
+ /* check the first two pages of the block */
+ if (info->is_data_bus_width_16)
+ column = column >> 1;
+ for (i = 0; i < 2; ++i) {
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
+ COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_RX |
+ COMMAND_PIO | COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID |
+ COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
+ writel(NAND_CMD_READ0, CMD_REG1);
+ writel(NAND_CMD_READSTART, CMD_REG2);
+
+ writel(column | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+
+ /* ... poison me ... */
+ writel(0xaa55aa55, RESP_REG);
+ ret = tegra_nand_go(info);
+ if (ret != 0) {
+ pr_info("baaaaaad\n");
+ goto out;
+ }
+
+ if ((readl(RESP_REG) & 0xffff) != 0xffff) {
+ ret = 1;
+ goto out;
+ }
+
+ /* Note: the assumption here is that we cannot cross a chip
+ * boundary, since we only look at the first two pages of a
+ * block, i.e. erasesize > writesize ALWAYS */
+ page++;
+ }
+
+out:
+ /* update the bitmap if the block is good */
+ if (ret == 0)
+ set_bit(block, info->bb_bitmap);
+ return ret;
+}
+
+static int tegra_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ int ret;
+
+ if (offs >= mtd->size)
+ return -EINVAL;
+
+ mutex_lock(&info->lock);
+ ret = check_block_isbad(mtd, offs);
+ mutex_unlock(&info->lock);
+
+#if 0
+ if (ret > 0)
+ pr_info("block @ 0x%llx is bad.\n", offs);
+ else if (ret < 0)
+ pr_err("error checking block @ 0x%llx for badness.\n", offs);
+#endif
+
+ return ret;
+}
+
+static int tegra_nand_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t block = offs >> info->chip.block_shift;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ int ret = 0;
+ int i;
+
+ if (offs >= mtd->size)
+ return -EINVAL;
+
+ pr_info("tegra_nand: setting block %d bad\n", block);
+
+ mutex_lock(&info->lock);
+ offs &= ~(mtd->erasesize - 1);
+
+ /* mark the block bad in our bitmap */
+ clear_bit(block, info->bb_bitmap);
+ mtd->ecc_stats.badblocks++;
+
+ if (info->is_data_bus_width_16)
+ writel(CONFIG_COM_BSY | CONFIG_BUS_WIDTH, CONFIG_REG);
+ else
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ split_addr(info, offs, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
+ if (info->is_data_bus_width_16)
+ column = column >> 1;
+ /* write to the first two pages in the block */
+ for (i = 0; i < 2; ++i) {
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
+ COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_TX |
+ COMMAND_PIO | COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID |
+ COMMAND_RBSY_CHK | COMMAND_AFT_DAT | COMMAND_SEC_CMD;
+ writel(NAND_CMD_SEQIN, CMD_REG1);
+ writel(NAND_CMD_PAGEPROG, CMD_REG2);
+
+ writel(column | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+
+ writel(0x0, RESP_REG);
+ ret = tegra_nand_go(info);
+ if (ret != 0)
+ goto out;
+
+ /* TODO: check if the program op worked? */
+ page++;
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int tegra_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t num_blocks;
+ uint32_t offs;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint32_t status = 0;
+
+ TEGRA_DBG("tegra_nand_erase: addr=0x%08llx len=%lld\n", instr->addr,
+ instr->len);
+
+ if ((instr->addr + instr->len) > mtd->size) {
+ pr_err("tegra_nand_erase: Can't erase past end of device\n");
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ if (instr->addr & (mtd->erasesize - 1)) {
+ pr_err("tegra_nand_erase: addr=0x%08llx not block-aligned\n",
+ instr->addr);
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ if (instr->len & (mtd->erasesize - 1)) {
+ pr_err("tegra_nand_erase: len=%lld not block-aligned\n",
+ instr->len);
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ instr->fail_addr = 0xffffffff;
+
+ mutex_lock(&info->lock);
+
+ instr->state = MTD_ERASING;
+
+ offs = instr->addr;
+ num_blocks = instr->len >> info->chip.block_shift;
+
+ select_chip(info, -1);
+
+ while (num_blocks--) {
+ split_addr(info, offs, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ TEGRA_DBG("tegra_nand_erase: addr=0x%08x, page=0x%08x\n", offs,
+ page);
+
+ if (check_block_isbad(mtd, offs)) {
+ pr_info("%s: skipping bad block @ 0x%08x\n", __func__,
+ offs);
+ goto next_block;
+ }
+
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
+ COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(2) |
+ COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
+ writel(NAND_CMD_ERASE1, CMD_REG1);
+ writel(NAND_CMD_ERASE2, CMD_REG2);
+
+ writel(page & 0xffffff, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ if (tegra_nand_go(info) != 0) {
+ instr->fail_addr = offs;
+ goto out_err;
+ }
+
+ /* TODO: do we want a timeout here? */
+ if ((nand_cmd_get_status(info, &status) != 0) ||
+ (status & NAND_STATUS_FAIL) ||
+ ((status & NAND_STATUS_READY) != NAND_STATUS_READY)) {
+ instr->fail_addr = offs;
+ pr_info("%s: erase failed @ 0x%08x (stat=0x%08x)\n",
+ __func__, offs, status);
+ goto out_err;
+ }
+next_block:
+ offs += mtd->erasesize;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mutex_unlock(&info->lock);
+ mtd_erase_callback(instr);
+ return 0;
+
+out_err:
+ instr->state = MTD_ERASE_FAILED;
+ mutex_unlock(&info->lock);
+ return -EIO;
+}
+
+static inline void dump_mtd_oob_ops(struct mtd_oob_ops *ops)
+{
+ pr_info("%s: oob_ops: mode=%s len=0x%x ooblen=0x%x "
+ "ooboffs=0x%x dat=0x%p oob=0x%p\n", __func__,
+ (ops->mode == MTD_OOB_AUTO ? "MTD_OOB_AUTO" :
+ (ops->mode ==
+ MTD_OOB_PLACE ? "MTD_OOB_PLACE" : "MTD_OOB_RAW")), ops->len,
+ ops->ooblen, ops->ooboffs, ops->datbuf, ops->oobbuf);
+}
+
+static int
+tegra_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ pr_debug("%s: read: from=0x%llx len=0x%x\n", __func__, from, len);
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = len;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = mtd->read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static void
+correct_ecc_errors_on_blank_page(struct tegra_nand_info *info, u8 *datbuf,
+ u8 *oobbuf, unsigned int a_len,
+ unsigned int b_len)
+{
+ int i;
+ int all_ff = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ if (info->num_ecc_errs) {
+ if (datbuf) {
+ for (i = 0; i < a_len; i++)
+ if (datbuf[i] != 0xFF)
+ all_ff = 0;
+ }
+ if (oobbuf) {
+ for (i = 0; i < b_len; i++)
+ if (oobbuf[i] != 0xFF)
+ all_ff = 0;
+ }
+ if (all_ff)
+ info->num_ecc_errs = 0;
+ }
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+}
+
+static void update_ecc_counts(struct tegra_nand_info *info, int check_oob)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ for (i = 0; i < info->num_ecc_errs; ++i) {
+ /* correctable */
+ info->mtd.ecc_stats.corrected +=
+ DEC_STATUS_ERR_CNT(info->ecc_errs[i]);
+
+ /* uncorrectable */
+ if (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_A)
+ info->mtd.ecc_stats.failed++;
+ if (check_oob && (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_B))
+ info->mtd.ecc_stats.failed++;
+ }
+ info->num_ecc_errs = 0;
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+}
+
+static inline void clear_regs(struct tegra_nand_info *info)
+{
+ info->command_reg = 0;
+ info->config_reg = 0;
+ info->dmactrl_reg = 0;
+}
+
+static void
+prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc,
+ uint32_t page, uint32_t column, dma_addr_t data_dma,
+ uint32_t data_len, dma_addr_t oob_dma, uint32_t oob_len)
+{
+ uint32_t tag_sz = oob_len;
+
+ uint32_t page_size_sel = (info->mtd.writesize >> 11) + 2;
+#if 0
+ pr_info("%s: rx=%d ecc=%d page=%d col=%d data_dma=0x%x "
+ "data_len=0x%08x oob_dma=0x%x ooblen=%d\n", __func__,
+ rx, do_ecc, page, column, data_dma, data_len, oob_dma, oob_len);
+#endif
+
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(4) | COMMAND_SEC_CMD | COMMAND_RBSY_CHK |
+ COMMAND_TRANS_SIZE(8);
+
+ info->config_reg = (CONFIG_PIPELINE_EN | CONFIG_EDO_MODE |
+ CONFIG_COM_BSY);
+ if (info->is_data_bus_width_16)
+ info->config_reg |= CONFIG_BUS_WIDTH;
+ info->dmactrl_reg = (DMA_CTRL_DMA_GO |
+ DMA_CTRL_DMA_PERF_EN | DMA_CTRL_IE_DMA_DONE |
+ DMA_CTRL_IS_DMA_DONE | DMA_CTRL_BURST_SIZE(4));
+
+ if (rx) {
+ if (do_ecc)
+ info->config_reg |= CONFIG_HW_ERR_CORRECTION;
+ info->command_reg |= COMMAND_RX;
+ info->dmactrl_reg |= DMA_CTRL_REUSE_BUFFER;
+ writel(NAND_CMD_READ0, CMD_REG1);
+ writel(NAND_CMD_READSTART, CMD_REG2);
+ } else {
+ info->command_reg |= (COMMAND_TX | COMMAND_AFT_DAT);
+ info->dmactrl_reg |= DMA_CTRL_DIR; /* DMA_RD == TX */
+ writel(NAND_CMD_SEQIN, CMD_REG1);
+ writel(NAND_CMD_PAGEPROG, CMD_REG2);
+ }
+
+ if (data_len) {
+ if (do_ecc)
+ info->config_reg |= CONFIG_HW_ECC | CONFIG_ECC_SEL;
+ info->config_reg |=
+ CONFIG_PAGE_SIZE_SEL(page_size_sel) | CONFIG_TVALUE(0) |
+ CONFIG_SKIP_SPARE | CONFIG_SKIP_SPARE_SEL(0);
+ info->command_reg |= COMMAND_A_VALID;
+ info->dmactrl_reg |= DMA_CTRL_DMA_EN_A;
+ writel(DMA_CFG_BLOCK_SIZE(data_len - 1), DMA_CFG_A_REG);
+ writel(data_dma, DATA_BLOCK_PTR_REG);
+ } else {
+ column = info->mtd.writesize;
+ if (do_ecc)
+ column += info->mtd.ecclayout->oobfree[0].offset;
+ writel(0, DMA_CFG_A_REG);
+ writel(0, DATA_BLOCK_PTR_REG);
+ }
+
+ if (oob_len) {
+ if (do_ecc) {
+ oob_len = info->mtd.oobavail;
+ tag_sz = info->mtd.oobavail;
+ tag_sz += 4; /* size of tag ecc */
+ if (rx)
+ oob_len += 4; /* size of tag ecc */
+ info->config_reg |= CONFIG_ECC_EN_TAG;
+ }
+ if (data_len && rx)
+ oob_len += 4; /* num of skipped bytes */
+
+ info->command_reg |= COMMAND_B_VALID;
+ info->config_reg |= CONFIG_TAG_BYTE_SIZE(tag_sz - 1);
+ info->dmactrl_reg |= DMA_CTRL_DMA_EN_B;
+ writel(DMA_CFG_BLOCK_SIZE(oob_len - 1), DMA_CFG_B_REG);
+ writel(oob_dma, TAG_PTR_REG);
+ } else {
+ writel(0, DMA_CFG_B_REG);
+ writel(0, TAG_PTR_REG);
+ }
+ /* For a 16-bit bus we need to divide the column number by 2 */
+ if (info->is_data_bus_width_16)
+ column = column >> 1;
+ writel((column & 0xffff) | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+}
+
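+/* Map a buffer for NAND DMA. The buffer may live in the vmalloc area
+ * (where virt_to_page() is not valid); such buffers are translated with
+ * vmalloc_to_page() and, since only a single page is mapped here, must
+ * not cross a page boundary.
+ */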
+static dma_addr_t
+tegra_nand_dma_map(struct device *dev, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ if (virt_addr_valid(addr))
+ page = virt_to_page(addr);
+ else {
+ if (WARN_ON(size + offset > PAGE_SIZE))
+ return ~0;
+ page = vmalloc_to_page(addr);
+ }
+ return dma_map_page(dev, page, offset, size, dir);
+}
+
+static ssize_t show_vendor_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ return sprintf(buf, "0x%x\n", info->vendor_id);
+}
+
+static DEVICE_ATTR(vendor_id, S_IRUSR, show_vendor_id, NULL);
+
+static ssize_t show_device_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ return sprintf(buf, "0x%x\n", info->device_id);
+}
+
+static DEVICE_ATTR(device_id, S_IRUSR, show_device_id, NULL);
+
+static ssize_t show_flash_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ struct mtd_info *mtd = &info->mtd;
+ return sprintf(buf, "%llu bytes\n", mtd->size);
+}
+
+static DEVICE_ATTR(flash_size, S_IRUSR, show_flash_size, NULL);
+
+static ssize_t show_num_bad_blocks(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", info->num_bad_blocks);
+}
+
+static DEVICE_ATTR(num_bad_blocks, S_IRUSR, show_num_bad_blocks, NULL);
+
+static ssize_t show_bb_bitmap(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(dev);
+ struct mtd_info *mtd = &info->mtd;
+ int num_blocks = mtd->size >> info->chip.block_shift, i, ret = 0, size =
+ 0;
+
+ for (i = 0; i < num_blocks / (8 * sizeof(unsigned long)); i++) {
+ size = sprintf(buf, "0x%lx\n", info->bb_bitmap[i]);
+ ret += size;
+ buf += size;
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(bb_bitmap, S_IRUSR, show_bb_bitmap, NULL);
+
+/*
+ * Independent of the OOB mode, we read the main data and the OOB data
+ * from the oobfree areas specified by the nand_ecclayout.
+ * This function also checks the partial_unaligned_rw_buffer pool:
+ * if the address is already present and not marked 'unused', the
+ * buffered data is used; otherwise the read goes through DMA.
+ */
+static int
+do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ struct mtd_ecc_stats old_ecc_stats;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint8_t *datbuf = ops->datbuf;
+ uint8_t *oobbuf = ops->oobbuf;
+ uint32_t ooblen = oobbuf ? ops->ooblen : 0;
+ uint32_t oobsz;
+ uint32_t page_count;
+ int err;
+ int unaligned = from & info->chip.column_mask;
+ uint32_t len = datbuf ? ((ops->len) + unaligned) : 0;
+ int do_ecc = 1;
+ dma_addr_t datbuf_dma_addr = 0;
+
+#if 0
+ dump_mtd_oob_ops(ops);
+#endif
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ from = from - unaligned;
+
+ /* Regardless of the MTD_OOB_* mode, always use oobavail and ECC. */
+ oobsz = mtd->oobavail;
+ if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
+ pr_err("%s: can't read OOB from multiple pages (%d > %d)\n",
+ __func__, ops->ooblen, oobsz);
+ return -EINVAL;
+ } else if (ops->oobbuf && !len) {
+ page_count = 1;
+ } else {
+ page_count =
+ (uint32_t) ((len + mtd->writesize - 1) / mtd->writesize);
+ }
+
+ mutex_lock(&info->lock);
+
+ memcpy(&old_ecc_stats, &mtd->ecc_stats, sizeof(old_ecc_stats));
+
+ if (do_ecc) {
+ enable_ints(info, IER_ECC_ERR);
+ writel(info->ecc_addr, ECC_PTR_REG);
+ } else
+ disable_ints(info, IER_ECC_ERR);
+
+ split_addr(info, from, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ /* reset it to point back to beginning of page */
+ from -= column;
+
+ while (page_count--) {
+ int a_len = min(mtd->writesize - column, len);
+ int b_len = min(oobsz, ooblen);
+ int temp_len = 0;
+ char *temp_buf = NULL;
+ /* Handle reads smaller than the page size here; otherwise the
+ * DMA times out and the kernel panics */
+ if (((a_len < mtd->writesize) && len) || unaligned) {
+ temp_len = a_len;
+ a_len = mtd->writesize;
+ temp_buf = datbuf;
+ datbuf = info->partial_unaligned_rw_buffer;
+ }
+#if 0
+ pr_info("%s: chip:=%d page=%d col=%d\n", __func__, chipnr,
+ page, column);
+#endif
+
+ clear_regs(info);
+ if (datbuf)
+ datbuf_dma_addr =
+ tegra_nand_dma_map(info->dev, datbuf, a_len,
+ DMA_FROM_DEVICE);
+
+ prep_transfer_dma(info, 1, do_ecc, page, column,
+ datbuf_dma_addr, a_len, info->oob_dma_addr,
+ b_len);
+ writel(info->config_reg, CONFIG_REG);
+ writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
+
+ INIT_COMPLETION(info->dma_complete);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ goto out_err;
+
+ if (!wait_for_completion_timeout(&info->dma_complete, TIMEOUT)) {
+ pr_err("%s: dma completion timeout\n", __func__);
+ dump_nand_regs();
+ err = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ /*pr_info("tegra_read_oob: DMA complete\n"); */
+
+ /* if we are here, transfer is done */
+ if (datbuf)
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len,
+ DMA_FROM_DEVICE);
+
+ if (oobbuf) {
+ uint32_t ofs = datbuf && oobbuf ? 4 : 0; /* skipped bytes */
+ memcpy(oobbuf, info->oob_dma_buf + ofs, b_len);
+ }
+
+ correct_ecc_errors_on_blank_page(info, datbuf, oobbuf, a_len,
+ b_len);
+ /* copy back from the bounce buffer after a partial-page read */
+ if (temp_len) {
+ memcpy(temp_buf, datbuf + unaligned,
+ temp_len - unaligned);
+ a_len = temp_len;
+ datbuf = temp_buf;
+ }
+ if (datbuf) {
+ len -= a_len;
+ datbuf += a_len - unaligned;
+ ops->retlen += a_len - unaligned;
+ }
+
+ if (oobbuf) {
+ ooblen -= b_len;
+ oobbuf += b_len;
+ ops->oobretlen += b_len;
+ }
+
+ unaligned = 0;
+ update_ecc_counts(info, oobbuf != NULL);
+
+ if (!page_count)
+ break;
+
+ from += mtd->writesize;
+ column = 0;
+
+ split_addr(info, from, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ }
+
+ disable_ints(info, IER_ECC_ERR);
+
+ if (mtd->ecc_stats.failed != old_ecc_stats.failed)
+ err = -EBADMSG;
+ else if (mtd->ecc_stats.corrected != old_ecc_stats.corrected)
+ err = -EUCLEAN;
+ else
+ err = 0;
+
+ mutex_unlock(&info->lock);
+ return err;
+
+out_err:
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ disable_ints(info, IER_ECC_ERR);
+ mutex_unlock(&info->lock);
+ return err;
+}
+
+/* just does some parameter checking and calls do_read_oob */
+static int
+tegra_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ if (ops->datbuf && unlikely((from + ops->len) > mtd->size)) {
+ pr_err("%s: Can't read past end of device.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->oobbuf && !ops->ooblen)) {
+ pr_err("%s: Reading 0 bytes from OOB is meaningless\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->mode != MTD_OOB_AUTO)) {
+ if (ops->oobbuf && ops->datbuf) {
+ pr_err("%s: can't read OOB + Data in non-AUTO mode.\n",
+ __func__);
+ return -EINVAL;
+ }
+ if ((ops->mode == MTD_OOB_RAW) && !ops->datbuf) {
+ pr_err("%s: Raw mode only supports reading data area.\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return do_read_oob(mtd, from, ops);
+}
+
+static int
+tegra_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ pr_debug("%s: write: to=0x%llx len=0x%x\n", __func__, to, len);
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = len;
+ ops.datbuf = (uint8_t *) buf;
+ ops.oobbuf = NULL;
+ ret = mtd->write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static int
+do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint8_t *datbuf = ops->datbuf;
+ uint8_t *oobbuf = ops->oobbuf;
+ uint32_t len = datbuf ? ops->len : 0;
+ uint32_t ooblen = oobbuf ? ops->ooblen : 0;
+ uint32_t oobsz;
+ uint32_t page_count;
+ int err;
+ int do_ecc = 1;
+ dma_addr_t datbuf_dma_addr = 0;
+
+#if 0
+ dump_mtd_oob_ops(ops);
+#endif
+
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ if (!ops->len)
+ return 0;
+
+ oobsz = mtd->oobavail;
+
+ if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
+ pr_err("%s: can't write OOB to multiple pages (%d > %d)\n",
+ __func__, ops->ooblen, oobsz);
+ return -EINVAL;
+ } else if (ops->oobbuf && !len) {
+ page_count = 1;
+ } else
+ page_count =
+ max((uint32_t) (ops->len / mtd->writesize), (uint32_t) 1);
+
+ mutex_lock(&info->lock);
+
+ split_addr(info, to, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ while (page_count--) {
+ int a_len = min(mtd->writesize, len);
+ int b_len = min(oobsz, ooblen);
+ int temp_len = 0;
+ char *temp_buf = NULL;
+ /* Handle writes smaller than the page size here; otherwise the
+ * DMA times out and the kernel panics */
+ if ((a_len < mtd->writesize) && len) {
+ temp_len = a_len;
+ a_len = mtd->writesize;
+ temp_buf = datbuf;
+ datbuf = info->partial_unaligned_rw_buffer;
+ memset(datbuf, 0xff, a_len);
+ memcpy(datbuf, temp_buf, temp_len);
+ }
+
+ if (datbuf)
+ datbuf_dma_addr =
+ tegra_nand_dma_map(info->dev, datbuf, a_len,
+ DMA_TO_DEVICE);
+ if (oobbuf)
+ memcpy(info->oob_dma_buf, oobbuf, b_len);
+
+ clear_regs(info);
+ prep_transfer_dma(info, 0, do_ecc, page, column,
+ datbuf_dma_addr, a_len, info->oob_dma_addr,
+ b_len);
+
+ writel(info->config_reg, CONFIG_REG);
+ writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
+
+ INIT_COMPLETION(info->dma_complete);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ goto out_err;
+
+ if (!wait_for_completion_timeout(&info->dma_complete, TIMEOUT)) {
+ pr_err("%s: dma completion timeout\n", __func__);
+ dump_nand_regs();
+ goto out_err;
+ }
+ if (temp_len) {
+ a_len = temp_len;
+ datbuf = temp_buf;
+ }
+
+ if (datbuf) {
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len,
+ DMA_TO_DEVICE);
+ len -= a_len;
+ datbuf += a_len;
+ ops->retlen += a_len;
+ }
+ if (oobbuf) {
+ ooblen -= b_len;
+ oobbuf += b_len;
+ ops->oobretlen += b_len;
+ }
+
+ if (!page_count)
+ break;
+
+ to += mtd->writesize;
+ column = 0;
+
+ split_addr(info, to, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ }
+
+ mutex_unlock(&info->lock);
+ return err;
+
+out_err:
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ mutex_unlock(&info->lock);
+ return err;
+}
+
+static int
+tegra_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+
+ if (unlikely(to & info->chip.column_mask)) {
+ pr_err("%s: Unaligned write (to 0x%llx) not supported\n",
+ __func__, to);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->oobbuf && !ops->ooblen)) {
+ pr_err("%s: Writing 0 bytes to OOB is meaningless\n", __func__);
+ return -EINVAL;
+ }
+
+ return do_write_oob(mtd, to, ops);
+}
+
+static int tegra_nand_suspend(struct mtd_info *mtd)
+{
+ return 0;
+}
+
+static void tegra_nand_resume(struct mtd_info *mtd)
+{
+}
+
+static int scan_bad_blocks(struct tegra_nand_info *info)
+{
+ struct mtd_info *mtd = &info->mtd;
+ int num_blocks = mtd->size >> info->chip.block_shift;
+ uint32_t block;
+ int is_bad = 0;
+ info->num_bad_blocks = 0;
+
+ for (block = 0; block < num_blocks; ++block) {
+ /* make sure the bit is cleared, meaning it's bad/unknown before
+ * we check. */
+ clear_bit(block, info->bb_bitmap);
+ is_bad = mtd->block_isbad(mtd, block << info->chip.block_shift);
+
+ if (is_bad == 0)
+ set_bit(block, info->bb_bitmap);
+ else if (is_bad > 0) {
+ info->num_bad_blocks++;
+ pr_debug("block 0x%08x is bad.\n", block);
+ } else {
+ pr_err("Fatal error (%d) while scanning for "
+ "bad blocks\n", is_bad);
+ return is_bad;
+ }
+ }
+ return 0;
+}
+
+static void
+set_chip_timing(struct tegra_nand_info *info, uint32_t vendor_id,
+ uint32_t dev_id, uint32_t fourth_id_field)
+{
+ struct tegra_nand_chip_parms *chip_parms = NULL;
+ uint32_t tmp;
+ int i = 0;
+ unsigned long nand_clk_freq_khz = clk_get_rate(info->clk) / 1000;
+ for (i = 0; i < info->plat->nr_chip_parms; i++)
+ if (info->plat->chip_parms[i].vendor_id == vendor_id &&
+ info->plat->chip_parms[i].device_id == dev_id &&
+ info->plat->chip_parms[i].read_id_fourth_byte ==
+ fourth_id_field)
+ chip_parms = &info->plat->chip_parms[i];
+
+ if (!chip_parms) {
+ pr_warn("WARNING:tegra_nand: timing for vendor-id: "
+ "%x device-id: %x fourth-id-field: %x not found. Using Bootloader timing",
+ vendor_id, dev_id, fourth_id_field);
+ return;
+ }
+ /* TODO: Handle the change of frequency if DVFS is enabled */
+#define CNT(t) (((((t) * nand_clk_freq_khz) + 1000000 - 1) / 1000000) - 1)
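+/* Illustrative example (assuming the chip_parms timings are in ns):
+ * CNT() converts a timing value into NAND clock cycles, rounded up,
+ * minus one (the registers store cycles - 1). At a 100 MHz NAND clock
+ * (nand_clk_freq_khz == 100000), a 25 ns value gives
+ * ceil(25 * 100000 / 1000000) - 1 = 2.
+ */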
+ tmp = (TIMING_TRP_RESP(CNT(chip_parms->timing.trp_resp)) |
+ TIMING_TWB(CNT(chip_parms->timing.twb)) |
+ TIMING_TCR_TAR_TRR(CNT(chip_parms->timing.tcr_tar_trr)) |
+ TIMING_TWHR(CNT(chip_parms->timing.twhr)) |
+ TIMING_TCS(CNT(chip_parms->timing.tcs)) |
+ TIMING_TWH(CNT(chip_parms->timing.twh)) |
+ TIMING_TWP(CNT(chip_parms->timing.twp)) |
+ TIMING_TRH(CNT(chip_parms->timing.trh)) |
+ TIMING_TRP(CNT(chip_parms->timing.trp)));
+ writel(tmp, TIMING_REG);
+ writel(TIMING2_TADL(CNT(chip_parms->timing.tadl)), TIMING2_REG);
+#undef CNT
+}
+
+/* Scans for nand flash devices, identifies them, and fills in the
+ * device info. */
+static int tegra_nand_scan(struct mtd_info *mtd, int maxchips)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ struct nand_flash_dev *dev_info;
+ struct nand_manufacturers *vendor_info;
+ uint32_t tmp;
+ uint32_t dev_id;
+ uint32_t vendor_id;
+ uint32_t dev_parms;
+ uint32_t mlc_parms;
+ int cnt;
+ int err = 0;
+
+ writel(SCAN_TIMING_VAL, TIMING_REG);
+ writel(SCAN_TIMING2_VAL, TIMING2_REG);
+ writel(0, CONFIG_REG);
+
+ select_chip(info, 0);
+ err = tegra_nand_cmd_readid(info, &tmp);
+ if (err != 0)
+ goto out_error;
+
+ vendor_id = tmp & 0xff;
+ dev_id = (tmp >> 8) & 0xff;
+ mlc_parms = (tmp >> 16) & 0xff;
+ dev_parms = (tmp >> 24) & 0xff;
+
+ dev_info = find_nand_flash_device(dev_id);
+ if (dev_info == NULL) {
+ pr_err("%s: unknown flash device id (0x%02x) found.\n",
+ __func__, dev_id);
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ vendor_info = find_nand_flash_vendor(vendor_id);
+ if (vendor_info == NULL) {
+ pr_err("%s: unknown flash vendor id (0x%02x) found.\n",
+ __func__, vendor_id);
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ /* loop through and see if we can find more devices */
+ for (cnt = 1; cnt < info->plat->max_chips; ++cnt) {
+ select_chip(info, cnt);
+ /* TODO: figure out what to do about errors here */
+ err = tegra_nand_cmd_readid(info, &tmp);
+ if (err != 0)
+ goto out_error;
+ if ((dev_id != ((tmp >> 8) & 0xff)) ||
+ (vendor_id != (tmp & 0xff)))
+ break;
+ }
+
+ pr_info("%s: %d NAND chip(s) found (vend=0x%02x, dev=0x%02x) (%s %s)\n",
+ DRIVER_NAME, cnt, vendor_id, dev_id, vendor_info->name,
+ dev_info->name);
+ info->vendor_id = vendor_id;
+ info->device_id = dev_id;
+ info->chip.num_chips = cnt;
+ info->chip.chipsize = dev_info->chipsize << 20;
+ mtd->size = info->chip.num_chips * info->chip.chipsize;
+
+ /* format of 4th id byte returned by READ ID
+ * bit 7 = rsvd
+ * bit 6 = bus width. 1 == 16bit, 0 == 8bit
+ * bits 5:4 = data block size. 64kb * (2^val)
+ * bit 3 = rsvd
+ * bit 2 = spare area size / 512 bytes. 0 == 8bytes, 1 == 16bytes
+ * bits 1:0 = page size. 1kb * (2^val)
+ */
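+
+ /* Worked example (illustrative): dev_parms == 0x15 decodes to a 2kb
+ * page (1kb * 2^1), 16 spare bytes per 512 (a 64-byte OOB on a 2kb
+ * page), a 128kb block (64kb * 2^1), and an 8-bit bus.
+ */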
+
+ /* page_size */
+ tmp = dev_parms & 0x3;
+ mtd->writesize = 1024 << tmp;
+ info->chip.column_mask = mtd->writesize - 1;
+
+ if (mtd->writesize > 4096) {
+ pr_err("%s: Large page devices with pagesize > 4kb are NOT "
+ "supported\n", __func__);
+ goto out_error;
+ } else if (mtd->writesize < 2048) {
+ pr_err("%s: Small page devices are NOT supported\n", __func__);
+ goto out_error;
+ }
+
+ /* spare area, must be at least 64 bytes */
+ tmp = (dev_parms >> 2) & 0x1;
+ tmp = (8 << tmp) * (mtd->writesize / 512);
+ if (tmp < 64) {
+ pr_err("%s: Spare area (%d bytes) too small\n", __func__, tmp);
+ goto out_error;
+ }
+ mtd->oobsize = tmp;
+
+ /* data block size (erase size) (w/o spare) */
+ tmp = (dev_parms >> 4) & 0x3;
+ mtd->erasesize = (64 * 1024) << tmp;
+ info->chip.block_shift = ffs(mtd->erasesize) - 1;
+ /* bus width of the nand chip 8/16 */
+ tmp = (dev_parms >> 6) & 0x1;
+ info->is_data_bus_width_16 = tmp;
+ /* used to select the appropriate chip/page in case multiple devices
+ * are connected */
+ info->chip.chip_shift = ffs(info->chip.chipsize) - 1;
+ info->chip.page_shift = ffs(mtd->writesize) - 1;
+ info->chip.page_mask =
+ (info->chip.chipsize >> info->chip.page_shift) - 1;
+
+ /* now fill in the rest of the mtd fields */
+ if (mtd->oobsize == 64)
+ mtd->ecclayout = &tegra_nand_oob_64;
+ else
+ mtd->ecclayout = &tegra_nand_oob_128;
+
+ mtd->oobavail = mtd->ecclayout->oobavail;
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+
+ mtd->erase = tegra_nand_erase;
+ mtd->lock = NULL;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = tegra_nand_read;
+ mtd->write = tegra_nand_write;
+ mtd->read_oob = tegra_nand_read_oob;
+ mtd->write_oob = tegra_nand_write_oob;
+
+ mtd->resume = tegra_nand_resume;
+ mtd->suspend = tegra_nand_suspend;
+ mtd->block_isbad = tegra_nand_block_isbad;
+ mtd->block_markbad = tegra_nand_block_markbad;
+
+ set_chip_timing(info, vendor_id, dev_id, dev_parms);
+
+ return 0;
+
+out_error:
+ pr_err("%s: NAND device scan aborted due to error(s).\n", __func__);
+ return err;
+}
+
+static int __devinit tegra_nand_probe(struct platform_device *pdev)
+{
+ struct tegra_nand_platform *plat = pdev->dev.platform_data;
+ struct tegra_nand_info *info = NULL;
+ struct tegra_nand_chip *chip = NULL;
+ struct mtd_info *mtd = NULL;
+ int err = 0;
+ uint64_t num_erase_blocks;
+
+ pr_debug("%s: probing (%p)\n", __func__, pdev);
+
+ if (!plat) {
+ pr_err("%s: no platform device info\n", __func__);
+ return -EINVAL;
+ } else if (!plat->chip_parms) {
+ pr_err("%s: no platform nand parms\n", __func__);
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(struct tegra_nand_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("%s: no memory for flash info\n", __func__);
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+ info->plat = plat;
+
+ platform_set_drvdata(pdev, info);
+
+ init_completion(&info->cmd_complete);
+ init_completion(&info->dma_complete);
+
+ mutex_init(&info->lock);
+ spin_lock_init(&info->ecc_lock);
+
+ chip = &info->chip;
+ chip->priv = &info->mtd;
+ chip->curr_chip = -1;
+
+ mtd = &info->mtd;
+ mtd->name = dev_name(&pdev->dev);
+ mtd->priv = &info->chip;
+ mtd->owner = THIS_MODULE;
+
+ /* HACK: allocate a dma buffer to hold 1 page oob data */
+ info->oob_dma_buf = dma_alloc_coherent(NULL, 128,
+ &info->oob_dma_addr, GFP_KERNEL);
+ if (!info->oob_dma_buf) {
+ err = -ENOMEM;
+ goto out_free_info;
+ }
+
+ /* this will store the ecc error vector info */
+ info->ecc_buf = dma_alloc_coherent(NULL, ECC_BUF_SZ, &info->ecc_addr,
+ GFP_KERNEL);
+ if (!info->ecc_buf) {
+ err = -ENOMEM;
+ goto out_free_dma_buf;
+ }
+
+ /* grab the irq */
+ if (!(pdev->resource[0].flags & IORESOURCE_IRQ)) {
+ pr_err("NAND IRQ resource not defined\n");
+ err = -EINVAL;
+ goto out_free_ecc_buf;
+ }
+
+ err = request_irq(pdev->resource[0].start, tegra_nand_irq,
+ IRQF_SHARED, DRIVER_NAME, info);
+ if (err) {
+ pr_err("Unable to request IRQ %d (%d)\n",
+ pdev->resource[0].start, err);
+ goto out_free_ecc_buf;
+ }
+
+ /* TODO: configure pinmux here?? */
+ info->clk = clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(info->clk)) {
+ err = PTR_ERR(info->clk);
+ goto out_free_ecc_buf;
+ }
+ err = clk_enable(info->clk);
+ if (err != 0)
+ goto out_free_ecc_buf;
+
+ if (plat->wp_gpio) {
+ gpio_request(plat->wp_gpio, "nand_wp");
+ tegra_gpio_enable(plat->wp_gpio);
+ gpio_direction_output(plat->wp_gpio, 1);
+ }
+
+ cfg_hwstatus_mon(info);
+
+ /* clear all pending interrupts */
+ writel(readl(ISR_REG), ISR_REG);
+
+ /* clear dma interrupt */
+ writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG);
+
+ /* enable interrupts */
+ disable_ints(info, 0xffffffff);
+ enable_ints(info,
+ IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE |
+ IER_ECC_ERR | IER_GIE);
+
+ if (tegra_nand_scan(mtd, plat->max_chips)) {
+ err = -ENXIO;
+ goto out_dis_irq;
+ }
+ pr_info("%s: NVIDIA Tegra NAND controller @ base=0x%08x irq=%d.\n",
+ DRIVER_NAME, TEGRA_NAND_PHYS, pdev->resource[0].start);
+
+ /* allocate memory to hold the ecc error info */
+ info->max_ecc_errs = MAX_DMA_SZ / mtd->writesize;
+ info->ecc_errs = kmalloc(info->max_ecc_errs * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!info->ecc_errs) {
+ err = -ENOMEM;
+ goto out_dis_irq;
+ }
+
+ /* alloc the bad block bitmap */
+ num_erase_blocks = mtd->size;
+ do_div(num_erase_blocks, mtd->erasesize);
+ info->bb_bitmap = kzalloc(BITS_TO_LONGS(num_erase_blocks) *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!info->bb_bitmap) {
+ err = -ENOMEM;
+ goto out_free_ecc;
+ }
+
+ err = scan_bad_blocks(info);
+ if (err != 0)
+ goto out_free_bbbmap;
+
+#if 0
+ dump_nand_regs();
+#endif
+
+ err = parse_mtd_partitions(mtd, part_probes, &info->parts, 0);
+ if (err > 0) {
+ err = mtd_device_register(mtd, info->parts, err);
+ } else if (err <= 0 && plat->parts) {
+ err = mtd_device_register(mtd, plat->parts, plat->nr_parts);
+ } else
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err != 0)
+ goto out_free_bbbmap;
+
+ dev_set_drvdata(&pdev->dev, info);
+
+ info->partial_unaligned_rw_buffer = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!info->partial_unaligned_rw_buffer) {
+ err = -ENOMEM;
+ goto out_free_bbbmap;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_device_id);
+ if (err != 0)
+ goto out_free_rw_buffer;
+
+ err = device_create_file(&pdev->dev, &dev_attr_vendor_id);
+ if (err != 0)
+ goto err_nand_sysfs_vendorid_failed;
+
+ err = device_create_file(&pdev->dev, &dev_attr_flash_size);
+ if (err != 0)
+ goto err_nand_sysfs_flash_size_failed;
+
+ err = device_create_file(&pdev->dev, &dev_attr_num_bad_blocks);
+ if (err != 0)
+ goto err_nand_sysfs_num_bad_blocks_failed;
+
+ err = device_create_file(&pdev->dev, &dev_attr_bb_bitmap);
+ if (err != 0)
+ goto err_nand_sysfs_bb_bitmap_failed;
+
+ pr_debug("%s: probe done.\n", __func__);
+ return 0;
+
+err_nand_sysfs_bb_bitmap_failed:
+ device_remove_file(&pdev->dev, &dev_attr_num_bad_blocks);
+
+err_nand_sysfs_num_bad_blocks_failed:
+ device_remove_file(&pdev->dev, &dev_attr_flash_size);
+
+err_nand_sysfs_flash_size_failed:
+ device_remove_file(&pdev->dev, &dev_attr_vendor_id);
+
+err_nand_sysfs_vendorid_failed:
+ device_remove_file(&pdev->dev, &dev_attr_device_id);
+
+out_free_rw_buffer:
+ kfree(info->partial_unaligned_rw_buffer);
+
+out_free_bbbmap:
+ kfree(info->bb_bitmap);
+
+out_free_ecc:
+ kfree(info->ecc_errs);
+
+out_dis_irq:
+ disable_ints(info, 0xffffffff);
+ free_irq(pdev->resource[0].start, info);
+
+out_free_ecc_buf:
+ dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
+
+out_free_dma_buf:
+ dma_free_coherent(NULL, 128, info->oob_dma_buf, info->oob_dma_addr);
+
+out_free_info:
+ platform_set_drvdata(pdev, NULL);
+ kfree(info);
+
+ return err;
+}
+
+static int __devexit tegra_nand_remove(struct platform_device *pdev)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (info) {
+ free_irq(pdev->resource[0].start, info);
+ kfree(info->bb_bitmap);
+ kfree(info->ecc_errs);
+ kfree(info->partial_unaligned_rw_buffer);
+
+ device_remove_file(&pdev->dev, &dev_attr_device_id);
+ device_remove_file(&pdev->dev, &dev_attr_vendor_id);
+ device_remove_file(&pdev->dev, &dev_attr_flash_size);
+ device_remove_file(&pdev->dev, &dev_attr_num_bad_blocks);
+ device_remove_file(&pdev->dev, &dev_attr_bb_bitmap);
+
+ dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf,
+ info->ecc_addr);
+ dma_free_coherent(NULL, info->mtd.writesize + info->mtd.oobsize,
+ info->oob_dma_buf, info->oob_dma_addr);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tegra_nand_driver = {
+ .probe = tegra_nand_probe,
+ .remove = __devexit_p(tegra_nand_remove),
+ .suspend = NULL,
+ .resume = NULL,
+ .driver = {
+ .name = "tegra_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_nand_init(void)
+{
+ return platform_driver_register(&tegra_nand_driver);
+}
+
+static void __exit tegra_nand_exit(void)
+{
+ platform_driver_unregister(&tegra_nand_driver);
+}
+
+module_init(tegra_nand_init);
+module_exit(tegra_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/mtd/devices/tegra_nand.h b/drivers/mtd/devices/tegra_nand.h
new file mode 100644
index 000000000000..339d6cc7330c
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.h
@@ -0,0 +1,148 @@
+/*
+ * drivers/mtd/devices/tegra_nand.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTD_DEV_TEGRA_NAND_H
+#define __MTD_DEV_TEGRA_NAND_H
+
+#include <mach/io.h>
+
+#define __BITMASK0(len) ((1 << (len)) - 1)
+#define __BITMASK(start, len) (__BITMASK0(len) << (start))
+#define REG_BIT(bit) (1 << (bit))
+#define REG_FIELD(val, start, len) (((val) & __BITMASK0(len)) << (start))
+#define REG_FIELD_MASK(start, len) (~(__BITMASK((start), (len))))
+#define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len))
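+
+/* Illustrative example: COMMAND_TRANS_SIZE(8) expands to
+ * REG_FIELD(8, 20, 4) == (8 & 0xf) << 20 == 0x00800000, and
+ * REG_GET_FIELD(0x00800000, 20, 4) recovers the value 8.
+ */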
+
+/* tegra nand registers... */
+#define TEGRA_NAND_PHYS 0x70008000
+#define TEGRA_NAND_BASE IO_TO_VIRT(TEGRA_NAND_PHYS)
+#define COMMAND_REG (TEGRA_NAND_BASE + 0x00)
+#define STATUS_REG (TEGRA_NAND_BASE + 0x04)
+#define ISR_REG (TEGRA_NAND_BASE + 0x08)
+#define IER_REG (TEGRA_NAND_BASE + 0x0c)
+#define CONFIG_REG (TEGRA_NAND_BASE + 0x10)
+#define TIMING_REG (TEGRA_NAND_BASE + 0x14)
+#define RESP_REG (TEGRA_NAND_BASE + 0x18)
+#define TIMING2_REG (TEGRA_NAND_BASE + 0x1c)
+#define CMD_REG1 (TEGRA_NAND_BASE + 0x20)
+#define CMD_REG2 (TEGRA_NAND_BASE + 0x24)
+#define ADDR_REG1 (TEGRA_NAND_BASE + 0x28)
+#define ADDR_REG2 (TEGRA_NAND_BASE + 0x2c)
+#define DMA_MST_CTRL_REG (TEGRA_NAND_BASE + 0x30)
+#define DMA_CFG_A_REG (TEGRA_NAND_BASE + 0x34)
+#define DMA_CFG_B_REG (TEGRA_NAND_BASE + 0x38)
+#define FIFO_CTRL_REG (TEGRA_NAND_BASE + 0x3c)
+#define DATA_BLOCK_PTR_REG (TEGRA_NAND_BASE + 0x40)
+#define TAG_PTR_REG (TEGRA_NAND_BASE + 0x44)
+#define ECC_PTR_REG (TEGRA_NAND_BASE + 0x48)
+#define DEC_STATUS_REG (TEGRA_NAND_BASE + 0x4c)
+#define HWSTATUS_CMD_REG (TEGRA_NAND_BASE + 0x50)
+#define HWSTATUS_MASK_REG (TEGRA_NAND_BASE + 0x54)
+#define LL_CONFIG_REG (TEGRA_NAND_BASE + 0x58)
+#define LL_PTR_REG (TEGRA_NAND_BASE + 0x5c)
+#define LL_STATUS_REG (TEGRA_NAND_BASE + 0x60)
+
+/* nand_command bits */
+#define COMMAND_GO REG_BIT(31)
+#define COMMAND_CLE REG_BIT(30)
+#define COMMAND_ALE REG_BIT(29)
+#define COMMAND_PIO REG_BIT(28)
+#define COMMAND_TX REG_BIT(27)
+#define COMMAND_RX REG_BIT(26)
+#define COMMAND_SEC_CMD REG_BIT(25)
+#define COMMAND_AFT_DAT REG_BIT(24)
+#define COMMAND_TRANS_SIZE(val) REG_FIELD((val), 20, 4)
+#define COMMAND_A_VALID REG_BIT(19)
+#define COMMAND_B_VALID REG_BIT(18)
+#define COMMAND_RD_STATUS_CHK REG_BIT(17)
+#define COMMAND_RBSY_CHK REG_BIT(16)
+#define COMMAND_CE(val) REG_BIT(8 + ((val) & 0x7))
+#define COMMAND_CLE_BYTE_SIZE(val) REG_FIELD((val), 4, 2)
+#define COMMAND_ALE_BYTE_SIZE(val) REG_FIELD((val), 0, 4)
+
+/* nand isr bits */
+#define ISR_UND REG_BIT(7)
+#define ISR_OVR REG_BIT(6)
+#define ISR_CMD_DONE REG_BIT(5)
+#define ISR_ECC_ERR REG_BIT(4)
+
+/* nand ier bits */
+#define IER_ERR_TRIG_VAL(val) REG_FIELD((val), 16, 4)
+#define IER_UND REG_BIT(7)
+#define IER_OVR REG_BIT(6)
+#define IER_CMD_DONE REG_BIT(5)
+#define IER_ECC_ERR REG_BIT(4)
+#define IER_GIE REG_BIT(0)
+
+/* nand config bits */
+#define CONFIG_HW_ECC REG_BIT(31)
+#define CONFIG_ECC_SEL REG_BIT(30)
+#define CONFIG_HW_ERR_CORRECTION REG_BIT(29)
+#define CONFIG_PIPELINE_EN REG_BIT(28)
+#define CONFIG_ECC_EN_TAG REG_BIT(27)
+#define CONFIG_TVALUE(val) REG_FIELD((val), 24, 2)
+#define CONFIG_SKIP_SPARE REG_BIT(23)
+#define CONFIG_COM_BSY REG_BIT(22)
+#define CONFIG_BUS_WIDTH REG_BIT(21)
+#define CONFIG_EDO_MODE REG_BIT(19)
+#define CONFIG_PAGE_SIZE_SEL(val) REG_FIELD((val), 16, 3)
+#define CONFIG_SKIP_SPARE_SEL(val) REG_FIELD((val), 14, 2)
+#define CONFIG_TAG_BYTE_SIZE(val) REG_FIELD((val), 0, 8)
+
+/* nand timing bits */
+#define TIMING_TRP_RESP(val) REG_FIELD((val), 28, 4)
+#define TIMING_TWB(val) REG_FIELD((val), 24, 4)
+#define TIMING_TCR_TAR_TRR(val) REG_FIELD((val), 20, 4)
+#define TIMING_TWHR(val) REG_FIELD((val), 16, 4)
+#define TIMING_TCS(val) REG_FIELD((val), 14, 2)
+#define TIMING_TWH(val) REG_FIELD((val), 12, 2)
+#define TIMING_TWP(val) REG_FIELD((val), 8, 4)
+#define TIMING_TRH(val) REG_FIELD((val), 4, 2)
+#define TIMING_TRP(val) REG_FIELD((val), 0, 4)
+
+/* nand timing2 bits */
+#define TIMING2_TADL(val) REG_FIELD((val), 0, 4)
+
+/* nand dma_mst_ctrl bits */
+#define DMA_CTRL_DMA_GO REG_BIT(31)
+#define DMA_CTRL_DIR REG_BIT(30)
+#define DMA_CTRL_DMA_PERF_EN REG_BIT(29)
+#define DMA_CTRL_IE_DMA_DONE REG_BIT(28)
+#define DMA_CTRL_REUSE_BUFFER REG_BIT(27)
+#define DMA_CTRL_BURST_SIZE(val) REG_FIELD((val), 24, 3)
+#define DMA_CTRL_IS_DMA_DONE REG_BIT(20)
+#define DMA_CTRL_DMA_EN_A REG_BIT(2)
+#define DMA_CTRL_DMA_EN_B REG_BIT(1)
+
+/* nand dma_cfg_a/cfg_b bits */
+#define DMA_CFG_BLOCK_SIZE(val) REG_FIELD((val), 0, 16)
+
+/* nand dec_status bits */
+#define DEC_STATUS_ERR_PAGE_NUM(val) REG_GET_FIELD((val), 24, 8)
+#define DEC_STATUS_ERR_CNT(val) REG_GET_FIELD((val), 16, 8)
+#define DEC_STATUS_ECC_FAIL_A REG_BIT(1)
+#define DEC_STATUS_ECC_FAIL_B REG_BIT(0)
+
+/* nand hwstatus_mask bits */
+#define HWSTATUS_RDSTATUS_MASK(val) REG_FIELD((val), 24, 8)
+#define HWSTATUS_RDSTATUS_EXP_VAL(val) REG_FIELD((val), 16, 8)
+#define HWSTATUS_RBSY_MASK(val) REG_FIELD((val), 8, 8)
+#define HWSTATUS_RBSY_EXP_VAL(val) REG_FIELD((val), 0, 8)
+
+#endif
+
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index c0c328c5b133..299e67c039ff 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -561,4 +561,11 @@ config MTD_LATCH_ADDR
If compiled as a module, it will be called latch-addr-flash.
+config MTD_NOR_TEGRA
+ bool "NOR Flash mapping driver for NVIDIA Tegra based boards"
+ depends on MTD_COMPLEX_MAPPINGS && ARCH_TEGRA
+ help
+ This enables access routines for the flash chips on the NVIDIA Tegra
+ based boards.
+
endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index cb48b11affff..bb5eef14a367 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -60,3 +60,4 @@ obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
+obj-$(CONFIG_MTD_NOR_TEGRA) += tegra_nor.o
diff --git a/drivers/mtd/maps/tegra_nor.c b/drivers/mtd/maps/tegra_nor.c
new file mode 100644
index 000000000000..a423bb2b84d0
--- /dev/null
+++ b/drivers/mtd/maps/tegra_nor.c
@@ -0,0 +1,476 @@
+/*
+ * drivers/mtd/maps/tegra_nor.c
+ *
+ * MTD mapping driver for the internal SNOR controller in Tegra SoCs
+ *
+ * Copyright (C) 2009 - 2011 NVIDIA Corporation
+ *
+ * Author:
+ * Raghavendra VK <rvk@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_data/tegra_nor.h>
+#include <asm/cacheflush.h>
+
+#define __BITMASK0(len) (BIT(len) - 1)
+#define REG_FIELD(val, start, len) (((val) & __BITMASK0(len)) << (start))
+#define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len))
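For illustration: REG_FIELD() masks a value to `len` bits and shifts it up to bit `start`, and REG_GET_FIELD() undoes that. A minimal stand-alone sketch of the round trip, with the macros duplicated in user space (BIT(n) expanded to 1U << n) and an arbitrary example value:

	#include <stdint.h>
	#include <stdio.h>

	#define __BITMASK0(len)                ((1U << (len)) - 1)
	#define REG_FIELD(val, start, len)     (((val) & __BITMASK0(len)) << (start))
	#define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len))

	int main(void)
	{
		/* Pack a 3-bit burst-size value of 4 at bit 24, as BRST_SZ() does. */
		uint32_t reg = REG_FIELD(4, 24, 3);

		/* Extract it again; prints "burst = 4". */
		printf("burst = %u\n", (unsigned)REG_GET_FIELD(reg, 24, 3));
		return 0;
	}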
+
+/* tegra gmi registers... */
+#define TEGRA_SNOR_CONFIG_REG 0x00
+#define TEGRA_SNOR_NOR_ADDR_PTR_REG 0x08
+#define TEGRA_SNOR_AHB_ADDR_PTR_REG 0x0C
+#define TEGRA_SNOR_TIMING0_REG 0x10
+#define TEGRA_SNOR_TIMING1_REG 0x14
+#define TEGRA_SNOR_DMA_CFG_REG 0x20
+
+/* config register */
+#define TEGRA_SNOR_CONFIG_GO BIT(31)
+#define TEGRA_SNOR_CONFIG_WORDWIDE BIT(30)
+#define TEGRA_SNOR_CONFIG_DEVICE_TYPE BIT(29)
+#define TEGRA_SNOR_CONFIG_MUX_MODE BIT(28)
+#define TEGRA_SNOR_CONFIG_BURST_LEN(val) REG_FIELD((val), 26, 2)
+#define TEGRA_SNOR_CONFIG_RDY_ACTIVE BIT(24)
+#define TEGRA_SNOR_CONFIG_RDY_POLARITY BIT(23)
+#define TEGRA_SNOR_CONFIG_ADV_POLARITY BIT(22)
+#define TEGRA_SNOR_CONFIG_OE_WE_POLARITY BIT(21)
+#define TEGRA_SNOR_CONFIG_CS_POLARITY BIT(20)
+#define TEGRA_SNOR_CONFIG_NOR_DPD BIT(19)
+#define TEGRA_SNOR_CONFIG_WP BIT(15)
+#define TEGRA_SNOR_CONFIG_PAGE_SZ(val) REG_FIELD((val), 8, 2)
+#define TEGRA_SNOR_CONFIG_MST_ENB BIT(7)
+#define TEGRA_SNOR_CONFIG_SNOR_CS(val) REG_FIELD((val), 4, 2)
+#define TEGRA_SNOR_CONFIG_CE_LAST		BIT(3)
+#define TEGRA_SNOR_CONFIG_CE_FIRST		BIT(2)
+#define TEGRA_SNOR_CONFIG_DEVICE_MODE(val) REG_FIELD((val), 0, 2)
+
+/* dma config register */
+#define TEGRA_SNOR_DMA_CFG_GO BIT(31)
+#define TEGRA_SNOR_DMA_CFG_BSY BIT(30)
+#define TEGRA_SNOR_DMA_CFG_DIR BIT(29)
+#define TEGRA_SNOR_DMA_CFG_INT_ENB BIT(28)
+#define TEGRA_SNOR_DMA_CFG_INT_STA BIT(27)
+#define TEGRA_SNOR_DMA_CFG_BRST_SZ(val) REG_FIELD((val), 24, 3)
+#define TEGRA_SNOR_DMA_CFG_WRD_CNT(val) REG_FIELD((val), 2, 14)
+
+/* timing 0 register */
+#define TEGRA_SNOR_TIMING0_PG_RDY(val) REG_FIELD((val), 28, 4)
+#define TEGRA_SNOR_TIMING0_PG_SEQ(val) REG_FIELD((val), 20, 4)
+#define TEGRA_SNOR_TIMING0_MUX(val) REG_FIELD((val), 12, 4)
+#define TEGRA_SNOR_TIMING0_HOLD(val) REG_FIELD((val), 8, 4)
+#define TEGRA_SNOR_TIMING0_ADV(val) REG_FIELD((val), 4, 4)
+#define TEGRA_SNOR_TIMING0_CE(val) REG_FIELD((val), 0, 4)
+
+/* timing 1 register */
+#define TEGRA_SNOR_TIMING1_WE(val) REG_FIELD((val), 16, 8)
+#define TEGRA_SNOR_TIMING1_OE(val) REG_FIELD((val), 8, 8)
+#define TEGRA_SNOR_TIMING1_WAIT(val) REG_FIELD((val), 0, 8)
+
+/* The SNOR DMA engine moves at most 2^14 AHB (32-bit) words,
+ * i.e. 2^16 bytes, in a single transfer.
+ */
+#define TEGRA_SNOR_DMA_LIMIT 0x10000
+#define TEGRA_SNOR_DMA_LIMIT_WORDS (TEGRA_SNOR_DMA_LIMIT >> 2)
+
+/* Even at a bandwidth as low as 1 MB/s, transferring the
+ * TEGRA_SNOR_DMA_LIMIT of 65536 bytes takes 65536 / 10^6 s ~= 66 ms,
+ * so time out after 67 ms.
+ */
+#define TEGRA_SNOR_DMA_TIMEOUT_MS 67
+
+struct tegra_nor_info {
+ struct tegra_nor_platform_data *plat;
+ struct device *dev;
+ struct clk *clk;
+ struct mtd_partition *parts;
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct completion dma_complete;
+ void __iomem *base;
+ u32 init_config;
+ u32 timing0_default, timing1_default;
+ u32 timing0_read, timing1_read;
+};
+
+static inline unsigned long snor_tegra_readl(struct tegra_nor_info *tnor,
+ unsigned long reg)
+{
+ return readl(tnor->base + reg);
+}
+
+static inline void snor_tegra_writel(struct tegra_nor_info *tnor,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, tnor->base + reg);
+}
+
+#define DRV_NAME "tegra-nor"
+
+static const char * const part_probes[] = { "cmdlinepart", NULL };
+
+static int wait_for_dma_completion(struct tegra_nor_info *info)
+{
+ unsigned long dma_timeout;
+	unsigned long ret;
+
+ dma_timeout = msecs_to_jiffies(TEGRA_SNOR_DMA_TIMEOUT_MS);
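+	/* wait_for_completion_timeout() returns 0 on timeout, else jiffies left */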
+ ret = wait_for_completion_timeout(&info->dma_complete, dma_timeout);
+ return ret ? 0 : -ETIMEDOUT;
+}
+
+static void tegra_flash_dma(struct map_info *map,
+ void *to, unsigned long from, ssize_t len)
+{
+ u32 snor_config, dma_config = 0;
+ int dma_transfer_count = 0, word32_count = 0;
+	u32 nor_address, ahb_address, ahb_start, current_transfer;
+ struct tegra_nor_info *c =
+ container_of(map, struct tegra_nor_info, map);
+ unsigned int bytes_remaining = len;
+
+ snor_config = c->init_config;
+ snor_tegra_writel(c, c->timing0_read, TEGRA_SNOR_TIMING0_REG);
+ snor_tegra_writel(c, c->timing1_read, TEGRA_SNOR_TIMING1_REG);
+
+	if (len > 32) {
+ if (to >= high_memory)
+ goto out_copy;
+
+ ahb_address = dma_map_single(c->dev, to, len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(c->dev, ahb_address)) {
+			dev_err(c->dev,
+				"Couldn't DMA map a %zd byte buffer\n", len);
+			goto out_copy;
+		}
+		ahb_start = ahb_address;
+ word32_count = len >> 2;
+ bytes_remaining = len & 0x00000003;
+		/*
+		 * The parameters can be set up in any order, since we only
+		 * write them to the controller registers after all of them
+		 * have been computed.
+		 */
+ /* SNOR CONFIGURATION SETUP */
+ snor_config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(1);
+ /* 8 word page */
+ snor_config |= TEGRA_SNOR_CONFIG_PAGE_SZ(2);
+ snor_config |= TEGRA_SNOR_CONFIG_MST_ENB;
+ /* SNOR DMA CONFIGURATION SETUP */
+ /* NOR -> AHB */
+ dma_config &= ~TEGRA_SNOR_DMA_CFG_DIR;
+ /* One word burst */
+ dma_config |= TEGRA_SNOR_DMA_CFG_BRST_SZ(4);
+
+ for (nor_address = (unsigned int)(map->phys + from);
+ word32_count > 0;
+ word32_count -= current_transfer,
+ dma_transfer_count += current_transfer,
+ nor_address += (current_transfer * 4),
+ ahb_address += (current_transfer * 4)) {
+
+ current_transfer =
+ (word32_count > TEGRA_SNOR_DMA_LIMIT_WORDS)
+ ? (TEGRA_SNOR_DMA_LIMIT_WORDS) : word32_count;
+ /* Start NOR operation */
+ snor_config |= TEGRA_SNOR_CONFIG_GO;
+ dma_config |= TEGRA_SNOR_DMA_CFG_GO;
+ /* Enable interrupt before every transaction since the
+ * interrupt handler disables it */
+ dma_config |= TEGRA_SNOR_DMA_CFG_INT_ENB;
+			/* Number of AHB (32-bit) words to be transferred, minus 1 */
+ dma_config |=
+ TEGRA_SNOR_DMA_CFG_WRD_CNT(current_transfer - 1);
+ snor_tegra_writel(c, ahb_address,
+ TEGRA_SNOR_AHB_ADDR_PTR_REG);
+ snor_tegra_writel(c, nor_address,
+ TEGRA_SNOR_NOR_ADDR_PTR_REG);
+ snor_tegra_writel(c, snor_config,
+ TEGRA_SNOR_CONFIG_REG);
+ snor_tegra_writel(c, dma_config,
+ TEGRA_SNOR_DMA_CFG_REG);
+ if (wait_for_dma_completion(c)) {
+				dev_err(c->dev, "timeout waiting for DMA\n");
+ /* Transfer the remaining words by memcpy */
+ bytes_remaining += (word32_count << 2);
+ break;
+ }
+ }
+		dma_unmap_single(c->dev, ahb_start, len, DMA_FROM_DEVICE);
+ }
+ /* Put the controller back into slave mode. */
+ snor_config = snor_tegra_readl(c, TEGRA_SNOR_CONFIG_REG);
+ snor_config &= ~TEGRA_SNOR_CONFIG_MST_ENB;
+ snor_config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(0);
+ snor_tegra_writel(c, snor_config, TEGRA_SNOR_CONFIG_REG);
+out_copy:
+ memcpy_fromio(((char *)to + (dma_transfer_count << 2)),
+ ((char *)(map->virt + from) + (dma_transfer_count << 2)),
+ bytes_remaining);
+
+ snor_tegra_writel(c, c->timing0_default, TEGRA_SNOR_TIMING0_REG);
+ snor_tegra_writel(c, c->timing1_default, TEGRA_SNOR_TIMING1_REG);
+}
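For reference, the loop above moves at most TEGRA_SNOR_DMA_LIMIT_WORDS 32-bit words per pass and leaves any unaligned tail bytes, plus everything remaining after a timeout, to the trailing memcpy_fromio(). A stand-alone sketch of just that chunking arithmetic, with a made-up 200001-byte length:

	#include <stdio.h>

	#define TEGRA_SNOR_DMA_LIMIT       0x10000
	#define TEGRA_SNOR_DMA_LIMIT_WORDS (TEGRA_SNOR_DMA_LIMIT >> 2)

	int main(void)
	{
		unsigned long len = 200001;      /* hypothetical read size */
		unsigned long words = len >> 2;  /* whole 32-bit words to DMA */
		unsigned long tail = len & 3;    /* bytes left for memcpy */
		unsigned long chunks = 0;

		while (words) {
			unsigned long cur = (words > TEGRA_SNOR_DMA_LIMIT_WORDS)
					    ? TEGRA_SNOR_DMA_LIMIT_WORDS : words;
			words -= cur;
			chunks++;
		}
		/* Prints "4 DMA chunk(s), 1 tail byte(s)" for this length. */
		printf("%lu DMA chunk(s), %lu tail byte(s)\n", chunks, tail);
		return 0;
	}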
+
+static irqreturn_t tegra_nor_isr(int flag, void *dev_id)
+{
+ struct tegra_nor_info *info = (struct tegra_nor_info *)dev_id;
+ u32 dma_config = snor_tegra_readl(info, TEGRA_SNOR_DMA_CFG_REG);
+ if (dma_config & TEGRA_SNOR_DMA_CFG_INT_STA) {
+ /* Disable interrupts. WAR for BUG:821560 */
+ dma_config &= ~TEGRA_SNOR_DMA_CFG_INT_ENB;
+ snor_tegra_writel(info, dma_config, TEGRA_SNOR_DMA_CFG_REG);
+ complete(&info->dma_complete);
+	} else {
+		pr_err("%s: Spurious interrupt\n", __func__);
+		return IRQ_NONE;
+	}
+	return IRQ_HANDLED;
+}
+
+static int tegra_snor_controller_init(struct tegra_nor_info *info)
+{
+ struct tegra_nor_chip_parms *chip_parm = &info->plat->chip_parms;
+ u32 width = info->plat->flash.width;
+ u32 config = 0;
+
+ config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(0);
+ config |= TEGRA_SNOR_CONFIG_SNOR_CS(0);
+ config &= ~TEGRA_SNOR_CONFIG_DEVICE_TYPE; /* Select NOR */
+ config |= TEGRA_SNOR_CONFIG_WP; /* Enable writes */
+ switch (width) {
+ case 2:
+ config &= ~TEGRA_SNOR_CONFIG_WORDWIDE; /* 16 bit */
+ break;
+ case 4:
+ config |= TEGRA_SNOR_CONFIG_WORDWIDE; /* 32 bit */
+ break;
+ default:
+ return -EINVAL;
+ }
+ config |= TEGRA_SNOR_CONFIG_BURST_LEN(0);
+ config &= ~TEGRA_SNOR_CONFIG_MUX_MODE;
+ snor_tegra_writel(info, config, TEGRA_SNOR_CONFIG_REG);
+ info->init_config = config;
+
+ info->timing0_default = chip_parm->timing_default.timing0;
+ info->timing0_read = chip_parm->timing_read.timing0;
+ info->timing1_default = chip_parm->timing_default.timing1;
+	info->timing1_read = chip_parm->timing_read.timing1;
+
+ snor_tegra_writel(info, info->timing1_default, TEGRA_SNOR_TIMING1_REG);
+ snor_tegra_writel(info, info->timing0_default, TEGRA_SNOR_TIMING0_REG);
+ return 0;
+}
+
+static int tegra_nor_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct tegra_nor_platform_data *plat = pdev->dev.platform_data;
+ struct tegra_nor_info *info = NULL;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+
+ if (!plat) {
+ pr_err("%s: no platform device info\n", __func__);
+ err = -EINVAL;
+ goto fail;
+ }
+
+ info = devm_kzalloc(dev, sizeof(struct tegra_nor_info),
+ GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+	/* Get the controller register aperture and map it */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no mem resource?\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(dev, "NOR region already claimed\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ info->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!info->base) {
+ dev_err(dev, "Can't ioremap NOR region\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+	/* Get the NOR flash aperture and map it */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "no mem resource?\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "NOR region already claimed\n");
+ err = -EBUSY;
+ goto fail;
+ }
+
+ info->map.virt = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!info->map.virt) {
+ dev_err(dev, "Can't ioremap NOR region\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ info->plat = plat;
+ info->dev = dev;
+ info->map.bankwidth = plat->flash.width;
+ info->map.name = dev_name(dev);
+ info->map.phys = res->start;
+ info->map.size = resource_size(res);
+
+ info->clk = clk_get(dev, NULL);
+ if (IS_ERR(info->clk)) {
+ err = PTR_ERR(info->clk);
+ goto fail;
+ }
+
+ err = clk_enable(info->clk);
+ if (err != 0)
+ goto out_clk_put;
+
+ simple_map_init(&info->map);
+ info->map.copy_from = tegra_flash_dma;
+
+	/* Initialise the SNOR controller before probe */
+ err = tegra_snor_controller_init(info);
+ if (err) {
+ dev_err(dev, "Error initializing controller\n");
+ goto out_clk_disable;
+ }
+
+ init_completion(&info->dma_complete);
+
+ irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no irq resource?\n");
+		err = irq;
+		goto out_clk_disable;
+	}
+
+ /* Register SNOR DMA completion interrupt */
+ err = devm_request_irq(dev, irq, tegra_nor_isr, IRQF_DISABLED,
+ dev_name(dev), info);
+ if (err) {
+ dev_err(dev, "Failed to request irq %i\n", irq);
+ goto out_clk_disable;
+ }
+
+ info->mtd = do_map_probe(plat->flash.map_name, &info->map);
+ if (!info->mtd) {
+ err = -EIO;
+ goto out_clk_disable;
+ }
+ info->mtd->owner = THIS_MODULE;
+ info->parts = NULL;
+
+ platform_set_drvdata(pdev, info);
+ err = parse_mtd_partitions(info->mtd, part_probes, &info->parts, 0);
+ if (err > 0)
+ err = add_mtd_partitions(info->mtd, info->parts, err);
+	else if (err <= 0 && plat->flash.parts)
+		err = add_mtd_partitions(info->mtd, plat->flash.parts,
+					 plat->flash.nr_parts);
+ else
+ add_mtd_device(info->mtd);
+
+ return 0;
+
+out_clk_disable:
+ clk_disable(info->clk);
+out_clk_put:
+ clk_put(info->clk);
+fail:
+ pr_err("Tegra NOR probe failed\n");
+ return err;
+}
+
+static int tegra_nor_remove(struct platform_device *pdev)
+{
+ struct tegra_nor_info *info = platform_get_drvdata(pdev);
+
+	if (info->parts) {
+		del_mtd_partitions(info->mtd);
+		kfree(info->parts);
+	} else {
+		del_mtd_device(info->mtd);
+	}
+ map_destroy(info->mtd);
+ clk_disable(info->clk);
+ clk_put(info->clk);
+
+ return 0;
+}
+
+static struct platform_driver __refdata tegra_nor_driver = {
+ .probe = tegra_nor_probe,
+ .remove = __devexit_p(tegra_nor_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_nor_init(void)
+{
+ return platform_driver_register(&tegra_nor_driver);
+}
+
+static void __exit tegra_nor_exit(void)
+{
+ platform_driver_unregister(&tegra_nor_driver);
+}
+
+module_init(tegra_nor_init);
+module_exit(tegra_nor_exit);
+
+MODULE_AUTHOR("Raghavendra VK <rvk@nvidia.com>");
+MODULE_DESCRIPTION("NOR Flash mapping driver for NVIDIA Tegra based boards");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4c3425235adc..43173a335e49 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+ tristate "Include chip ids for known NAND devices."
+ depends on MTD
+ help
+	  Useful for NAND drivers that do not use the NAND subsystem but
+	  would still like to take advantage of the known chip information.
+
config MTD_NAND_ECC
tristate
@@ -121,6 +128,23 @@ config MTD_NAND_OMAP2
help
Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+config MTD_NAND_OMAP_PREFETCH
+ bool "GPMC prefetch support for NAND Flash device"
+ depends on MTD_NAND_OMAP2
+ default y
+ help
+	  The NAND device can be accessed for read/write using the GPMC
+	  PREFETCH engine to improve performance.
+
+config MTD_NAND_OMAP_PREFETCH_DMA
+ depends on MTD_NAND_OMAP_PREFETCH
+ bool "DMA mode"
+ default n
+ help
+	  The GPMC PREFETCH engine can be configured either in MPU interrupt
+	  mode or in DMA interrupt mode.
+	  Say Y here for DMA mode; otherwise MPU interrupt mode will be used.
+
config MTD_NAND_IDS
tristate
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 86f05f45780a..15d71658b4f1 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3222,6 +3222,44 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
}
EXPORT_SYMBOL(nand_scan_ident);
+static void nand_panic_wait(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ int i;
+
+	if (chip->state != FL_READY) {
+		for (i = 0; i < 40; i++) {
+			if (chip->dev_ready(mtd))
+				break;
+			mdelay(10);
+		}
+	}
+ chip->state = FL_READY;
+}
+
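+/**
+ * nand_panic_write - [MTD Interface] write to NAND while the kernel panics
+ * @mtd:	MTD device structure
+ * @to:		offset to write to
+ * @len:	number of bytes to write
+ * @retlen:	pointer to variable to store the number of written bytes
+ * @buf:	the data to write
+ *
+ * Used via mtd->panic_write (e.g. by mtdoops): instead of sleeping on the
+ * chip lock, it busy-polls dev_ready() for up to 400 ms and then issues
+ * the write directly.
+ */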
+static int nand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+	/* Do not allow writes past end of device */
+ if ((to + len) > mtd->size)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ nand_panic_wait(mtd);
+
+ chip->ops.len = len;
+ chip->ops.datbuf = (uint8_t *)buf;
+ chip->ops.oobbuf = NULL;
+
+ ret = nand_do_write_ops(mtd, to, &chip->ops);
+
+ *retlen = chip->ops.retlen;
+ return ret;
+}
+
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
@@ -3465,6 +3503,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->panic_write = panic_nand_write;
mtd->read_oob = nand_read_oob;
mtd->write_oob = nand_write_oob;
+ mtd->panic_write = nand_panic_write;
mtd->sync = nand_sync;
mtd->lock = NULL;
mtd->unlock = NULL;